Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.pxd +42 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.py +429 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pxd +44 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pyx +608 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pxd +55 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pyx +1542 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pxd +183 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pyx +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd +42 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_feather.pyx +117 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pxd +94 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_generated_version.py +16 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pyx +310 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pxd +134 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pyx +2205 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd +56 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx +484 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd +33 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.pyx +467 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/acero.py +395 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/compat.pxi +71 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/compute.py +731 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/feather.py +277 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/api.h +47 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array.h +49 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h +587 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h +484 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h +33 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h +164 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h +275 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h +145 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h +1717 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h +697 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h +134 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h +489 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h +409 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h +81 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h +120 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h +126 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h +58 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/config.h +98 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h +311 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/device.h +394 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h +165 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h +296 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool_test.h +111 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/pch.h +30 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/pretty_print.h +157 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h +407 -0
llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.pxd
ADDED
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from libcpp.memory cimport shared_ptr
+from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType,
+                                        CField, CRecordBatch, CSchema,
+                                        CTable, CTensor, CSparseCOOTensor,
+                                        CSparseCSRMatrix, CSparseCSCMatrix,
+                                        CSparseCSFTensor)
+
+cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
+    cdef int import_pyarrow() except -1
+    cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer)
+    cdef object wrap_data_type(const shared_ptr[CDataType]& type)
+    cdef object wrap_field(const shared_ptr[CField]& field)
+    cdef object wrap_schema(const shared_ptr[CSchema]& schema)
+    cdef object wrap_array(const shared_ptr[CArray]& sp_array)
+    cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor)
+    cdef object wrap_sparse_tensor_coo(
+        const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor)
+    cdef object wrap_sparse_tensor_csr(
+        const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor)
+    cdef object wrap_sparse_tensor_csc(
+        const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor)
+    cdef object wrap_sparse_tensor_csf(
+        const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor)
+    cdef object wrap_table(const shared_ptr[CTable]& ctable)
+    cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch)
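The declarations above are what third-party Cython extensions pick up when they cimport pyarrow's C-level API. As a minimal sketch (the module and function names here are hypothetical, and this assumes a build configured against pyarrow's headers), a consumer .pyx would call import_pyarrow() once at module load before using any of the wrap_* helpers:

# example_ext.pyx -- hypothetical third-party Cython module, not part of this diff.
from pyarrow cimport import_pyarrow, wrap_table
from pyarrow.includes.libarrow cimport CTable
from libcpp.memory cimport shared_ptr

# Must run once at import time, before any wrap_* call; the `except -1`
# declaration means a failure propagates as a Python exception.
import_pyarrow()

cdef object table_from_cpp(const shared_ptr[CTable]& ctable):
    # Convert a C++ arrow::Table shared_ptr into a Python pyarrow.Table.
    return wrap_table(ctable)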
llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.py
ADDED
@@ -0,0 +1,429 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# flake8: noqa
+
+"""
+PyArrow is the python implementation of Apache Arrow.
+
+Apache Arrow is a cross-language development platform for in-memory data.
+It specifies a standardized language-independent columnar memory format for
+flat and hierarchical data, organized for efficient analytic operations on
+modern hardware. It also provides computational libraries and zero-copy
+streaming messaging and interprocess communication.
+
+For more information see the official page at https://arrow.apache.org
+"""
+
+import gc as _gc
+import importlib as _importlib
+import os as _os
+import platform as _platform
+import sys as _sys
+import warnings as _warnings
+
+try:
+    from ._generated_version import version as __version__
+except ImportError:
+    # Package is not installed, parse git tag at runtime
+    try:
+        import setuptools_scm
+        # Code duplicated from setup.py to avoid a dependency on each other
+
+        def parse_git(root, **kwargs):
+            """
+            Parse function for setuptools_scm that ignores tags for non-C++
+            subprojects, e.g. apache-arrow-js-XXX tags.
+            """
+            from setuptools_scm.git import parse
+            kwargs['describe_command'] = \
+                "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'"
+            return parse(root, **kwargs)
+        __version__ = setuptools_scm.get_version('../',
+                                                 parse=parse_git)
+    except ImportError:
+        __version__ = None
+
+# ARROW-8684: Disable GC while initializing Cython extension module,
+# to workaround Cython bug in https://github.com/cython/cython/issues/3603
+_gc_enabled = _gc.isenabled()
+_gc.disable()
+import pyarrow.lib as _lib
+if _gc_enabled:
+    _gc.enable()
+
+from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path,
+                         MonthDayNano, VersionInfo, cpp_build_info,
+                         cpp_version, cpp_version_info, runtime_info,
+                         cpu_count, set_cpu_count, enable_signal_handlers,
+                         io_thread_count, set_io_thread_count)
+
+
+def show_versions():
+    """
+    Print various version information, to help with error reporting.
+    """
+    def print_entry(label, value):
+        print(f"{label: <26}: {value: <8}")
+
+    print("pyarrow version info\n--------------------")
+    print_entry("Package kind", cpp_build_info.package_kind
+                if len(cpp_build_info.package_kind) > 0
+                else "not indicated")
+    print_entry("Arrow C++ library version", cpp_build_info.version)
+    print_entry("Arrow C++ compiler",
+                f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}")
+    print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags)
+    print_entry("Arrow C++ git revision", cpp_build_info.git_id)
+    print_entry("Arrow C++ git description", cpp_build_info.git_description)
+    print_entry("Arrow C++ build type", cpp_build_info.build_type)
+
+
+def _module_is_available(module):
+    try:
+        _importlib.import_module(f'pyarrow.{module}')
+    except ImportError:
+        return False
+    else:
+        return True
+
+
+def _filesystem_is_available(fs):
+    try:
+        import pyarrow.fs
+    except ImportError:
+        return False
+
+    try:
+        getattr(pyarrow.fs, fs)
+    except (ImportError, AttributeError):
+        return False
+    else:
+        return True
+
+
+def show_info():
+    """
+    Print detailed version and platform information, for error reporting
+    """
+    show_versions()
+
+    def print_entry(label, value):
+        print(f"  {label: <20}: {value: <8}")
+
+    print("\nPlatform:")
+    print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}")
+    print_entry("SIMD Level", runtime_info().simd_level)
+    print_entry("Detected SIMD Level", runtime_info().detected_simd_level)
+
+    pool = default_memory_pool()
+    print("\nMemory:")
+    print_entry("Default backend", pool.backend_name)
+    print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes")
+    print_entry("Max memory", f"{pool.max_memory()} bytes")
+    print_entry("Supported Backends", ', '.join(supported_memory_backends()))
+
+    print("\nOptional modules:")
+    modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json",
+               "orc", "parquet"]
+    for module in modules:
+        status = "Enabled" if _module_is_available(module) else "-"
+        print(f"  {module: <20}: {status: <8}")
+
+    print("\nFilesystems:")
+    filesystems = ["AzureFileSystem", "GcsFileSystem",
+                   "HadoopFileSystem", "S3FileSystem"]
+    for fs in filesystems:
+        status = "Enabled" if _filesystem_is_available(fs) else "-"
+        print(f"  {fs: <20}: {status: <8}")
+
+    print("\nCompression Codecs:")
+    codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"]
+    for codec in codecs:
+        status = "Enabled" if Codec.is_available(codec) else "-"
+        print(f"  {codec: <20}: {status: <8}")
+
+
+from pyarrow.lib import (null, bool_,
+                         int8, int16, int32, int64,
+                         uint8, uint16, uint32, uint64,
+                         time32, time64, timestamp, date32, date64, duration,
+                         month_day_nano_interval,
+                         float16, float32, float64,
+                         binary, string, utf8, binary_view, string_view,
+                         large_binary, large_string, large_utf8,
+                         decimal128, decimal256,
+                         list_, large_list, list_view, large_list_view,
+                         map_, struct,
+                         union, sparse_union, dense_union,
+                         dictionary,
+                         run_end_encoded,
+                         fixed_shape_tensor,
+                         field,
+                         type_for_alias,
+                         DataType, DictionaryType, StructType,
+                         ListType, LargeListType, FixedSizeListType,
+                         ListViewType, LargeListViewType,
+                         MapType, UnionType, SparseUnionType, DenseUnionType,
+                         TimestampType, Time32Type, Time64Type, DurationType,
+                         FixedSizeBinaryType, Decimal128Type, Decimal256Type,
+                         BaseExtensionType, ExtensionType,
+                         RunEndEncodedType, FixedShapeTensorType,
+                         PyExtensionType, UnknownExtensionType,
+                         register_extension_type, unregister_extension_type,
+                         DictionaryMemo,
+                         KeyValueMetadata,
+                         Field,
+                         Schema,
+                         schema,
+                         unify_schemas,
+                         Array, Tensor,
+                         array, chunked_array, record_batch, nulls, repeat,
+                         SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix,
+                         SparseCSFTensor,
+                         infer_type, from_numpy_dtype,
+                         NullArray,
+                         NumericArray, IntegerArray, FloatingPointArray,
+                         BooleanArray,
+                         Int8Array, UInt8Array,
+                         Int16Array, UInt16Array,
+                         Int32Array, UInt32Array,
+                         Int64Array, UInt64Array,
+                         HalfFloatArray, FloatArray, DoubleArray,
+                         ListArray, LargeListArray, FixedSizeListArray,
+                         ListViewArray, LargeListViewArray,
+                         MapArray, UnionArray,
+                         BinaryArray, StringArray,
+                         LargeBinaryArray, LargeStringArray,
+                         BinaryViewArray, StringViewArray,
+                         FixedSizeBinaryArray,
+                         DictionaryArray,
+                         Date32Array, Date64Array, TimestampArray,
+                         Time32Array, Time64Array, DurationArray,
+                         MonthDayNanoIntervalArray,
+                         Decimal128Array, Decimal256Array, StructArray, ExtensionArray,
+                         RunEndEncodedArray, FixedShapeTensorArray,
+                         scalar, NA, _NULL as NULL, Scalar,
+                         NullScalar, BooleanScalar,
+                         Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar,
+                         UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar,
+                         HalfFloatScalar, FloatScalar, DoubleScalar,
+                         Decimal128Scalar, Decimal256Scalar,
+                         ListScalar, LargeListScalar, FixedSizeListScalar,
+                         ListViewScalar, LargeListViewScalar,
+                         Date32Scalar, Date64Scalar,
+                         Time32Scalar, Time64Scalar,
+                         TimestampScalar, DurationScalar,
+                         MonthDayNanoIntervalScalar,
+                         BinaryScalar, LargeBinaryScalar, BinaryViewScalar,
+                         StringScalar, LargeStringScalar, StringViewScalar,
+                         FixedSizeBinaryScalar, DictionaryScalar,
+                         MapScalar, StructScalar, UnionScalar,
+                         RunEndEncodedScalar, ExtensionScalar)
+
+# Buffers, allocation
+from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
+                         Codec, compress, decompress, allocate_buffer)
+
+from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
+                         total_allocated_bytes, set_memory_pool,
+                         default_memory_pool, system_memory_pool,
+                         jemalloc_memory_pool, mimalloc_memory_pool,
+                         logging_memory_pool, proxy_memory_pool,
+                         log_memory_allocations, jemalloc_set_decay_ms,
+                         supported_memory_backends)
+
+# I/O
+from pyarrow.lib import (NativeFile, PythonFile,
+                         BufferedInputStream, BufferedOutputStream, CacheOptions,
+                         CompressedInputStream, CompressedOutputStream,
+                         TransformInputStream, transcoding_input_stream,
+                         FixedSizeBufferWriter,
+                         BufferReader, BufferOutputStream,
+                         OSFile, MemoryMappedFile, memory_map,
+                         create_memory_map, MockOutputStream,
+                         input_stream, output_stream,
+                         have_libhdfs)
+
+from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table,
+                         concat_arrays, concat_tables, TableGroupBy,
+                         RecordBatchReader)
+
+# Exceptions
+from pyarrow.lib import (ArrowCancelled,
+                         ArrowCapacityError,
+                         ArrowException,
+                         ArrowKeyError,
+                         ArrowIndexError,
+                         ArrowInvalid,
+                         ArrowIOError,
+                         ArrowMemoryError,
+                         ArrowNotImplementedError,
+                         ArrowTypeError,
+                         ArrowSerializationError)
+
+from pyarrow.ipc import serialize_pandas, deserialize_pandas
+import pyarrow.ipc as ipc
+
+import pyarrow.types as types
+
+
+# ----------------------------------------------------------------------
+# Deprecations
+
+from pyarrow.util import _deprecate_api, _deprecate_class
+
+
+# TODO: Deprecate these somehow in the pyarrow namespace
+from pyarrow.ipc import (Message, MessageReader, MetadataVersion,
+                         RecordBatchFileReader, RecordBatchFileWriter,
+                         RecordBatchStreamReader, RecordBatchStreamWriter)
+
+# ----------------------------------------------------------------------
+# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
+# wheels)
+
+
+def get_include():
+    """
+    Return absolute path to directory containing Arrow C++ include
+    headers. Similar to numpy.get_include
+    """
+    return _os.path.join(_os.path.dirname(__file__), 'include')
+
+
+def _get_pkg_config_executable():
+    return _os.environ.get('PKG_CONFIG', 'pkg-config')
+
+
+def _has_pkg_config(pkgname):
+    import subprocess
+    try:
+        return subprocess.call([_get_pkg_config_executable(),
+                                '--exists', pkgname]) == 0
+    except FileNotFoundError:
+        return False
+
+
+def _read_pkg_config_variable(pkgname, cli_args):
+    import subprocess
+    cmd = [_get_pkg_config_executable(), pkgname] + cli_args
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if proc.returncode != 0:
+        raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
+    return out.rstrip().decode('utf8')
+
+
+def get_libraries():
+    """
+    Return list of library names to include in the `libraries` argument for C
+    or Cython extensions using pyarrow
+    """
+    return ['arrow_python', 'arrow']
+
+
+def create_library_symlinks():
+    """
+    With Linux and macOS wheels, the bundled shared libraries have an embedded
+    ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them
+    with -larrow won't work unless we create symlinks at locations like
+    site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses
+    prior problems we had with shipping two copies of the shared libraries to
+    permit third party projects like turbodbc to build their C++ extensions
+    against the pyarrow wheels.
+
+    This function must only be invoked once and only when the shared libraries
+    are bundled with the Python package, which should only apply to wheel-based
+    installs. It requires write access to the site-packages/pyarrow directory
+    and so depending on your system may need to be run with root.
+    """
+    import glob
+    if _sys.platform == 'win32':
+        return
+    package_cwd = _os.path.dirname(__file__)
+
+    if _sys.platform == 'linux':
+        bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*'))
+
+        def get_symlink_path(hard_path):
+            return hard_path.rsplit('.', 1)[0]
+    else:
+        bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib'))
+
+        def get_symlink_path(hard_path):
+            return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib'))
+
+    for lib_hard_path in bundled_libs:
+        symlink_path = get_symlink_path(lib_hard_path)
+        if _os.path.exists(symlink_path):
+            continue
+        try:
+            _os.symlink(lib_hard_path, symlink_path)
+        except PermissionError:
+            print("Tried creating symlink {}. If you need to link to "
+                  "bundled shared libraries, run "
+                  "pyarrow.create_library_symlinks() as root")
+
+
+def get_library_dirs():
+    """
+    Return lists of directories likely to contain Arrow C++ libraries for
+    linking C or Cython extensions using pyarrow
+    """
+    package_cwd = _os.path.dirname(__file__)
+    library_dirs = [package_cwd]
+
+    def append_library_dir(library_dir):
+        if library_dir not in library_dirs:
+            library_dirs.append(library_dir)
+
+    # Search library paths via pkg-config. This is necessary if the user
+    # installed libarrow and the other shared libraries manually and they
+    # are not shipped inside the pyarrow package (see also ARROW-2976).
+    pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
+    for pkgname in ["arrow", "arrow_python"]:
+        if _has_pkg_config(pkgname):
+            library_dir = _read_pkg_config_variable(pkgname,
+                                                    ["--libs-only-L"])
+            # pkg-config output could be empty if Arrow is installed
+            # as a system package.
+            if library_dir:
+                if not library_dir.startswith("-L"):
+                    raise ValueError(
+                        "pkg-config --libs-only-L returned unexpected "
+                        "value {!r}".format(library_dir))
+                append_library_dir(library_dir[2:])
+
+    if _sys.platform == 'win32':
+        # TODO(wesm): Is this necessary, or does setuptools within a conda
+        # installation add Library\lib to the linker path for MSVC?
+        python_base_install = _os.path.dirname(_sys.executable)
+        library_dir = _os.path.join(python_base_install, 'Library', 'lib')
+
+        if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
+            append_library_dir(library_dir)
+
+    # ARROW-4074: Allow for ARROW_HOME to be set to some other directory
+    if _os.environ.get('ARROW_HOME'):
+        append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
+    else:
+        # Python wheels bundle the Arrow libraries in the pyarrow directory.
+        append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))
+
+    return library_dirs
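The get_include(), get_libraries(), and get_library_dirs() helpers defined at the end of this file exist so downstream C/Cython extensions can compile and link against the bundled Arrow libraries. A minimal setup.py sketch wiring them into a setuptools Extension (the extension name and source file are hypothetical):

# setup.py -- illustrative sketch; "my_ext" and its source file are made up.
from setuptools import Extension, setup
import pyarrow as pa

ext = Extension(
    "my_ext",
    sources=["my_ext.c"],
    include_dirs=[pa.get_include()],     # bundled Arrow C++ headers
    libraries=pa.get_libraries(),        # ['arrow_python', 'arrow'] per this file
    library_dirs=pa.get_library_dirs(),  # package dir, pkg-config dirs, ARROW_HOME/lib
)

# On Linux/macOS wheels the bundled libraries carry an ABI suffix, so plain
# -larrow linking also needs pa.create_library_symlinks() (see its docstring).
setup(name="my-ext", version="0.1", ext_modules=[ext])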
llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pxd
ADDED
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from pyarrow.lib cimport *
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_acero cimport *
+
+
+cdef class ExecNodeOptions(_Weakrefable):
+    cdef:
+        shared_ptr[CExecNodeOptions] wrapped
+
+    cdef void init(self, const shared_ptr[CExecNodeOptions]& sp)
+    cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil
+
+
+cdef class Declaration(_Weakrefable):
+
+    cdef:
+        CDeclaration decl
+
+    cdef void init(self, const CDeclaration& c_decl)
+
+    @staticmethod
+    cdef wrap(const CDeclaration& c_decl)
+
+    cdef inline CDeclaration unwrap(self) nogil
llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pyx
ADDED
@@ -0,0 +1,608 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# ---------------------------------------------------------------------
+# Low-level Acero bindings
+
+# cython: profile=False
+# distutils: language = c++
+# cython: language_level = 3
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_acero cimport *
+from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table,
+                          RecordBatchReader)
+from pyarrow.lib import frombytes, tobytes
+from pyarrow._compute cimport (
+    Expression, FunctionOptions, _ensure_field_ref, _true,
+    unwrap_null_placement, unwrap_sort_order
+)
+
+
+cdef class ExecNodeOptions(_Weakrefable):
+    """
+    Base class for the node options.
+
+    Use one of the subclasses to construct an options object.
+    """
+    __slots__ = ()  # avoid mistakingly creating attributes
+
+    cdef void init(self, const shared_ptr[CExecNodeOptions]& sp):
+        self.wrapped = sp
+
+    cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil:
+        return self.wrapped
+
+
+cdef class _TableSourceNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, Table table):
+        cdef:
+            shared_ptr[CTable] c_table
+
+        c_table = pyarrow_unwrap_table(table)
+        self.wrapped.reset(
+            new CTableSourceNodeOptions(c_table)
+        )
+
+
+class TableSourceNodeOptions(_TableSourceNodeOptions):
+    """
+    A Source node which accepts a table.
+
+    This is the option class for the "table_source" node factory.
+
+    Parameters
+    ----------
+    table : pyarrow.Table
+        The table which acts as the data source.
+    """
+
+    def __init__(self, Table table):
+        self._set_options(table)
+
+
+cdef class _FilterNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, Expression filter_expression not None):
+        self.wrapped.reset(
+            new CFilterNodeOptions(<CExpression>filter_expression.unwrap())
+        )
+
+
+class FilterNodeOptions(_FilterNodeOptions):
+    """
+    Make a node which excludes some rows from batches passed through it.
+
+    This is the option class for the "filter" node factory.
+
+    The "filter" operation provides an option to define data filtering
+    criteria. It selects rows where the given expression evaluates to true.
+    Filters can be written using pyarrow.compute.Expression, and the
+    expression must have a return type of boolean.
+
+    Parameters
+    ----------
+    filter_expression : pyarrow.compute.Expression
+    """
+
+    def __init__(self, Expression filter_expression):
+        self._set_options(filter_expression)
+
+
+cdef class _ProjectNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, expressions, names=None):
+        cdef:
+            Expression expr
+            vector[CExpression] c_expressions
+            vector[c_string] c_names
+
+        for expr in expressions:
+            c_expressions.push_back(expr.unwrap())
+
+        if names is not None:
+            if len(names) != len(expressions):
+                raise ValueError(
+                    "The number of names should be equal to the number of expressions"
+                )
+
+            for name in names:
+                c_names.push_back(<c_string>tobytes(name))
+
+            self.wrapped.reset(
+                new CProjectNodeOptions(c_expressions, c_names)
+            )
+        else:
+            self.wrapped.reset(
+                new CProjectNodeOptions(c_expressions)
+            )
+
+
+class ProjectNodeOptions(_ProjectNodeOptions):
+    """
+    Make a node which executes expressions on input batches,
+    producing batches of the same length with new columns.
+
+    This is the option class for the "project" node factory.
+
+    The "project" operation rearranges, deletes, transforms, and
+    creates columns. Each output column is computed by evaluating
+    an expression against the source record batch. These must be
+    scalar expressions (expressions consisting of scalar literals,
+    field references and scalar functions, i.e. elementwise functions
+    that return one value for each input row independent of the value
+    of all other rows).
+
+    Parameters
+    ----------
+    expressions : list of pyarrow.compute.Expression
+        List of expressions to evaluate against the source batch. This must
+        be scalar expressions.
+    names : list of str, optional
+        List of names for each of the output columns (same length as
+        `expressions`). If `names` is not provided, the string
+        representations of exprs will be used.
+    """
+
+    def __init__(self, expressions, names=None):
+        self._set_options(expressions, names)
+
+
+cdef class _AggregateNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, aggregates, keys=None):
+        cdef:
+            CAggregate c_aggr
+            vector[CAggregate] c_aggregations
+            vector[CFieldRef] c_keys
+
+        for arg_names, func_name, opts, name in aggregates:
+            c_aggr.function = tobytes(func_name)
+            if opts is not None:
+                c_aggr.options = (<FunctionOptions?>opts).wrapped
+            else:
+                c_aggr.options = <shared_ptr[CFunctionOptions]>nullptr
+            if not isinstance(arg_names, (list, tuple)):
+                arg_names = [arg_names]
+            for arg in arg_names:
+                c_aggr.target.push_back(_ensure_field_ref(arg))
+            c_aggr.name = tobytes(name)
+
+            c_aggregations.push_back(move(c_aggr))
+
+        if keys is None:
+            keys = []
+        for name in keys:
+            c_keys.push_back(_ensure_field_ref(name))
+
+        self.wrapped.reset(
+            new CAggregateNodeOptions(c_aggregations, c_keys)
+        )
+
+
+class AggregateNodeOptions(_AggregateNodeOptions):
+    """
+    Make a node which aggregates input batches, optionally grouped by keys.
+
+    This is the option class for the "aggregate" node factory.
+
+    Acero supports two types of aggregates: "scalar" aggregates,
+    and "hash" aggregates. Scalar aggregates reduce an array or scalar
+    input to a single scalar output (e.g. computing the mean of a column).
+    Hash aggregates act like GROUP BY in SQL and first partition data
+    based on one or more key columns, then reduce the data in each partition.
+    The aggregate node supports both types of computation, and can compute
+    any number of aggregations at once.
+
+    Parameters
+    ----------
+    aggregates : list of tuples
+        Aggregations which will be applied to the targeted fields.
+        Specified as a list of tuples, where each tuple is one aggregation
+        specification and consists of: aggregation target column(s) followed
+        by function name, aggregation function options object and the
+        output field name.
+        The target column(s) specification can be a single field reference,
+        an empty list or a list of fields unary, nullary and n-ary aggregation
+        functions respectively. Each field reference can be a string
+        column name or expression.
+    keys : list of field references, optional
+        Keys by which aggregations will be grouped. Each key can reference
+        a field using a string name or expression.
+    """
+
+    def __init__(self, aggregates, keys=None):
+        self._set_options(aggregates, keys)
+
+
+cdef class _OrderByNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, sort_keys, null_placement):
+        cdef:
+            vector[CSortKey] c_sort_keys
+
+        for name, order in sort_keys:
+            c_sort_keys.push_back(
+                CSortKey(_ensure_field_ref(name), unwrap_sort_order(order))
+            )
+
+        self.wrapped.reset(
+            new COrderByNodeOptions(
+                COrdering(c_sort_keys, unwrap_null_placement(null_placement))
+            )
+        )
+
+
+class OrderByNodeOptions(_OrderByNodeOptions):
+    """
+    Make a node which applies a new ordering to the data.
+
+    Currently this node works by accumulating all data, sorting, and then
+    emitting the new data with an updated batch index.
+    Larger-than-memory sort is not currently supported.
+
+    This is the option class for the "order_by" node factory.
+
+    Parameters
+    ----------
+    sort_keys : sequence of (name, order) tuples
+        Names of field/column keys to sort the input on,
+        along with the order each field/column is sorted in.
+        Accepted values for `order` are "ascending", "descending".
+        Each field reference can be a string column name or expression.
+    null_placement : str, default "at_end"
+        Where nulls in input should be sorted, only applying to
+        columns/fields mentioned in `sort_keys`.
+        Accepted values are "at_start", "at_end".
+    """
+
+    def __init__(self, sort_keys=(), *, null_placement="at_end"):
+        self._set_options(sort_keys, null_placement)
+
+
+cdef class _HashJoinNodeOptions(ExecNodeOptions):
+
+    def _set_options(
+        self, join_type, left_keys, right_keys, left_output=None, right_output=None,
+        output_suffix_for_left="", output_suffix_for_right="",
+    ):
+        cdef:
+            CJoinType c_join_type
+            vector[CFieldRef] c_left_keys
+            vector[CFieldRef] c_right_keys
+            vector[CFieldRef] c_left_output
+            vector[CFieldRef] c_right_output
+
+        # join type
+        if join_type == "left semi":
+            c_join_type = CJoinType_LEFT_SEMI
+        elif join_type == "right semi":
+            c_join_type = CJoinType_RIGHT_SEMI
+        elif join_type == "left anti":
+            c_join_type = CJoinType_LEFT_ANTI
+        elif join_type == "right anti":
+            c_join_type = CJoinType_RIGHT_ANTI
+        elif join_type == "inner":
+            c_join_type = CJoinType_INNER
+        elif join_type == "left outer":
+            c_join_type = CJoinType_LEFT_OUTER
+        elif join_type == "right outer":
+            c_join_type = CJoinType_RIGHT_OUTER
+        elif join_type == "full outer":
+            c_join_type = CJoinType_FULL_OUTER
+        else:
+            raise ValueError("Unsupported join type")
+
+        # left/right keys
+        if not isinstance(left_keys, (list, tuple)):
+            left_keys = [left_keys]
+        for key in left_keys:
+            c_left_keys.push_back(_ensure_field_ref(key))
+        if not isinstance(right_keys, (list, tuple)):
+            right_keys = [right_keys]
+        for key in right_keys:
+            c_right_keys.push_back(_ensure_field_ref(key))
+
+        # left/right output fields
+        if left_output is not None and right_output is not None:
+            for colname in left_output:
+                c_left_output.push_back(_ensure_field_ref(colname))
+            for colname in right_output:
+                c_right_output.push_back(_ensure_field_ref(colname))
+
+            self.wrapped.reset(
+                new CHashJoinNodeOptions(
+                    c_join_type, c_left_keys, c_right_keys,
+                    c_left_output, c_right_output,
+                    _true,
+                    <c_string>tobytes(output_suffix_for_left),
+                    <c_string>tobytes(output_suffix_for_right)
+                )
+            )
+        else:
+            self.wrapped.reset(
+                new CHashJoinNodeOptions(
+                    c_join_type, c_left_keys, c_right_keys,
+                    _true,
+                    <c_string>tobytes(output_suffix_for_left),
+                    <c_string>tobytes(output_suffix_for_right)
+                )
+            )
+
+
+class HashJoinNodeOptions(_HashJoinNodeOptions):
+    """
+    Make a node which implements join operation using hash join strategy.
+
+    This is the option class for the "hashjoin" node factory.
+
+    Parameters
+    ----------
+    join_type : str
+        Type of join. One of "left semi", "right semi", "left anti",
+        "right anti", "inner", "left outer", "right outer", "full outer".
+    left_keys : str, Expression or list
+        Key fields from left input. Each key can be a string column name
+        or a field expression, or a list of such field references.
+    right_keys : str, Expression or list
+        Key fields from right input. See `left_keys` for details.
+    left_output : list, optional
+        List of output fields passed from left input. If left and right
+        output fields are not specified, all valid fields from both left and
+        right input will be output. Each field can be a string column name
+        or a field expression.
+    right_output : list, optional
+        List of output fields passed from right input. If left and right
+        output fields are not specified, all valid fields from both left and
+        right input will be output. Each field can be a string column name
+        or a field expression.
+    output_suffix_for_left : str
+        Suffix added to names of output fields coming from left input
+        (used to distinguish, if necessary, between fields of the same
+        name in left and right input and can be left empty if there are
+        no name collisions).
+    output_suffix_for_right : str
+        Suffix added to names of output fields coming from right input,
+        see `output_suffix_for_left` for details.
+    """
+
+    def __init__(
+        self, join_type, left_keys, right_keys, left_output=None, right_output=None,
+        output_suffix_for_left="", output_suffix_for_right=""
+    ):
+        self._set_options(
+            join_type, left_keys, right_keys, left_output, right_output,
+            output_suffix_for_left, output_suffix_for_right
+        )
+
+
+cdef class _AsofJoinNodeOptions(ExecNodeOptions):
+
+    def _set_options(self, left_on, left_by, right_on, right_by, tolerance):
+        cdef:
+            vector[CFieldRef] c_left_by
+            vector[CFieldRef] c_right_by
+            CAsofJoinKeys c_left_keys
+            CAsofJoinKeys c_right_keys
+            vector[CAsofJoinKeys] c_input_keys
+
+        # Prepare left AsofJoinNodeOption::Keys
+        if not isinstance(left_by, (list, tuple)):
+            left_by = [left_by]
+        for key in left_by:
+            c_left_by.push_back(_ensure_field_ref(key))
+
+        c_left_keys.on_key = _ensure_field_ref(left_on)
+        c_left_keys.by_key = c_left_by
+
+        c_input_keys.push_back(c_left_keys)
+
+        # Prepare right AsofJoinNodeOption::Keys
+        if not isinstance(right_by, (list, tuple)):
+            right_by = [right_by]
+        for key in right_by:
+            c_right_by.push_back(_ensure_field_ref(key))
+
+        c_right_keys.on_key = _ensure_field_ref(right_on)
+        c_right_keys.by_key = c_right_by
+
+        c_input_keys.push_back(c_right_keys)
+
+        self.wrapped.reset(
+            new CAsofJoinNodeOptions(
+                c_input_keys,
+                tolerance,
+            )
+        )
+
+
+class AsofJoinNodeOptions(_AsofJoinNodeOptions):
+    """
+    Make a node which implements 'as of join' operation.
+
+    This is the option class for the "asofjoin" node factory.
+
+    Parameters
+    ----------
+    left_on : str, Expression
+        The left key on which the join operation should be performed.
+        Can be a string column name or a field expression.
+
+        An inexact match is used on the "on" key, i.e. a row is considered a
+        match if and only if left_on - tolerance <= right_on <= left_on.
+
+        The input dataset must be sorted by the "on" key. Must be a single
+        field of a common type.
+
+        Currently, the "on" key must be an integer, date, or timestamp type.
+    left_by: str, Expression or list
+        The left keys on which the join operation should be performed.
+        Exact equality is used for each field of the "by" keys.
+        Each key can be a string column name or a field expression,
+        or a list of such field references.
+    right_on : str, Expression
+        The right key on which the join operation should be performed.
+        See `left_on` for details.
+    right_by: str, Expression or list
+        The right keys on which the join operation should be performed.
+        See `left_by` for details.
+    tolerance : int
+        The tolerance to use for the asof join. The tolerance is interpreted in
+        the same units as the "on" key.
+    """
+
+    def __init__(self, left_on, left_by, right_on, right_by, tolerance):
+        self._set_options(left_on, left_by, right_on, right_by, tolerance)
+
+
+cdef class Declaration(_Weakrefable):
+    """
+    Helper class for declaring the nodes of an ExecPlan.
+
+    A Declaration represents an unconstructed ExecNode, and potentially
+    more since its inputs may also be Declarations or when constructed
+    with ``from_sequence``.
+
+    The possible ExecNodes to use are registered with a name,
+    the "factory name", and need to be specified using this name, together
+    with its corresponding ExecNodeOptions subclass.
+
+    Parameters
+    ----------
+    factory_name : str
+        The ExecNode factory name, such as "table_source", "filter",
+        "project" etc. See the ExecNodeOptions subclasses for the exact
+        factory names to use.
+    options : ExecNodeOptions
+        Corresponding ExecNodeOptions subclass (matching the factory name).
+    inputs : list of Declaration, optional
+        Input nodes for this declaration. Optional if the node is a source
+        node, or when the declaration gets combined later with
+        ``from_sequence``.
+
+    Returns
+    -------
+    Declaration
+    """
+    cdef void init(self, const CDeclaration& c_decl):
+        self.decl = c_decl
+
+    @staticmethod
+    cdef wrap(const CDeclaration& c_decl):
+        cdef Declaration self = Declaration.__new__(Declaration)
+        self.init(c_decl)
+        return self
+
+    cdef inline CDeclaration unwrap(self) nogil:
+        return self.decl
+
+    def __init__(self, factory_name, ExecNodeOptions options, inputs=None):
+        cdef:
+            c_string c_factory_name
+            CDeclaration c_decl
+            vector[CDeclaration.Input] c_inputs
+
+        c_factory_name = tobytes(factory_name)
+
+        if inputs is not None:
+            for ipt in inputs:
+                c_inputs.push_back(
+                    CDeclaration.Input((<Declaration>ipt).unwrap())
+                )
+
+        c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap())
+        self.init(c_decl)
+
+    @staticmethod
+    def from_sequence(decls):
+        """
+        Convenience factory for the common case of a simple sequence of nodes.
+
+        Each of the declarations will be appended to the inputs of the
+        subsequent declaration, and the final modified declaration will
+        be returned.
+
+        Parameters
+        ----------
+        decls : list of Declaration
+
+        Returns
+        -------
+        Declaration
+        """
+        cdef:
+            vector[CDeclaration] c_decls
+            CDeclaration c_decl
+
+        for decl in decls:
+            c_decls.push_back((<Declaration> decl).unwrap())
+
+        c_decl = CDeclaration.Sequence(c_decls)
+        return Declaration.wrap(c_decl)
+
+    def __str__(self):
+        return frombytes(GetResultValue(DeclarationToString(self.decl)))
+
+    def __repr__(self):
+        return "<pyarrow.acero.Declaration>\n{0}".format(str(self))
+
+    def to_table(self, bint use_threads=True):
+        """
+        Run the declaration and collect the results into a table.
+
+        This method will implicitly add a sink node to the declaration
+        to collect results into a table. It will then create an ExecPlan
+        from the declaration, start the exec plan, block until the plan
+        has finished, and return the created table.
+
+        Parameters
+        ----------
+        use_threads : bool, default True
+            If set to False, then all CPU work will be done on the calling
+            thread. I/O tasks will still happen on the I/O executor
+            and may be multi-threaded (but should not use significant CPU
+            resources).
+
+        Returns
+        -------
+        pyarrow.Table
+        """
+        cdef:
+            shared_ptr[CTable] c_table
+
+        with nogil:
+            c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads))
+        return pyarrow_wrap_table(c_table)
+
+    def to_reader(self, bint use_threads=True):
+        """Run the declaration and return results as a RecordBatchReader.
+
+        For details about the parameters, see `to_table`.
+
+        Returns
+        -------
+        pyarrow.RecordBatchReader
+        """
+        cdef:
+            RecordBatchReader reader
+        reader = RecordBatchReader.__new__(RecordBatchReader)
+        reader.reader.reset(
+            GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release()
+        )
+        return reader
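Putting the option classes and Declaration together: a short sketch of a table_source -> filter -> project pipeline chained with from_sequence and executed with to_table. The column names and data are made up for illustration, and importing these names from pyarrow.acero assumes the acero.py module listed above re-exports them (that file's contents are not shown in this diff):

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.acero import (Declaration, TableSourceNodeOptions,
                           FilterNodeOptions, ProjectNodeOptions)

table = pa.table({"a": [1, 2, 3], "b": [10, 20, 30]})

# Each Declaration pairs a factory name with its matching options class;
# from_sequence feeds each node into the next one.
decl = Declaration.from_sequence([
    Declaration("table_source", TableSourceNodeOptions(table)),
    Declaration("filter", FilterNodeOptions(pc.field("a") > 1)),
    Declaration("project", ProjectNodeOptions([pc.field("b") * 2], ["b_doubled"])),
])

print(decl.to_table())  # one column "b_doubled": [40, 60]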
llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pxd
ADDED
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from pyarrow.includes.libarrow cimport *
+from pyarrow.lib cimport _Weakrefable
+
+
+cdef class ConvertOptions(_Weakrefable):
+    cdef:
+        unique_ptr[CCSVConvertOptions] options
+
+    @staticmethod
+    cdef ConvertOptions wrap(CCSVConvertOptions options)
+
+
+cdef class ParseOptions(_Weakrefable):
+    cdef:
+        unique_ptr[CCSVParseOptions] options
+        object _invalid_row_handler
+
+    @staticmethod
+    cdef ParseOptions wrap(CCSVParseOptions options)
+
+
+cdef class ReadOptions(_Weakrefable):
+    cdef:
+        unique_ptr[CCSVReadOptions] options
+        public object encoding
+
+    @staticmethod
+    cdef ReadOptions wrap(CCSVReadOptions options)
+
+
+cdef class WriteOptions(_Weakrefable):
+    cdef:
+        unique_ptr[CCSVWriteOptions] options
+
+    @staticmethod
+    cdef WriteOptions wrap(CCSVWriteOptions options)
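These cdef holders back the Python-level ReadOptions/ParseOptions/ConvertOptions/WriteOptions classes implemented in the _csv.pyx that follows. A small usage sketch with pyarrow.csv (the input file name and column types are made up):

import pyarrow as pa
import pyarrow.csv as csv

# Each options object maps onto one of the wrapped C structs above.
read_opts = csv.ReadOptions(skip_rows=1, column_names=["id", "score"])
parse_opts = csv.ParseOptions(delimiter=";")
convert_opts = csv.ConvertOptions(column_types={"id": pa.int64(),
                                                "score": pa.float64()})

table = csv.read_csv("data.csv",  # hypothetical input file
                     read_options=read_opts,
                     parse_options=parse_opts,
                     convert_options=convert_opts)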
llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pyx
ADDED
@@ -0,0 +1,1542 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from cython.operator cimport dereference as deref

from collections import namedtuple
from collections.abc import Mapping

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_python cimport *
from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema,
                          RecordBatchReader, ensure_type,
                          maybe_unbox_memory_pool, get_input_stream,
                          get_writer, native_transcoding_input_stream,
                          pyarrow_unwrap_batch, pyarrow_unwrap_schema,
                          pyarrow_unwrap_table, pyarrow_wrap_schema,
                          pyarrow_wrap_table, pyarrow_wrap_data_type,
                          pyarrow_unwrap_data_type, Table, RecordBatch,
                          StopToken, _CRecordBatchWriter)
from pyarrow.lib import frombytes, tobytes, SignalStopHandler


cdef unsigned char _single_char(s) except 0:
    val = ord(s)
    if val == 0 or val > 127:
        raise ValueError("Expecting an ASCII character")
    return <unsigned char> val


_InvalidRow = namedtuple(
    "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"),
    module=__name__)


class InvalidRow(_InvalidRow):
    """
    Description of an invalid row in a CSV file.

    Parameters
    ----------
    expected_columns : int
        The expected number of columns in the row.
    actual_columns : int
        The actual number of columns in the row.
    number : int or None
        The physical row number if known, otherwise None.
    text : str
        The contents of the row.
    """
    __slots__ = ()


cdef CInvalidRowResult _handle_invalid_row(
        handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error:
    # A negative row number means undetermined (because of parallel reading)
    row_number = c_row.number if c_row.number >= 0 else None
    row = InvalidRow(c_row.expected_columns, c_row.actual_columns,
                     row_number, frombytes(<c_string> c_row.text))
    result = handler(row)
    if result == 'error':
        return CInvalidRowResult_Error
    elif result == 'skip':
        return CInvalidRowResult_Skip
    else:
        raise ValueError("Invalid return value for invalid row handler: "
                         f"expected 'error' or 'skip', got {result!r}")
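# --- Editor's note: a minimal sketch of the handler contract enforced by
# _handle_invalid_row above. Any callable accepting an InvalidRow and
# returning 'skip' or 'error' qualifies; any other return value raises
# ValueError:
#
#     def log_and_skip(row):
#         print(f"skipping row {row.number}: {row.text!r} "
#               f"({row.actual_columns} columns, "
#               f"expected {row.expected_columns})")
#         return 'skip'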
cdef class ReadOptions(_Weakrefable):
    """
    Options for reading CSV files.

    Parameters
    ----------
    use_threads : bool, optional (default True)
        Whether to use multiple threads to accelerate reading
    block_size : int, optional
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual record batches or table chunks.
        Minimum valid value for block size is 1
    skip_rows : int, optional (default 0)
        The number of rows to skip before the column names (if any)
        and the CSV data.
    skip_rows_after_names : int, optional (default 0)
        The number of rows to skip after the column names.
        This number can be larger than the number of rows in one
        block, and empty rows are counted.
        The order of application is as follows:
        - `skip_rows` is applied (if non-zero);
        - column names are read (unless `column_names` is set);
        - `skip_rows_after_names` is applied (if non-zero).
    column_names : list, optional
        The column names of the target table. If empty, fall back on
        `autogenerate_column_names`.
    autogenerate_column_names : bool, optional (default False)
        Whether to autogenerate column names if `column_names` is empty.
        If true, column names will be of the form "f0", "f1"...
        If false, column names will be read from the first CSV row
        after `skip_rows`.
    encoding : str, optional (default 'utf8')
        The character encoding of the CSV data. Columns that cannot
        decode using this encoding can still be read as Binary.

    Examples
    --------

    Defining an example data:

    >>> import io
    >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04"
    >>> print(s)
    1,2,3
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04

    Ignore the first numbered row and substitute it with defined
    or autogenerated column names:

    >>> from pyarrow import csv
    >>> read_options = csv.ReadOptions(
    ...     column_names=["animals", "n_legs", "entry"],
    ...     skip_rows=1)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]

    >>> read_options = csv.ReadOptions(autogenerate_column_names=True,
    ...                                skip_rows=1)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    f0: string
    f1: int64
    f2: date32[day]
    ----
    f0: [["Flamingo","Horse","Brittle stars","Centipede"]]
    f1: [[2,4,5,100]]
    f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]

    Remove the first 2 rows of the data:

    >>> read_options = csv.ReadOptions(skip_rows_after_names=2)
    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
    pyarrow.Table
    1: string
    2: int64
    3: date32[day]
    ----
    1: [["Brittle stars","Centipede"]]
    2: [[5,100]]
    3: [[2022-03-03,2022-03-04]]
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    # __init__() is not called when unpickling, initialize storage here
    def __cinit__(self, *args, **kwargs):
        self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults()))

    def __init__(self, *, use_threads=None, block_size=None, skip_rows=None,
                 skip_rows_after_names=None, column_names=None,
                 autogenerate_column_names=None, encoding='utf8'):
        if use_threads is not None:
            self.use_threads = use_threads
        if block_size is not None:
            self.block_size = block_size
        if skip_rows is not None:
            self.skip_rows = skip_rows
        if skip_rows_after_names is not None:
            self.skip_rows_after_names = skip_rows_after_names
        if column_names is not None:
            self.column_names = column_names
        if autogenerate_column_names is not None:
            self.autogenerate_column_names = autogenerate_column_names
        # Python-specific option
        self.encoding = encoding

    @property
    def use_threads(self):
        """
        Whether to use multiple threads to accelerate reading.
        """
        return deref(self.options).use_threads

    @use_threads.setter
    def use_threads(self, value):
        deref(self.options).use_threads = value

    @property
    def block_size(self):
        """
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual record batches or table chunks.
        """
        return deref(self.options).block_size

    @block_size.setter
    def block_size(self, value):
        deref(self.options).block_size = value

    @property
    def skip_rows(self):
        """
        The number of rows to skip before the column names (if any)
        and the CSV data.
        See `skip_rows_after_names` for interaction description
        """
        return deref(self.options).skip_rows

    @skip_rows.setter
    def skip_rows(self, value):
        deref(self.options).skip_rows = value

    @property
    def skip_rows_after_names(self):
        """
        The number of rows to skip after the column names.
        This number can be larger than the number of rows in one
        block, and empty rows are counted.
        The order of application is as follows:
        - `skip_rows` is applied (if non-zero);
        - column names are read (unless `column_names` is set);
        - `skip_rows_after_names` is applied (if non-zero).
        """
        return deref(self.options).skip_rows_after_names

    @skip_rows_after_names.setter
    def skip_rows_after_names(self, value):
        deref(self.options).skip_rows_after_names = value

    @property
    def column_names(self):
        """
        The column names of the target table. If empty, fall back on
        `autogenerate_column_names`.
        """
        return [frombytes(s) for s in deref(self.options).column_names]

    @column_names.setter
    def column_names(self, value):
        deref(self.options).column_names.clear()
        for item in value:
            deref(self.options).column_names.push_back(tobytes(item))

    @property
    def autogenerate_column_names(self):
        """
        Whether to autogenerate column names if `column_names` is empty.
        If true, column names will be of the form "f0", "f1"...
        If false, column names will be read from the first CSV row
        after `skip_rows`.
        """
        return deref(self.options).autogenerate_column_names

    @autogenerate_column_names.setter
    def autogenerate_column_names(self, value):
        deref(self.options).autogenerate_column_names = value

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ReadOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ReadOptions

        Returns
        -------
        bool
        """
        return (
            self.use_threads == other.use_threads and
            self.block_size == other.block_size and
            self.skip_rows == other.skip_rows and
            self.skip_rows_after_names == other.skip_rows_after_names and
            self.column_names == other.column_names and
            self.autogenerate_column_names ==
            other.autogenerate_column_names and
            self.encoding == other.encoding
        )

    @staticmethod
    cdef ReadOptions wrap(CCSVReadOptions options):
        out = ReadOptions()
        out.options.reset(new CCSVReadOptions(move(options)))
        out.encoding = 'utf8'  # No way to know this
        return out

    def __getstate__(self):
        return (self.use_threads, self.block_size, self.skip_rows,
                self.column_names, self.autogenerate_column_names,
                self.encoding, self.skip_rows_after_names)

    def __setstate__(self, state):
        (self.use_threads, self.block_size, self.skip_rows,
         self.column_names, self.autogenerate_column_names,
         self.encoding, self.skip_rows_after_names) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False
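# --- Editor's note: an illustrative sketch of the Python-specific `encoding`
# option. Non-UTF-8 input is transcoded to UTF-8 on the fly (see _get_reader
# and native_transcoding_input_stream further below):
#
#     import io
#     from pyarrow import csv
#     data = "animaux,pattes\nchamois,4\n".encode('latin-1')
#     opts = csv.ReadOptions(encoding='latin-1')
#     table = csv.read_csv(io.BytesIO(data), read_options=opts)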
cdef class ParseOptions(_Weakrefable):
    """
    Options for parsing CSV files.

    Parameters
    ----------
    delimiter : 1-character string, optional (default ',')
        The character delimiting individual cells in the CSV data.
    quote_char : 1-character string or False, optional (default '"')
        The character used optionally for quoting CSV values
        (False if quoting is not allowed).
    double_quote : bool, optional (default True)
        Whether two quotes in a quoted CSV value denote a single quote
        in the data.
    escape_char : 1-character string or False, optional (default False)
        The character used optionally for escaping special characters
        (False if escaping is not allowed).
    newlines_in_values : bool, optional (default False)
        Whether newline characters are allowed in CSV values.
        Setting this to True reduces the performance of multi-threaded
        CSV reading.
    ignore_empty_lines : bool, optional (default True)
        Whether empty lines are ignored in CSV input.
        If False, an empty line is interpreted as containing a single empty
        value (assuming a one-column CSV file).
    invalid_row_handler : callable, optional (default None)
        If not None, this object is called for each CSV row that fails
        parsing (because of a mismatching number of columns).
        It should accept a single InvalidRow argument and return either
        "skip" or "error" depending on the desired outcome.

    Examples
    --------

    Defining an example file from bytes object:

    >>> import io
    >>> s = (
    ...     "animals;n_legs;entry\\n"
    ...     "Flamingo;2;2022-03-01\\n"
    ...     "# Comment here:\\n"
    ...     "Horse;4;2022-03-02\\n"
    ...     "Brittle stars;5;2022-03-03\\n"
    ...     "Centipede;100;2022-03-04"
    ... )
    >>> print(s)
    animals;n_legs;entry
    Flamingo;2;2022-03-01
    # Comment here:
    Horse;4;2022-03-02
    Brittle stars;5;2022-03-03
    Centipede;100;2022-03-04
    >>> source = io.BytesIO(s.encode())

    Read the data from a file skipping rows with comments
    and defining the delimiter:

    >>> from pyarrow import csv
    >>> def skip_comment(row):
    ...     if row.text.startswith("# "):
    ...         return 'skip'
    ...     else:
    ...         return 'error'
    ...
    >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment)
    >>> csv.read_csv(source, parse_options=parse_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    __slots__ = ()

    def __cinit__(self, *args, **kwargs):
        self._invalid_row_handler = None
        self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults()))

    def __init__(self, *, delimiter=None, quote_char=None, double_quote=None,
                 escape_char=None, newlines_in_values=None,
                 ignore_empty_lines=None, invalid_row_handler=None):
        if delimiter is not None:
            self.delimiter = delimiter
        if quote_char is not None:
            self.quote_char = quote_char
        if double_quote is not None:
            self.double_quote = double_quote
        if escape_char is not None:
            self.escape_char = escape_char
        if newlines_in_values is not None:
            self.newlines_in_values = newlines_in_values
        if ignore_empty_lines is not None:
            self.ignore_empty_lines = ignore_empty_lines
        if invalid_row_handler is not None:
            self.invalid_row_handler = invalid_row_handler

    @property
    def delimiter(self):
        """
        The character delimiting individual cells in the CSV data.
        """
        return chr(deref(self.options).delimiter)

    @delimiter.setter
    def delimiter(self, value):
        deref(self.options).delimiter = _single_char(value)

    @property
    def quote_char(self):
        """
        The character used optionally for quoting CSV values
        (False if quoting is not allowed).
        """
        if deref(self.options).quoting:
            return chr(deref(self.options).quote_char)
        else:
            return False

    @quote_char.setter
    def quote_char(self, value):
        if value is False:
            deref(self.options).quoting = False
        else:
            deref(self.options).quote_char = _single_char(value)
            deref(self.options).quoting = True

    @property
    def double_quote(self):
        """
        Whether two quotes in a quoted CSV value denote a single quote
        in the data.
        """
        return deref(self.options).double_quote

    @double_quote.setter
    def double_quote(self, value):
        deref(self.options).double_quote = value

    @property
    def escape_char(self):
        """
        The character used optionally for escaping special characters
        (False if escaping is not allowed).
        """
        if deref(self.options).escaping:
            return chr(deref(self.options).escape_char)
        else:
            return False

    @escape_char.setter
    def escape_char(self, value):
        if value is False:
            deref(self.options).escaping = False
        else:
            deref(self.options).escape_char = _single_char(value)
            deref(self.options).escaping = True

    @property
    def newlines_in_values(self):
        """
        Whether newline characters are allowed in CSV values.
        Setting this to True reduces the performance of multi-threaded
        CSV reading.
        """
        return deref(self.options).newlines_in_values

    @newlines_in_values.setter
    def newlines_in_values(self, value):
        deref(self.options).newlines_in_values = value

    @property
    def ignore_empty_lines(self):
        """
        Whether empty lines are ignored in CSV input.
        If False, an empty line is interpreted as containing a single empty
        value (assuming a one-column CSV file).
        """
        return deref(self.options).ignore_empty_lines

    @property
    def invalid_row_handler(self):
        """
        Optional handler for invalid rows.

        If not None, this object is called for each CSV row that fails
        parsing (because of a mismatching number of columns).
        It should accept a single InvalidRow argument and return either
        "skip" or "error" depending on the desired outcome.
        """
        return self._invalid_row_handler

    @invalid_row_handler.setter
    def invalid_row_handler(self, value):
        if value is not None and not callable(value):
            raise TypeError("Expected callable or None, "
                            f"got instance of {type(value)!r}")
        self._invalid_row_handler = value
        deref(self.options).invalid_row_handler = MakeInvalidRowHandler(
            <function[PyInvalidRowCallback]> &_handle_invalid_row, value)

    @ignore_empty_lines.setter
    def ignore_empty_lines(self, value):
        deref(self.options).ignore_empty_lines = value

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ParseOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ParseOptions

        Returns
        -------
        bool
        """
        return (
            self.delimiter == other.delimiter and
            self.quote_char == other.quote_char and
            self.double_quote == other.double_quote and
            self.escape_char == other.escape_char and
            self.newlines_in_values == other.newlines_in_values and
            self.ignore_empty_lines == other.ignore_empty_lines and
            self._invalid_row_handler == other._invalid_row_handler
        )

    @staticmethod
    cdef ParseOptions wrap(CCSVParseOptions options):
        out = ParseOptions()
        out.options.reset(new CCSVParseOptions(move(options)))
        return out

    def __getstate__(self):
        return (self.delimiter, self.quote_char, self.double_quote,
                self.escape_char, self.newlines_in_values,
                self.ignore_empty_lines, self.invalid_row_handler)

    def __setstate__(self, state):
        (self.delimiter, self.quote_char, self.double_quote,
         self.escape_char, self.newlines_in_values,
         self.ignore_empty_lines, self.invalid_row_handler) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False
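# --- Editor's note: a minimal sketch for tab-separated input. Passing False
# disables quoting entirely, per the quote_char setter above:
#
#     parse_options = csv.ParseOptions(delimiter='\t', quote_char=False)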
cdef class _ISO8601(_Weakrefable):
    """
    A special object indicating ISO-8601 parsing.
    """
    __slots__ = ()

    def __str__(self):
        return 'ISO8601'

    def __eq__(self, other):
        return isinstance(other, _ISO8601)


ISO8601 = _ISO8601()
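# --- Editor's note: ISO8601 is a singleton sentinel accepted alongside
# strptime() format strings in ConvertOptions.timestamp_parsers (see the
# setter below). An illustrative mix:
#
#     convert_options = csv.ConvertOptions(
#         timestamp_parsers=[csv.ISO8601, "%d/%m/%Y"])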
cdef class ConvertOptions(_Weakrefable):
    """
    Options for converting CSV data.

    Parameters
    ----------
    check_utf8 : bool, optional (default True)
        Whether to check UTF8 validity of string columns.
    column_types : pyarrow.Schema or dict, optional
        Explicitly map column names to column types. Passing this argument
        disables type inference on the defined columns.
    null_values : list, optional
        A sequence of strings that denote nulls in the data
        (defaults are appropriate in most cases). Note that by default,
        string columns are not checked for null values. To enable
        null checking for those, specify ``strings_can_be_null=True``.
    true_values : list, optional
        A sequence of strings that denote true booleans in the data
        (defaults are appropriate in most cases).
    false_values : list, optional
        A sequence of strings that denote false booleans in the data
        (defaults are appropriate in most cases).
    decimal_point : 1-character string, optional (default '.')
        The character used as decimal point in floating-point and decimal
        data.
    strings_can_be_null : bool, optional (default False)
        Whether string / binary columns can have null values.
        If true, then strings in null_values are considered null for
        string columns.
        If false, then all strings are valid string values.
    quoted_strings_can_be_null : bool, optional (default True)
        Whether quoted values can be null.
        If true, then strings in "null_values" are also considered null
        when they appear quoted in the CSV file. Otherwise, quoted values
        are never considered null.
    include_columns : list, optional
        The names of columns to include in the Table.
        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
    include_missing_columns : bool, optional (default False)
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a column of nulls (whose type is selected using
        `column_types`, or null by default).
        This option is ignored if `include_columns` is empty.
    auto_dict_encode : bool, optional (default False)
        Whether to try to automatically dict-encode string / binary data.
        If true, then when type inference detects a string or binary column,
        it is dict-encoded up to `auto_dict_max_cardinality` distinct values
        (per chunk), after which it switches to regular encoding.
        This setting is ignored for non-inferred columns (those in
        `column_types`).
    auto_dict_max_cardinality : int, optional
        The maximum dictionary cardinality for `auto_dict_encode`.
        This value is per chunk.
    timestamp_parsers : list, optional
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.

    Examples
    --------

    Defining an example data:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry,fast\\n"
    ...     "Flamingo,2,01/03/2022,Yes\\n"
    ...     "Horse,4,02/03/2022,Yes\\n"
    ...     "Brittle stars,5,03/03/2022,No\\n"
    ...     "Centipede,100,04/03/2022,No\\n"
    ...     ",6,05/03/2022,"
    ... )
    >>> print(s)
    animals,n_legs,entry,fast
    Flamingo,2,01/03/2022,Yes
    Horse,4,02/03/2022,Yes
    Brittle stars,5,03/03/2022,No
    Centipede,100,04/03/2022,No
    ,6,05/03/2022,

    Change the type of a column:

    >>> import pyarrow as pa
    >>> from pyarrow import csv
    >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()})
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: double
    entry: string
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]]
    fast: [["Yes","Yes","No","No",""]]

    Define a date parsing format to get a timestamp type column
    (in case dates are not in ISO format and not converted by default):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: timestamp[s]
    fast: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [["Yes","Yes","No","No",""]]

    Specify a subset of columns to be read:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]

    List an additional column to be included as a null-typed column:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals", "n_legs", "location"],
    ...     include_missing_columns=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    location: null
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
    n_legs: [[2,4,5,100,6]]
    location: [5 nulls]

    Define columns as dictionary type (by default only the
    string/binary columns are dictionary encoded):

    >>> convert_options = csv.ConvertOptions(
    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"],
    ...     auto_dict_encode=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: dictionary<values=string, indices=int32, ordered=0>
    n_legs: int64
    entry: timestamp[s]
    fast: dictionary<values=string, indices=int32, ordered=0>
    ----
    animals: [  -- dictionary:
    ["Flamingo","Horse","Brittle stars","Centipede",""]  -- indices:
    [0,1,2,3,4]]
    n_legs: [[2,4,5,100,6]]
    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
    fast: [  -- dictionary:
    ["Yes","No",""]  -- indices:
    [0,0,1,1,2]]

    Set an upper limit for the number of categories. If the number of
    categories exceeds the limit, the conversion to dictionary will not
    happen:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["animals"],
    ...     auto_dict_encode=True,
    ...     auto_dict_max_cardinality=2)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]

    Set empty strings to missing values:

    >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"],
    ...                                      strings_can_be_null=True)
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    animals: string
    n_legs: int64
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede",null]]
    n_legs: [[2,4,5,100,6]]

    Define values to be True and False when converting a column
    into a bool type:

    >>> convert_options = csv.ConvertOptions(
    ...     include_columns=["fast"],
    ...     false_values=["No"],
    ...     true_values=["Yes"])
    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
    pyarrow.Table
    fast: bool
    ----
    fast: [[true,true,false,false,null]]
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __cinit__(self, *args, **kwargs):
        self.options.reset(
            new CCSVConvertOptions(CCSVConvertOptions.Defaults()))

    def __init__(self, *, check_utf8=None, column_types=None, null_values=None,
                 true_values=None, false_values=None, decimal_point=None,
                 strings_can_be_null=None, quoted_strings_can_be_null=None,
                 include_columns=None, include_missing_columns=None,
                 auto_dict_encode=None, auto_dict_max_cardinality=None,
                 timestamp_parsers=None):
        if check_utf8 is not None:
            self.check_utf8 = check_utf8
        if column_types is not None:
            self.column_types = column_types
        if null_values is not None:
            self.null_values = null_values
        if true_values is not None:
            self.true_values = true_values
        if false_values is not None:
            self.false_values = false_values
        if decimal_point is not None:
            self.decimal_point = decimal_point
        if strings_can_be_null is not None:
            self.strings_can_be_null = strings_can_be_null
        if quoted_strings_can_be_null is not None:
            self.quoted_strings_can_be_null = quoted_strings_can_be_null
        if include_columns is not None:
            self.include_columns = include_columns
        if include_missing_columns is not None:
            self.include_missing_columns = include_missing_columns
        if auto_dict_encode is not None:
            self.auto_dict_encode = auto_dict_encode
        if auto_dict_max_cardinality is not None:
            self.auto_dict_max_cardinality = auto_dict_max_cardinality
        if timestamp_parsers is not None:
            self.timestamp_parsers = timestamp_parsers

    @property
    def check_utf8(self):
        """
        Whether to check UTF8 validity of string columns.
        """
        return deref(self.options).check_utf8

    @check_utf8.setter
    def check_utf8(self, value):
        deref(self.options).check_utf8 = value

    @property
    def strings_can_be_null(self):
        """
        Whether string / binary columns can have null values.
        """
        return deref(self.options).strings_can_be_null

    @strings_can_be_null.setter
    def strings_can_be_null(self, value):
        deref(self.options).strings_can_be_null = value

    @property
    def quoted_strings_can_be_null(self):
        """
        Whether quoted values can be null.
        """
        return deref(self.options).quoted_strings_can_be_null

    @quoted_strings_can_be_null.setter
    def quoted_strings_can_be_null(self, value):
        deref(self.options).quoted_strings_can_be_null = value

    @property
    def column_types(self):
        """
        Explicitly map column names to column types.
        """
        d = {frombytes(item.first): pyarrow_wrap_data_type(item.second)
             for item in deref(self.options).column_types}
        return d

    @column_types.setter
    def column_types(self, value):
        cdef:
            shared_ptr[CDataType] typ

        if isinstance(value, Mapping):
            value = value.items()

        deref(self.options).column_types.clear()
        for item in value:
            if isinstance(item, Field):
                k = item.name
                v = item.type
            else:
                k, v = item
            typ = pyarrow_unwrap_data_type(ensure_type(v))
            assert typ != NULL
            deref(self.options).column_types[tobytes(k)] = typ

    @property
    def null_values(self):
        """
        A sequence of strings that denote nulls in the data.
        """
        return [frombytes(x) for x in deref(self.options).null_values]

    @null_values.setter
    def null_values(self, value):
        deref(self.options).null_values = [tobytes(x) for x in value]

    @property
    def true_values(self):
        """
        A sequence of strings that denote true booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).true_values]

    @true_values.setter
    def true_values(self, value):
        deref(self.options).true_values = [tobytes(x) for x in value]

    @property
    def false_values(self):
        """
        A sequence of strings that denote false booleans in the data.
        """
        return [frombytes(x) for x in deref(self.options).false_values]

    @false_values.setter
    def false_values(self, value):
        deref(self.options).false_values = [tobytes(x) for x in value]

    @property
    def decimal_point(self):
        """
        The character used as decimal point in floating-point and decimal
        data.
        """
        return chr(deref(self.options).decimal_point)

    @decimal_point.setter
    def decimal_point(self, value):
        deref(self.options).decimal_point = _single_char(value)

    @property
    def auto_dict_encode(self):
        """
        Whether to try to automatically dict-encode string / binary data.
        """
        return deref(self.options).auto_dict_encode

    @auto_dict_encode.setter
    def auto_dict_encode(self, value):
        deref(self.options).auto_dict_encode = value

    @property
    def auto_dict_max_cardinality(self):
        """
        The maximum dictionary cardinality for `auto_dict_encode`.

        This value is per chunk.
        """
        return deref(self.options).auto_dict_max_cardinality

    @auto_dict_max_cardinality.setter
    def auto_dict_max_cardinality(self, value):
        deref(self.options).auto_dict_max_cardinality = value

    @property
    def include_columns(self):
        """
        The names of columns to include in the Table.

        If empty, the Table will include all columns from the CSV file.
        If not empty, only these columns will be included, in this order.
        """
        return [frombytes(s) for s in deref(self.options).include_columns]

    @include_columns.setter
    def include_columns(self, value):
        deref(self.options).include_columns.clear()
        for item in value:
            deref(self.options).include_columns.push_back(tobytes(item))

    @property
    def include_missing_columns(self):
        """
        If false, columns in `include_columns` but not in the CSV file will
        error out.
        If true, columns in `include_columns` but not in the CSV file will
        produce a null column (whose type is selected using `column_types`,
        or null by default).
        This option is ignored if `include_columns` is empty.
        """
        return deref(self.options).include_missing_columns

    @include_missing_columns.setter
    def include_missing_columns(self, value):
        deref(self.options).include_missing_columns = value

    @property
    def timestamp_parsers(self):
        """
        A sequence of strptime()-compatible format strings, tried in order
        when attempting to infer or convert timestamp values (the special
        value ISO8601() can also be given). By default, a fast built-in
        ISO-8601 parser is used.
        """
        cdef:
            shared_ptr[CTimestampParser] c_parser
            c_string kind

        parsers = []
        for c_parser in deref(self.options).timestamp_parsers:
            kind = deref(c_parser).kind()
            if kind == b'strptime':
                parsers.append(frombytes(deref(c_parser).format()))
            else:
                assert kind == b'iso8601'
                parsers.append(ISO8601)

        return parsers

    @timestamp_parsers.setter
    def timestamp_parsers(self, value):
        cdef:
            vector[shared_ptr[CTimestampParser]] c_parsers

        for v in value:
            if isinstance(v, str):
                c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v)))
            elif v == ISO8601:
                c_parsers.push_back(CTimestampParser.MakeISO8601())
            else:
                raise TypeError("Expected list of str or ISO8601 objects")

        deref(self.options).timestamp_parsers = move(c_parsers)

    @staticmethod
    cdef ConvertOptions wrap(CCSVConvertOptions options):
        out = ConvertOptions()
        out.options.reset(new CCSVConvertOptions(move(options)))
        return out

    def validate(self):
        check_status(deref(self.options).Validate())

    def equals(self, ConvertOptions other):
        """
        Parameters
        ----------
        other : pyarrow.csv.ConvertOptions

        Returns
        -------
        bool
        """
        return (
            self.check_utf8 == other.check_utf8 and
            self.column_types == other.column_types and
            self.null_values == other.null_values and
            self.true_values == other.true_values and
            self.false_values == other.false_values and
            self.decimal_point == other.decimal_point and
            self.timestamp_parsers == other.timestamp_parsers and
            self.strings_can_be_null == other.strings_can_be_null and
            self.quoted_strings_can_be_null ==
            other.quoted_strings_can_be_null and
            self.auto_dict_encode == other.auto_dict_encode and
            self.auto_dict_max_cardinality ==
            other.auto_dict_max_cardinality and
            self.include_columns == other.include_columns and
            self.include_missing_columns == other.include_missing_columns
        )

    def __getstate__(self):
        return (self.check_utf8, self.column_types, self.null_values,
                self.true_values, self.false_values, self.decimal_point,
                self.timestamp_parsers, self.strings_can_be_null,
                self.quoted_strings_can_be_null, self.auto_dict_encode,
                self.auto_dict_max_cardinality, self.include_columns,
                self.include_missing_columns)

    def __setstate__(self, state):
        (self.check_utf8, self.column_types, self.null_values,
         self.true_values, self.false_values, self.decimal_point,
         self.timestamp_parsers, self.strings_can_be_null,
         self.quoted_strings_can_be_null, self.auto_dict_encode,
         self.auto_dict_max_cardinality, self.include_columns,
         self.include_missing_columns) = state

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False
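# --- Editor's note: per the column_types setter above, either a mapping or
# an iterable of Fields is accepted, so a pyarrow.Schema (which iterates as
# Fields) works directly. A sketch:
#
#     import pyarrow as pa
#     schema = pa.schema([("n_legs", pa.float64()), ("entry", pa.string())])
#     convert_options = csv.ConvertOptions(column_types=schema)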
cdef _get_reader(input_file, ReadOptions read_options,
                 shared_ptr[CInputStream]* out):
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)
    if read_options is not None:
        out[0] = native_transcoding_input_stream(out[0],
                                                 read_options.encoding,
                                                 'utf8')


cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out):
    if read_options is None:
        out[0] = CCSVReadOptions.Defaults()
    else:
        out[0] = deref(read_options.options)


cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out):
    if parse_options is None:
        out[0] = CCSVParseOptions.Defaults()
    else:
        out[0] = deref(parse_options.options)


cdef _get_convert_options(ConvertOptions convert_options,
                          CCSVConvertOptions* out):
    if convert_options is None:
        out[0] = CCSVConvertOptions.Defaults()
    else:
        out[0] = deref(convert_options.options)
cdef class CSVStreamingReader(RecordBatchReader):
    """An object that reads record batches incrementally from a CSV file.

    Should not be instantiated directly by user code.
    """
    cdef readonly:
        Schema schema

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, "
                        "use pyarrow.csv.open_csv() instead."
                        .format(self.__class__.__name__))

    # Note about cancellation: we cannot create a SignalStopHandler
    # by default here, as several CSVStreamingReader instances may be
    # created (including by the same thread). Handling cancellation
    # would require having the user pass the SignalStopHandler.
    # (in addition to solving ARROW-11853)

    cdef _open(self, shared_ptr[CInputStream] stream,
               CCSVReadOptions c_read_options,
               CCSVParseOptions c_parse_options,
               CCSVConvertOptions c_convert_options,
               MemoryPool memory_pool):
        cdef:
            shared_ptr[CSchema] c_schema
            CIOContext io_context

        io_context = CIOContext(maybe_unbox_memory_pool(memory_pool))

        with nogil:
            self.reader = <shared_ptr[CRecordBatchReader]> GetResultValue(
                CCSVStreamingReader.Make(
                    io_context, stream,
                    move(c_read_options), move(c_parse_options),
                    move(c_convert_options)))
            c_schema = self.reader.get().schema()

        self.schema = pyarrow_wrap_schema(c_schema)
def read_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Read a Table from a stream of CSV data.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the CSV file as an in-memory table.

    Examples
    --------

    Defining an example file from bytes object:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry\\n"
    ...     "Flamingo,2,2022-03-01\\n"
    ...     "Horse,4,2022-03-02\\n"
    ...     "Brittle stars,5,2022-03-03\\n"
    ...     "Centipede,100,2022-03-04"
    ... )
    >>> print(s)
    animals,n_legs,entry
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04
    >>> source = io.BytesIO(s.encode())

    Reading from the file:

    >>> from pyarrow import csv
    >>> csv.read_csv(source)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CIOContext io_context
        SharedPtrNoGIL[CCSVReader] reader
        shared_ptr[CTable] table

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    with SignalStopHandler() as stop_handler:
        io_context = CIOContext(
            maybe_unbox_memory_pool(memory_pool),
            (<StopToken> stop_handler.stop_token).stop_token)
        reader = GetResultValue(CCSVReader.Make(
            io_context, stream,
            c_read_options, c_parse_options, c_convert_options))

        with nogil:
            table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)
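# --- Editor's note: per the read_csv docstring above, a path ending in a
# recognized compressed extension is decompressed transparently. A sketch
# (the file name is hypothetical):
#
#     table = csv.read_csv("data.csv.gz")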
def open_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Open a streaming reader of CSV data.

    Reading using this function is always single-threaded.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.csv.CSVStreamingReader`
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CSVStreamingReader reader

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    reader = CSVStreamingReader.__new__(CSVStreamingReader)
    reader._open(stream, move(c_read_options), move(c_parse_options),
                 move(c_convert_options), memory_pool)
    return reader
def _raise_invalid_function_option(value, description, *,
|
1323 |
+
exception_class=ValueError):
|
1324 |
+
raise exception_class(f"\"{value}\" is not a valid {description}")
|
1325 |
+
|
1326 |
+
|
1327 |
+
cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *:
|
1328 |
+
if quoting_style == "needed":
|
1329 |
+
return CQuotingStyle_Needed
|
1330 |
+
elif quoting_style == "all_valid":
|
1331 |
+
return CQuotingStyle_AllValid
|
1332 |
+
elif quoting_style == "none":
|
1333 |
+
return CQuotingStyle_None
|
1334 |
+
_raise_invalid_function_option(quoting_style, "quoting style")
|
1335 |
+
|
1336 |
+
|
1337 |
+
cdef wrap_quoting_style(quoting_style):
|
1338 |
+
if quoting_style == CQuotingStyle_Needed:
|
1339 |
+
return 'needed'
|
1340 |
+
elif quoting_style == CQuotingStyle_AllValid:
|
1341 |
+
return 'all_valid'
|
1342 |
+
elif quoting_style == CQuotingStyle_None:
|
1343 |
+
return 'none'
|
1344 |
+
|
1345 |
+
|
1346 |
+
cdef class WriteOptions(_Weakrefable):
|
1347 |
+
"""
|
1348 |
+
Options for writing CSV files.
|
1349 |
+
|
1350 |
+
Parameters
|
1351 |
+
----------
|
1352 |
+
include_header : bool, optional (default True)
|
1353 |
+
Whether to write an initial header line with column names
|
1354 |
+
batch_size : int, optional (default 1024)
|
1355 |
+
How many rows to process together when converting and writing
|
1356 |
+
CSV data
|
1357 |
+
delimiter : 1-character string, optional (default ",")
|
1358 |
+
The character delimiting individual cells in the CSV data.
|
1359 |
+
quoting_style : str, optional (default "needed")
|
1360 |
+
Whether to quote values, and if so, which quoting style to use.
|
1361 |
+
The following values are accepted:
|
1362 |
+
|
1363 |
+
- "needed" (default): only enclose values in quotes when needed.
|
1364 |
+
- "all_valid": enclose all valid values in quotes; nulls are not quoted.
|
1365 |
+
- "none": do not enclose any values in quotes; values containing
|
1366 |
+
special characters (such as quotes, cell delimiters or line endings)
|
1367 |
+
will raise an error.
|
1368 |
+
"""
|
1369 |
+
|
1370 |
+
# Avoid mistakingly creating attributes
|
1371 |
+
__slots__ = ()
|
1372 |
+
|
1373 |
+
def __init__(self, *, include_header=None, batch_size=None,
|
1374 |
+
delimiter=None, quoting_style=None):
|
1375 |
+
self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults()))
|
1376 |
+
if include_header is not None:
|
1377 |
+
self.include_header = include_header
|
1378 |
+
if batch_size is not None:
|
1379 |
+
self.batch_size = batch_size
|
1380 |
+
if delimiter is not None:
|
1381 |
+
self.delimiter = delimiter
|
1382 |
+
if quoting_style is not None:
|
1383 |
+
self.quoting_style = quoting_style
|
1384 |
+
|
1385 |
+
@property
|
1386 |
+
def include_header(self):
|
1387 |
+
"""
|
1388 |
+
Whether to write an initial header line with column names.
|
1389 |
+
"""
|
1390 |
+
return deref(self.options).include_header
|
1391 |
+
|
1392 |
+
@include_header.setter
|
1393 |
+
def include_header(self, value):
|
1394 |
+
deref(self.options).include_header = value
|
1395 |
+
|
1396 |
+
@property
|
1397 |
+
def batch_size(self):
|
1398 |
+
"""
|
1399 |
+
How many rows to process together when converting and writing
|
1400 |
+
CSV data.
|
1401 |
+
"""
|
1402 |
+
return deref(self.options).batch_size
|
1403 |
+
|
1404 |
+
@batch_size.setter
|
1405 |
+
def batch_size(self, value):
|
1406 |
+
deref(self.options).batch_size = value
|
1407 |
+
|
1408 |
+
@property
|
1409 |
+
def delimiter(self):
|
1410 |
+
"""
|
1411 |
+
The character delimiting individual cells in the CSV data.
|
1412 |
+
"""
|
1413 |
+
return chr(deref(self.options).delimiter)
|
1414 |
+
|
1415 |
+
@delimiter.setter
|
1416 |
+
def delimiter(self, value):
|
1417 |
+
deref(self.options).delimiter = _single_char(value)
|
1418 |
+
|
1419 |
+
@property
|
1420 |
+
def quoting_style(self):
|
1421 |
+
"""
|
1422 |
+
Whether to quote values, and if so, which quoting style to use.
|
1423 |
+
The following values are accepted:
|
1424 |
+
|
1425 |
+
- "needed" (default): only enclose values in quotes when needed.
|
1426 |
+
- "all_valid": enclose all valid values in quotes; nulls are not quoted.
|
1427 |
+
- "none": do not enclose any values in quotes; values containing
|
1428 |
+
special characters (such as quotes, cell delimiters or line endings)
|
1429 |
+
will raise an error.
|
1430 |
+
"""
|
1431 |
+
return wrap_quoting_style(deref(self.options).quoting_style)
|
1432 |
+
|
1433 |
+
@quoting_style.setter
|
1434 |
+
def quoting_style(self, value):
|
1435 |
+
deref(self.options).quoting_style = unwrap_quoting_style(value)
|
1436 |
+
|
1437 |
+
@staticmethod
|
1438 |
+
cdef WriteOptions wrap(CCSVWriteOptions options):
|
1439 |
+
out = WriteOptions()
|
1440 |
+
out.options.reset(new CCSVWriteOptions(move(options)))
|
1441 |
+
return out
|
1442 |
+
|
1443 |
+
def validate(self):
|
1444 |
+
check_status(self.options.get().Validate())
|
1445 |
+
|
1446 |
+
|
1447 |
+
cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out):
|
1448 |
+
if write_options is None:
|
1449 |
+
out[0] = CCSVWriteOptions.Defaults()
|
1450 |
+
else:
|
1451 |
+
out[0] = deref(write_options.options)
|
1452 |
+
|
1453 |
+
|
1454 |
+
def write_csv(data, output_file, write_options=None,
|
1455 |
+
MemoryPool memory_pool=None):
|
1456 |
+
"""
|
1457 |
+
Write record batch or table to a CSV file.
|
1458 |
+
|
1459 |
+
Parameters
|
1460 |
+
----------
|
1461 |
+
data : pyarrow.RecordBatch or pyarrow.Table
|
1462 |
+
The data to write.
|
1463 |
+
output_file : string, path, pyarrow.NativeFile, or file-like object
|
1464 |
+
The location where to write the CSV data.
|
1465 |
+
write_options : pyarrow.csv.WriteOptions
|
1466 |
+
Options to configure writing the CSV data.
|
1467 |
+
memory_pool : MemoryPool, optional
|
1468 |
+
Pool for temporary allocations.
|
1469 |
+
|
1470 |
+
Examples
|
1471 |
+
--------
|
1472 |
+
|
1473 |
+
>>> import pyarrow as pa
|
1474 |
+
>>> from pyarrow import csv
|
1475 |
+
|
1476 |
+
>>> legs = pa.array([2, 4, 5, 100])
|
1477 |
+
>>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
|
1478 |
+
>>> entry_date = pa.array(["01/03/2022", "02/03/2022",
|
1479 |
+
... "03/03/2022", "04/03/2022"])
|
1480 |
+
>>> table = pa.table([animals, legs, entry_date],
|
1481 |
+
... names=["animals", "n_legs", "entry"])
|
1482 |
+
|
1483 |
+
>>> csv.write_csv(table, "animals.csv")
|
1484 |
+
|
1485 |
+
>>> write_options = csv.WriteOptions(include_header=False)
|
1486 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
1487 |
+
|
1488 |
+
>>> write_options = csv.WriteOptions(delimiter=";")
|
1489 |
+
>>> csv.write_csv(table, "animals.csv", write_options=write_options)
|
1490 |
+
"""
|
1491 |
+
cdef:
|
1492 |
+
shared_ptr[COutputStream] stream
|
1493 |
+
CCSVWriteOptions c_write_options
|
1494 |
+
CMemoryPool* c_memory_pool
|
1495 |
+
CRecordBatch* batch
|
1496 |
+
CTable* table
|
1497 |
+
_get_write_options(write_options, &c_write_options)
|
1498 |
+
|
1499 |
+
get_writer(output_file, &stream)
|
1500 |
+
c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
1501 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
1502 |
+
if isinstance(data, RecordBatch):
|
1503 |
+
batch = pyarrow_unwrap_batch(data).get()
|
1504 |
+
with nogil:
|
1505 |
+
check_status(WriteCSV(deref(batch), c_write_options, stream.get()))
|
1506 |
+
elif isinstance(data, Table):
|
1507 |
+
table = pyarrow_unwrap_table(data).get()
|
1508 |
+
with nogil:
|
1509 |
+
check_status(WriteCSV(deref(table), c_write_options, stream.get()))
|
1510 |
+
else:
|
1511 |
+
raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'")
|
1512 |
+
|
1513 |
+
|
1514 |
+
cdef class CSVWriter(_CRecordBatchWriter):
|
1515 |
+
"""
|
1516 |
+
Writer to create a CSV file.
|
1517 |
+
|
1518 |
+
Parameters
|
1519 |
+
----------
|
1520 |
+
sink : str, path, pyarrow.OutputStream or file-like object
|
1521 |
+
The location where to write the CSV data.
|
1522 |
+
schema : pyarrow.Schema
|
1523 |
+
The schema of the data to be written.
|
1524 |
+
write_options : pyarrow.csv.WriteOptions
|
1525 |
+
Options to configure writing the CSV data.
|
1526 |
+
memory_pool : MemoryPool, optional
|
1527 |
+
Pool for temporary allocations.
|
1528 |
+
"""
|
1529 |
+
|
1530 |
+
def __init__(self, sink, Schema schema, *,
|
1531 |
+
WriteOptions write_options=None, MemoryPool memory_pool=None):
|
1532 |
+
cdef:
|
1533 |
+
shared_ptr[COutputStream] c_stream
|
1534 |
+
shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema)
|
1535 |
+
CCSVWriteOptions c_write_options
|
1536 |
+
CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool)
|
1537 |
+
_get_write_options(write_options, &c_write_options)
|
1538 |
+
c_write_options.io_context = CIOContext(c_memory_pool)
|
1539 |
+
get_writer(sink, &c_stream)
|
1540 |
+
with nogil:
|
1541 |
+
self.writer = GetResultValue(MakeCSVWriter(
|
1542 |
+
c_stream, c_schema, c_write_options))
|
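Taken together, write_csv and open_csv form a bounded-memory round trip: open_csv returns a CSVStreamingReader that yields record batches incrementally instead of materializing one whole Table. A minimal sketch (the file name and column values are illustrative assumptions, echoing the write_csv docstring above):

import pyarrow as pa
from pyarrow import csv

# Write a small table, then stream it back batch by batch.
table = pa.table({"animals": ["Flamingo", "Horse"], "n_legs": [2, 4]})
csv.write_csv(table, "animals.csv")

reader = csv.open_csv("animals.csv")
for batch in reader:
    # Each item is a pyarrow.RecordBatch with the inferred schema.
    print(batch.num_rows, batch.schema.names)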
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pxd
ADDED
@@ -0,0 +1,183 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

"""Dataset is currently unstable. APIs subject to change without notice."""

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow_dataset cimport *
from pyarrow.lib cimport *
from pyarrow._fs cimport FileSystem, FileInfo


cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*)

cdef class DatasetFactory(_Weakrefable):

    cdef:
        SharedPtrNoGIL[CDatasetFactory] wrapped
        CDatasetFactory* factory

    cdef init(self, const shared_ptr[CDatasetFactory]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CDatasetFactory]& sp)

    cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil


cdef class Dataset(_Weakrefable):

    cdef:
        SharedPtrNoGIL[CDataset] wrapped
        CDataset* dataset
        public dict _scan_options

    cdef void init(self, const shared_ptr[CDataset]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CDataset]& sp)

    cdef shared_ptr[CDataset] unwrap(self) nogil


cdef class Scanner(_Weakrefable):
    cdef:
        SharedPtrNoGIL[CScanner] wrapped
        CScanner* scanner

    cdef void init(self, const shared_ptr[CScanner]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CScanner]& sp)

    cdef shared_ptr[CScanner] unwrap(self)

    @staticmethod
    cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *


cdef class FragmentScanOptions(_Weakrefable):

    cdef:
        shared_ptr[CFragmentScanOptions] wrapped

    cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFragmentScanOptions]& sp)


cdef class FileFormat(_Weakrefable):

    cdef:
        shared_ptr[CFileFormat] wrapped
        CFileFormat* format

    cdef void init(self, const shared_ptr[CFileFormat]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFileFormat]& sp)

    cdef inline shared_ptr[CFileFormat] unwrap(self)

    cdef _set_default_fragment_scan_options(self, FragmentScanOptions options)

    # Return a WrittenFile after a file was written.
    # May be overridden by subclasses, e.g. to add metadata.
    cdef WrittenFile _finish_write(self, path, base_dir,
                                   CFileWriter* file_writer)


cdef class FileWriteOptions(_Weakrefable):

    cdef:
        shared_ptr[CFileWriteOptions] wrapped
        CFileWriteOptions* c_options

    cdef void init(self, const shared_ptr[CFileWriteOptions]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFileWriteOptions]& sp)

    cdef inline shared_ptr[CFileWriteOptions] unwrap(self)


cdef class Fragment(_Weakrefable):

    cdef:
        SharedPtrNoGIL[CFragment] wrapped
        CFragment* fragment

    cdef void init(self, const shared_ptr[CFragment]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CFragment]& sp)

    cdef inline shared_ptr[CFragment] unwrap(self)


cdef class FileFragment(Fragment):

    cdef:
        CFileFragment* file_fragment

    cdef void init(self, const shared_ptr[CFragment]& sp)


cdef class Partitioning(_Weakrefable):

    cdef:
        shared_ptr[CPartitioning] wrapped
        CPartitioning* partitioning

    cdef init(self, const shared_ptr[CPartitioning]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CPartitioning]& sp)

    cdef inline shared_ptr[CPartitioning] unwrap(self)


cdef class PartitioningFactory(_Weakrefable):

    cdef:
        shared_ptr[CPartitioningFactory] wrapped
        CPartitioningFactory* factory
        object constructor
        object options

    cdef init(self, const shared_ptr[CPartitioningFactory]& sp)

    @staticmethod
    cdef wrap(const shared_ptr[CPartitioningFactory]& sp,
              object constructor, object options)

    cdef inline shared_ptr[CPartitioningFactory] unwrap(self)


cdef class WrittenFile(_Weakrefable):

    # The full path to the created file
    cdef public str path
    # Optional Parquet metadata
    # This metadata will have the file path attribute set to the path of
    # the written file.
    cdef public object metadata
    # The size of the file in bytes
    cdef public int64_t size
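These declarations back the public pyarrow.dataset module: Dataset wraps discovery over many files, and Scanner drives column projection and row filtering. A short sketch of the Python-level counterparts (the "data/" directory and field names are assumptions for illustration):

import pyarrow.dataset as ds

# Discover a directory of Parquet files as one logical dataset;
# the factory machinery declared above unifies the fragment schemas.
dataset = ds.dataset("data/", format="parquet")

# A scanner reads only the projected columns and filtered rows.
table = dataset.scanner(columns=["animals"],
                        filter=ds.field("n_legs") > 2).to_table()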
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pyx
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd
ADDED
@@ -0,0 +1,42 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

"""Dataset support for Parquet file format."""

from pyarrow.includes.libarrow_dataset cimport *
from pyarrow.includes.libarrow_dataset_parquet cimport *

from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions


cdef class ParquetFragmentScanOptions(FragmentScanOptions):
    cdef:
        CParquetFragmentScanOptions* parquet_options
        object _parquet_decryption_config

    cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp)
    cdef CReaderProperties* reader_properties(self)
    cdef ArrowReaderProperties* arrow_reader_properties(self)


cdef class ParquetFileWriteOptions(FileWriteOptions):

    cdef:
        CParquetFileWriteOptions* parquet_options
        object _properties
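The ParquetFragmentScanOptions declared here surface as per-scan Parquet knobs in pyarrow.dataset. A sketch, with the keyword choice as an illustrative assumption (pre_buffer coalesces column-chunk reads, which mostly pays off on high-latency filesystems such as object stores):

import pyarrow.dataset as ds

# Attach Parquet-specific scan options to the file format.
scan_opts = ds.ParquetFragmentScanOptions(pre_buffer=True)
fmt = ds.ParquetFileFormat(default_fragment_scan_options=scan_opts)
dataset = ds.dataset("data/", format=fmt)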
llmeval-env/lib/python3.10/site-packages/pyarrow/_feather.pyx
ADDED
@@ -0,0 +1,117 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# ---------------------------------------------------------------------
# Implement Feather file format

# cython: profile=False
# distutils: language = c++
# cython: language_level=3

from cython.operator cimport dereference as deref
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_feather cimport *
from pyarrow.lib cimport (check_status, Table, _Weakrefable,
                          get_writer, get_reader, pyarrow_wrap_table)
from pyarrow.lib import tobytes


class FeatherError(Exception):
    pass


def write_feather(Table table, object dest, compression=None,
                  compression_level=None, chunksize=None, version=2):
    cdef shared_ptr[COutputStream] sink
    get_writer(dest, &sink)

    cdef CFeatherProperties properties
    if version == 2:
        properties.version = kFeatherV2Version
    else:
        properties.version = kFeatherV1Version

    if compression == 'zstd':
        properties.compression = CCompressionType_ZSTD
    elif compression == 'lz4':
        properties.compression = CCompressionType_LZ4_FRAME
    else:
        properties.compression = CCompressionType_UNCOMPRESSED

    if chunksize is not None:
        properties.chunksize = chunksize

    if compression_level is not None:
        properties.compression_level = compression_level

    with nogil:
        check_status(WriteFeather(deref(table.table), sink.get(),
                                  properties))


cdef class FeatherReader(_Weakrefable):
    cdef:
        shared_ptr[CFeatherReader] reader

    def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads):
        cdef:
            shared_ptr[CRandomAccessFile] reader
            CIpcReadOptions options = CIpcReadOptions.Defaults()
        options.use_threads = use_threads

        get_reader(source, use_memory_map, &reader)
        with nogil:
            self.reader = GetResultValue(CFeatherReader.Open(reader, options))

    @property
    def version(self):
        return self.reader.get().version()

    def read(self):
        cdef shared_ptr[CTable] sp_table
        with nogil:
            check_status(self.reader.get()
                         .Read(&sp_table))

        return pyarrow_wrap_table(sp_table)

    def read_indices(self, indices):
        cdef:
            shared_ptr[CTable] sp_table
            vector[int] c_indices

        for index in indices:
            c_indices.push_back(index)
        with nogil:
            check_status(self.reader.get()
                         .Read(c_indices, &sp_table))

        return pyarrow_wrap_table(sp_table)

    def read_names(self, names):
        cdef:
            shared_ptr[CTable] sp_table
            vector[c_string] c_names

        for name in names:
            c_names.push_back(tobytes(name))
        with nogil:
            check_status(self.reader.get()
                         .Read(c_names, &sp_table))

        return pyarrow_wrap_table(sp_table)
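The public pyarrow.feather module wraps this writer/reader pair: version 2 files (the default) are the Arrow IPC file format and support compression, while version 1 is the legacy Feather format. A minimal round trip, with the file name as an illustrative assumption:

import pyarrow as pa
import pyarrow.feather as feather

table = pa.table({"a": [1, 2, 3]})
feather.write_feather(table, "data.feather", compression="zstd")

# read_table goes through the FeatherReader wrapper defined above.
assert feather.read_table("data.feather").equals(table)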
llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pxd
ADDED
@@ -0,0 +1,94 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow_fs cimport *
from pyarrow.lib import _detect_compression, frombytes, tobytes
from pyarrow.lib cimport *


cpdef enum FileType:
    NotFound = <int8_t> CFileType_NotFound
    Unknown = <int8_t> CFileType_Unknown
    File = <int8_t> CFileType_File
    Directory = <int8_t> CFileType_Directory


cdef class FileInfo(_Weakrefable):
    cdef:
        CFileInfo info

    @staticmethod
    cdef wrap(CFileInfo info)

    cdef inline CFileInfo unwrap(self) nogil

    @staticmethod
    cdef CFileInfo unwrap_safe(obj)


cdef class FileSelector(_Weakrefable):
    cdef:
        CFileSelector selector

    @staticmethod
    cdef FileSelector wrap(CFileSelector selector)

    cdef inline CFileSelector unwrap(self) nogil


cdef class FileSystem(_Weakrefable):
    cdef:
        shared_ptr[CFileSystem] wrapped
        CFileSystem* fs

    cdef init(self, const shared_ptr[CFileSystem]& wrapped)

    @staticmethod
    cdef wrap(const shared_ptr[CFileSystem]& sp)

    cdef inline shared_ptr[CFileSystem] unwrap(self) nogil


cdef class LocalFileSystem(FileSystem):
    cdef:
        CLocalFileSystem* localfs

    cdef init(self, const shared_ptr[CFileSystem]& wrapped)


cdef class SubTreeFileSystem(FileSystem):
    cdef:
        CSubTreeFileSystem* subtreefs

    cdef init(self, const shared_ptr[CFileSystem]& wrapped)


cdef class _MockFileSystem(FileSystem):
    cdef:
        CMockFileSystem* mockfs

    cdef init(self, const shared_ptr[CFileSystem]& wrapped)


cdef class PyFileSystem(FileSystem):
    cdef:
        CPyFileSystem* pyfs

    cdef init(self, const shared_ptr[CFileSystem]& wrapped)
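The Python classes corresponding to these declarations live in pyarrow.fs; SubTreeFileSystem, for instance, re-roots another filesystem at a prefix. A short sketch (all paths are illustrative assumptions):

from pyarrow import fs

# LocalFileSystem wraps the CLocalFileSystem declared above.
local = fs.LocalFileSystem()
print(local.get_file_info("data.feather").type)

# All relative paths below resolve under /tmp.
subtree = fs.SubTreeFileSystem("/tmp", local)
with subtree.open_output_stream("example.txt") as f:
    f.write(b"hello")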
llmeval-env/lib/python3.10/site-packages/pyarrow/_generated_version.py
ADDED
@@ -0,0 +1,16 @@
# file generated by setuptools_scm
# don't change, don't track in version control
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union
    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '16.0.0'
__version_tuple__ = version_tuple = (16, 0, 0)
llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pyx
ADDED
@@ -0,0 +1,310 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.lib cimport (_Weakrefable, MemoryPool,
                          maybe_unbox_memory_pool,
                          get_input_stream, pyarrow_wrap_table,
                          pyarrow_wrap_schema, pyarrow_unwrap_schema)


cdef class ReadOptions(_Weakrefable):
    """
    Options for reading JSON files.

    Parameters
    ----------
    use_threads : bool, optional (default True)
        Whether to use multiple threads to accelerate reading
    block_size : int, optional
        How many bytes to process at a time from the input stream.
        This will determine multi-threading granularity as well as
        the size of individual chunks in the Table.
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, use_threads=None, block_size=None):
        self.options = CJSONReadOptions.Defaults()
        if use_threads is not None:
            self.use_threads = use_threads
        if block_size is not None:
            self.block_size = block_size

    @property
    def use_threads(self):
        """
        Whether to use multiple threads to accelerate reading.
        """
        return self.options.use_threads

    @use_threads.setter
    def use_threads(self, value):
        self.options.use_threads = value

    @property
    def block_size(self):
        """
        How many bytes to process at a time from the input stream.

        This will determine multi-threading granularity as well as the size of
        individual chunks in the Table.
        """
        return self.options.block_size

    @block_size.setter
    def block_size(self, value):
        self.options.block_size = value

    def __reduce__(self):
        return ReadOptions, (
            self.use_threads,
            self.block_size
        )

    def equals(self, ReadOptions other):
        """
        Parameters
        ----------
        other : pyarrow.json.ReadOptions

        Returns
        -------
        bool
        """
        return (
            self.use_threads == other.use_threads and
            self.block_size == other.block_size
        )

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

    @staticmethod
    cdef ReadOptions wrap(CJSONReadOptions options):
        out = ReadOptions()
        out.options = options  # shallow copy
        return out


cdef class ParseOptions(_Weakrefable):
    """
    Options for parsing JSON files.

    Parameters
    ----------
    explicit_schema : Schema, optional (default None)
        Optional explicit schema (no type inference, ignores other fields).
    newlines_in_values : bool, optional (default False)
        Whether objects may be printed across multiple lines (for example
        pretty printed). If false, input must end with an empty line.
    unexpected_field_behavior : str, default "infer"
        How JSON fields outside of explicit_schema (if given) are treated.

        Possible behaviors:

        - "ignore": unexpected JSON fields are ignored
        - "error": error out on unexpected JSON fields
        - "infer": unexpected JSON fields are type-inferred and included in
          the output
    """

    __slots__ = ()

    def __init__(self, explicit_schema=None, newlines_in_values=None,
                 unexpected_field_behavior=None):
        self.options = CJSONParseOptions.Defaults()
        if explicit_schema is not None:
            self.explicit_schema = explicit_schema
        if newlines_in_values is not None:
            self.newlines_in_values = newlines_in_values
        if unexpected_field_behavior is not None:
            self.unexpected_field_behavior = unexpected_field_behavior

    def __reduce__(self):
        return ParseOptions, (
            self.explicit_schema,
            self.newlines_in_values,
            self.unexpected_field_behavior
        )

    @property
    def explicit_schema(self):
        """
        Optional explicit schema (no type inference, ignores other fields)
        """
        if self.options.explicit_schema.get() == NULL:
            return None
        else:
            return pyarrow_wrap_schema(self.options.explicit_schema)

    @explicit_schema.setter
    def explicit_schema(self, value):
        self.options.explicit_schema = pyarrow_unwrap_schema(value)

    @property
    def newlines_in_values(self):
        """
        Whether newline characters are allowed in JSON values.
        Setting this to True reduces the performance of multi-threaded
        JSON reading.
        """
        return self.options.newlines_in_values

    @newlines_in_values.setter
    def newlines_in_values(self, value):
        self.options.newlines_in_values = value

    @property
    def unexpected_field_behavior(self):
        """
        How JSON fields outside of explicit_schema (if given) are treated.

        Possible behaviors:

        - "ignore": unexpected JSON fields are ignored
        - "error": error out on unexpected JSON fields
        - "infer": unexpected JSON fields are type-inferred and included in
          the output

        Set to "infer" by default.
        """
        v = self.options.unexpected_field_behavior
        if v == CUnexpectedFieldBehavior_Ignore:
            return "ignore"
        elif v == CUnexpectedFieldBehavior_Error:
            return "error"
        elif v == CUnexpectedFieldBehavior_InferType:
            return "infer"
        else:
            raise ValueError('Unexpected value for unexpected_field_behavior')

    @unexpected_field_behavior.setter
    def unexpected_field_behavior(self, value):
        cdef CUnexpectedFieldBehavior v

        if value == "ignore":
            v = CUnexpectedFieldBehavior_Ignore
        elif value == "error":
            v = CUnexpectedFieldBehavior_Error
        elif value == "infer":
            v = CUnexpectedFieldBehavior_InferType
        else:
            raise ValueError(
                "Unexpected value `{}` for `unexpected_field_behavior`, pass "
                "either `ignore`, `error` or `infer`.".format(value)
            )

        self.options.unexpected_field_behavior = v

    def equals(self, ParseOptions other):
        """
        Parameters
        ----------
        other : pyarrow.json.ParseOptions

        Returns
        -------
        bool
        """
        return (
            self.explicit_schema == other.explicit_schema and
            self.newlines_in_values == other.newlines_in_values and
            self.unexpected_field_behavior == other.unexpected_field_behavior
        )

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return False

    @staticmethod
    cdef ParseOptions wrap(CJSONParseOptions options):
        out = ParseOptions()
        out.options = options  # shallow copy
        return out


cdef _get_reader(input_file, shared_ptr[CInputStream]* out):
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)


cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out):
    if read_options is None:
        out[0] = CJSONReadOptions.Defaults()
    else:
        out[0] = read_options.options


cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out):
    if parse_options is None:
        out[0] = CJSONParseOptions.Defaults()
    else:
        out[0] = parse_options.options


def read_json(input_file, read_options=None, parse_options=None,
              MemoryPool memory_pool=None):
    """
    Read a Table from a stream of JSON data.

    Parameters
    ----------
    input_file : str, path or file-like object
        The location of JSON data. Currently only the line-delimited JSON
        format is supported.
    read_options : pyarrow.json.ReadOptions, optional
        Options for the JSON reader (see ReadOptions constructor for defaults).
    parse_options : pyarrow.json.ParseOptions, optional
        Options for the JSON parser
        (see ParseOptions constructor for defaults).
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from.

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the JSON file as an in-memory table.
    """
    cdef:
        shared_ptr[CInputStream] stream
        CJSONReadOptions c_read_options
        CJSONParseOptions c_parse_options
        shared_ptr[CJSONReader] reader
        shared_ptr[CTable] table

    _get_reader(input_file, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)

    reader = GetResultValue(
        CJSONReader.Make(maybe_unbox_memory_pool(memory_pool),
                         stream, c_read_options, c_parse_options))

    with nogil:
        table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)
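Since only line-delimited JSON is supported, each input line is one object; an explicit schema turns off inference for the listed fields, and unexpected_field_behavior decides what happens to everything else. A minimal sketch:

import io
import pyarrow as pa
from pyarrow import json

buf = io.BytesIO(b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n')
opts = json.ParseOptions(
    explicit_schema=pa.schema([("a", pa.int64())]),
    unexpected_field_behavior="ignore")

# Only column "a" survives; "b" is dropped rather than inferred.
table = json.read_json(buf, parse_options=opts)
print(table.column_names)  # ['a']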
llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pxd
ADDED
@@ -0,0 +1,134 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++
# cython: language_level = 3

from libcpp cimport bool as c_bool
from libc.string cimport const_char
from libcpp.vector cimport vector as std_vector
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus,
                                        CResult, CTable, CMemoryPool,
                                        CKeyValueMetadata,
                                        CRecordBatch,
                                        CTable, CCompressionType,
                                        CRandomAccessFile, COutputStream,
                                        TimeUnit)

cdef extern from "arrow/adapters/orc/options.h" \
        namespace "arrow::adapters::orc" nogil:
    cdef enum CompressionStrategy \
            " arrow::adapters::orc::CompressionStrategy":
        _CompressionStrategy_SPEED \
            " arrow::adapters::orc::CompressionStrategy::kSpeed"
        _CompressionStrategy_COMPRESSION \
            " arrow::adapters::orc::CompressionStrategy::kCompression"

    cdef enum WriterId" arrow::adapters::orc::WriterId":
        _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava"
        _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp"
        _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto"
        _WriterId_SCRITCHLEY_GO \
            " arrow::adapters::orc::WriterId::kScritchleyGo"
        _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino"
        _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown"

    cdef enum WriterVersion" arrow::adapters::orc::WriterVersion":
        _WriterVersion_ORIGINAL \
            " arrow::adapters::orc::WriterVersion::kOriginal"
        _WriterVersion_HIVE_8732 \
            " arrow::adapters::orc::WriterVersion::kHive8732"
        _WriterVersion_HIVE_4243 \
            " arrow::adapters::orc::WriterVersion::kHive4243"
        _WriterVersion_HIVE_12055 \
            " arrow::adapters::orc::WriterVersion::kHive12055"
        _WriterVersion_HIVE_13083 \
            " arrow::adapters::orc::WriterVersion::kHive13083"
        _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101"
        _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135"
        _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517"
        _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203"
        _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14"
        _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax"

    cdef cppclass FileVersion" arrow::adapters::orc::FileVersion":
        FileVersion(uint32_t major_version, uint32_t minor_version)
        uint32_t major_version()
        uint32_t minor_version()
        c_string ToString()

    cdef struct WriteOptions" arrow::adapters::orc::WriteOptions":
        int64_t batch_size
        FileVersion file_version
        int64_t stripe_size
        CCompressionType compression
        int64_t compression_block_size
        CompressionStrategy compression_strategy
        int64_t row_index_stride
        double padding_tolerance
        double dictionary_key_size_threshold
        std_vector[int64_t] bloom_filter_columns
        double bloom_filter_fpp


cdef extern from "arrow/adapters/orc/adapter.h" \
        namespace "arrow::adapters::orc" nogil:

    cdef cppclass ORCFileReader:
        @staticmethod
        CResult[unique_ptr[ORCFileReader]] Open(
            const shared_ptr[CRandomAccessFile]& file,
            CMemoryPool* pool)

        CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata()

        CResult[shared_ptr[CSchema]] ReadSchema()

        CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe)
        CResult[shared_ptr[CRecordBatch]] ReadStripe(
            int64_t stripe, std_vector[c_string])

        CResult[shared_ptr[CTable]] Read()
        CResult[shared_ptr[CTable]] Read(std_vector[c_string])

        int64_t NumberOfStripes()
        int64_t NumberOfRows()
        FileVersion GetFileVersion()
        c_string GetSoftwareVersion()
        CResult[CCompressionType] GetCompression()
        int64_t GetCompressionSize()
        int64_t GetRowIndexStride()
        WriterId GetWriterId()
        int32_t GetWriterIdValue()
        WriterVersion GetWriterVersion()
        int64_t GetNumberOfStripeStatistics()
        int64_t GetContentLength()
        int64_t GetStripeStatisticsLength()
        int64_t GetFileFooterLength()
        int64_t GetFilePostscriptLength()
        int64_t GetFileLength()
        c_string GetSerializedFileTail()

    cdef cppclass ORCFileWriter:
        @staticmethod
        CResult[unique_ptr[ORCFileWriter]] Open(
            COutputStream* output_stream, const WriteOptions& writer_options)

        CStatus Write(const CTable& table)

        CStatus Close()
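These reader/writer declarations are exposed through pyarrow.orc; the reader methods above (stripe counts, row counts, compression, writer version) map onto ORCFile properties. A round-trip sketch, with the file name as an illustrative assumption:

import pyarrow as pa
from pyarrow import orc

table = pa.table({"a": [1, 2, 3]})
orc.write_table(table, "data.orc")

f = orc.ORCFile("data.orc")
print(f.nstripes, f.nrows)  # stripe and row counts from the reader
assert f.read().equals(table)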
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pyx
ADDED
@@ -0,0 +1,2205 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++

from collections.abc import Sequence
from textwrap import indent
import warnings

from cython.operator cimport dereference as deref
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_python cimport *
from pyarrow.lib cimport (_Weakrefable, Buffer, Schema,
                          check_status,
                          MemoryPool, maybe_unbox_memory_pool,
                          Table, NativeFile,
                          pyarrow_wrap_chunked_array,
                          pyarrow_wrap_schema,
                          pyarrow_unwrap_schema,
                          pyarrow_wrap_table,
                          pyarrow_wrap_batch,
                          pyarrow_wrap_scalar,
                          NativeFile, get_reader, get_writer,
                          string_to_timeunit)

from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream,
                         _stringify_path,
                         tobytes, frombytes)

cimport cpython as cp

_DEFAULT_ROW_GROUP_SIZE = 1024*1024
_MAX_ROW_GROUP_SIZE = 64*1024*1024

cdef class Statistics(_Weakrefable):
    """Statistics for a single column in a single row group."""

    def __cinit__(self):
        pass

    def __repr__(self):
        return """{}
  has_min_max: {}
  min: {}
  max: {}
  null_count: {}
  distinct_count: {}
  num_values: {}
  physical_type: {}
  logical_type: {}
  converted_type (legacy): {}""".format(object.__repr__(self),
                                        self.has_min_max,
                                        self.min,
                                        self.max,
                                        self.null_count,
                                        self.distinct_count,
                                        self.num_values,
                                        self.physical_type,
                                        str(self.logical_type),
                                        self.converted_type)

    def to_dict(self):
        """
        Get dictionary representation of statistics.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        d = dict(
            has_min_max=self.has_min_max,
            min=self.min,
            max=self.max,
            null_count=self.null_count,
            distinct_count=self.distinct_count,
            num_values=self.num_values,
            physical_type=self.physical_type
        )
        return d

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, Statistics other):
        """
        Return whether the two column statistics objects are equal.

        Parameters
        ----------
        other : Statistics
            Statistics to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.statistics.get().Equals(deref(other.statistics.get()))

    @property
    def has_min_max(self):
        """Whether min and max are present (bool)."""
        return self.statistics.get().HasMinMax()

    @property
    def has_null_count(self):
        """Whether null count is present (bool)."""
        return self.statistics.get().HasNullCount()

    @property
    def has_distinct_count(self):
        """Whether distinct count is present (bool)."""
        return self.statistics.get().HasDistinctCount()

    @property
    def min_raw(self):
        """Min value as physical type (bool, int, float, or bytes)."""
        if self.has_min_max:
            return _cast_statistic_raw_min(self.statistics.get())
        else:
            return None

    @property
    def max_raw(self):
        """Max value as physical type (bool, int, float, or bytes)."""
        if self.has_min_max:
            return _cast_statistic_raw_max(self.statistics.get())
        else:
            return None

    @property
    def min(self):
        """
        Min value as logical type.

        Returned as the Python equivalent of logical type, such as datetime.date
        for dates and decimal.Decimal for decimals.
        """
        if self.has_min_max:
            min_scalar, _ = _cast_statistics(self.statistics.get())
            return min_scalar.as_py()
        else:
            return None

    @property
    def max(self):
        """
        Max value as logical type.

        Returned as the Python equivalent of logical type, such as datetime.date
        for dates and decimal.Decimal for decimals.
        """
        if self.has_min_max:
            _, max_scalar = _cast_statistics(self.statistics.get())
            return max_scalar.as_py()
        else:
            return None

    @property
    def null_count(self):
        """Number of null values in chunk (int)."""
        if self.has_null_count:
            return self.statistics.get().null_count()
        else:
            return None

    @property
    def distinct_count(self):
        """Distinct number of values in chunk (int)."""
        if self.has_distinct_count:
            return self.statistics.get().distinct_count()
        else:
            return None

    @property
    def num_values(self):
        """Number of non-null values (int)."""
        return self.statistics.get().num_values()

    @property
    def physical_type(self):
        """Physical type of column (str)."""
        raw_physical_type = self.statistics.get().physical_type()
        return physical_type_name_from_enum(raw_physical_type)

    @property
    def logical_type(self):
        """Logical type of column (:class:`ParquetLogicalType`)."""
        return wrap_logical_type(self.statistics.get().descr().logical_type())

    @property
    def converted_type(self):
        """Legacy converted type (str or None)."""
        raw_converted_type = self.statistics.get().descr().converted_type()
        return converted_type_name_from_enum(raw_converted_type)


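# Illustrative use of the Statistics accessors above (a minimal sketch;
# 'example.parquet' is a hypothetical file written with column statistics
# enabled):
#
#   import pyarrow.parquet as pq
#   meta = pq.read_metadata('example.parquet')
#   stats = meta.row_group(0).column(0).statistics
#   if stats is not None and stats.has_min_max:
#       print(stats.min, stats.max, stats.null_count)
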
cdef class ParquetLogicalType(_Weakrefable):
    """Logical type of parquet type."""
    cdef:
        shared_ptr[const CParquetLogicalType] type

    def __cinit__(self):
        pass

    cdef init(self, const shared_ptr[const CParquetLogicalType]& type):
        self.type = type

    def __repr__(self):
        return "{}\n {}".format(object.__repr__(self), str(self))

    def __str__(self):
        return frombytes(self.type.get().ToString(), safe=True)

    def to_json(self):
        """
        Get a JSON string containing type and type parameters.

        Returns
        -------
        json : str
            JSON representation of type, with at least a field called 'Type'
            which contains the type name. If the type is parameterized, such
            as a decimal with scale and precision, will contain those as fields
            as well.
        """
        return frombytes(self.type.get().ToJSON())

    @property
    def type(self):
        """Name of the logical type (str)."""
        return logical_type_name_from_enum(self.type.get().type())


cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type):
    cdef ParquetLogicalType out = ParquetLogicalType()
    out.init(type)
    return out


cdef _cast_statistic_raw_min(CStatistics* statistics):
    cdef ParquetType physical_type = statistics.physical_type()
    cdef uint32_t type_length = statistics.descr().type_length()
    if physical_type == ParquetType_BOOLEAN:
        return (<CBoolStatistics*> statistics).min()
    elif physical_type == ParquetType_INT32:
        return (<CInt32Statistics*> statistics).min()
    elif physical_type == ParquetType_INT64:
        return (<CInt64Statistics*> statistics).min()
    elif physical_type == ParquetType_FLOAT:
        return (<CFloatStatistics*> statistics).min()
    elif physical_type == ParquetType_DOUBLE:
        return (<CDoubleStatistics*> statistics).min()
    elif physical_type == ParquetType_BYTE_ARRAY:
        return _box_byte_array((<CByteArrayStatistics*> statistics).min())
    elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
        return _box_flba((<CFLBAStatistics*> statistics).min(), type_length)


cdef _cast_statistic_raw_max(CStatistics* statistics):
    cdef ParquetType physical_type = statistics.physical_type()
    cdef uint32_t type_length = statistics.descr().type_length()
    if physical_type == ParquetType_BOOLEAN:
        return (<CBoolStatistics*> statistics).max()
    elif physical_type == ParquetType_INT32:
        return (<CInt32Statistics*> statistics).max()
    elif physical_type == ParquetType_INT64:
        return (<CInt64Statistics*> statistics).max()
    elif physical_type == ParquetType_FLOAT:
        return (<CFloatStatistics*> statistics).max()
    elif physical_type == ParquetType_DOUBLE:
        return (<CDoubleStatistics*> statistics).max()
    elif physical_type == ParquetType_BYTE_ARRAY:
        return _box_byte_array((<CByteArrayStatistics*> statistics).max())
    elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY:
        return _box_flba((<CFLBAStatistics*> statistics).max(), type_length)


cdef _cast_statistics(CStatistics* statistics):
    cdef:
        shared_ptr[CScalar] c_min
        shared_ptr[CScalar] c_max
    check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max))
    return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max))


cdef _box_byte_array(ParquetByteArray val):
    return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> val.len)


cdef _box_flba(ParquetFLBA val, uint32_t len):
    return cp.PyBytes_FromStringAndSize(<char*> val.ptr, <Py_ssize_t> len)


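# Sketch of reading a column's logical type through the metadata API
# (continuing the hypothetical 'meta' from the sketch above):
#
#   lt = meta.schema.column(0).logical_type
#   print(lt.type)       # e.g. 'STRING'
#   print(lt.to_json())  # JSON with a 'Type' field plus any parameters
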
cdef class ColumnChunkMetaData(_Weakrefable):
    """Column metadata for a single row group."""

    def __cinit__(self):
        pass

    def __repr__(self):
        statistics = indent(repr(self.statistics), 4 * ' ')
        return """{0}
  file_offset: {1}
  file_path: {2}
  physical_type: {3}
  num_values: {4}
  path_in_schema: {5}
  is_stats_set: {6}
  statistics:
{7}
  compression: {8}
  encodings: {9}
  has_dictionary_page: {10}
  dictionary_page_offset: {11}
  data_page_offset: {12}
  total_compressed_size: {13}
  total_uncompressed_size: {14}""".format(object.__repr__(self),
                                          self.file_offset,
                                          self.file_path,
                                          self.physical_type,
                                          self.num_values,
                                          self.path_in_schema,
                                          self.is_stats_set,
                                          statistics,
                                          self.compression,
                                          self.encodings,
                                          self.has_dictionary_page,
                                          self.dictionary_page_offset,
                                          self.data_page_offset,
                                          self.total_compressed_size,
                                          self.total_uncompressed_size)

    def to_dict(self):
        """
        Get dictionary representation of the column chunk metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        statistics = self.statistics.to_dict() if self.is_stats_set else None
        d = dict(
            file_offset=self.file_offset,
            file_path=self.file_path,
            physical_type=self.physical_type,
            num_values=self.num_values,
            path_in_schema=self.path_in_schema,
            is_stats_set=self.is_stats_set,
            statistics=statistics,
            compression=self.compression,
            encodings=self.encodings,
            has_dictionary_page=self.has_dictionary_page,
            dictionary_page_offset=self.dictionary_page_offset,
            data_page_offset=self.data_page_offset,
            total_compressed_size=self.total_compressed_size,
            total_uncompressed_size=self.total_uncompressed_size
        )
        return d

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, ColumnChunkMetaData other):
        """
        Return whether the two column chunk metadata objects are equal.

        Parameters
        ----------
        other : ColumnChunkMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.metadata.Equals(deref(other.metadata))

    @property
    def file_offset(self):
        """Offset into file where column chunk is located (int)."""
        return self.metadata.file_offset()

    @property
    def file_path(self):
        """Optional file path if set (str or None)."""
        return frombytes(self.metadata.file_path())

    @property
    def physical_type(self):
        """Physical type of column (str)."""
        return physical_type_name_from_enum(self.metadata.type())

    @property
    def num_values(self):
        """Total number of values (int)."""
        return self.metadata.num_values()

    @property
    def path_in_schema(self):
        """Nested path to field, separated by periods (str)."""
        path = self.metadata.path_in_schema().get().ToDotString()
        return frombytes(path)

    @property
    def is_stats_set(self):
        """Whether or not statistics are present in metadata (bool)."""
        return self.metadata.is_stats_set()

    @property
    def statistics(self):
        """Statistics for column chunk (:class:`Statistics`)."""
        if not self.metadata.is_stats_set():
            return None
        statistics = Statistics()
        statistics.init(self.metadata.statistics(), self)
        return statistics

    @property
    def compression(self):
        """
        Type of compression used for column (str).

        One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD',
        or 'UNKNOWN'.
        """
        return compression_name_from_enum(self.metadata.compression())

    @property
    def encodings(self):
        """
        Encodings used for column (tuple of str).

        One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED',
        'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'.
        """
        return tuple(map(encoding_name_from_enum, self.metadata.encodings()))

    @property
    def has_dictionary_page(self):
        """Whether there is dictionary data present in the column chunk (bool)."""
        return bool(self.metadata.has_dictionary_page())

    @property
    def dictionary_page_offset(self):
        """Offset of dictionary page relative to column chunk offset (int)."""
        if self.has_dictionary_page:
            return self.metadata.dictionary_page_offset()
        else:
            return None

    @property
    def data_page_offset(self):
        """Offset of data page relative to column chunk offset (int)."""
        return self.metadata.data_page_offset()

    @property
    def has_index_page(self):
        """Not yet supported."""
        raise NotImplementedError('not supported in parquet-cpp')

    @property
    def index_page_offset(self):
        """Not yet supported."""
        raise NotImplementedError("parquet-cpp doesn't return valid values")

    @property
    def total_compressed_size(self):
        """Compressed size in bytes (int)."""
        return self.metadata.total_compressed_size()

    @property
    def total_uncompressed_size(self):
        """Uncompressed size in bytes (int)."""
        return self.metadata.total_uncompressed_size()

    @property
    def has_offset_index(self):
        """Whether the column chunk has an offset index."""
        return self.metadata.GetOffsetIndexLocation().has_value()

    @property
    def has_column_index(self):
        """Whether the column chunk has a column index."""
        return self.metadata.GetColumnIndexLocation().has_value()


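# Sketch: summarising per-column storage with the accessors above
# (same hypothetical 'meta'):
#
#   for rg in range(meta.num_row_groups):
#       for c in range(meta.num_columns):
#           col = meta.row_group(rg).column(c)
#           print(col.path_in_schema, col.compression,
#                 col.total_compressed_size, col.total_uncompressed_size)
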
cdef class SortingColumn:
    """
    Sorting specification for a single column.

    Returned by :meth:`RowGroupMetaData.sorting_columns` and used in
    :class:`ParquetWriter` to specify the sort order of the data.

    Parameters
    ----------
    column_index : int
        Index of column that data is sorted by.
    descending : bool, default False
        Whether column is sorted in descending order.
    nulls_first : bool, default False
        Whether null values appear before valid values.

    Notes
    -----

    Column indices are zero-based, refer only to leaf fields, and are in
    depth-first order. This may make the column indices for nested schemas
    different from what you expect. In most cases, it will be easier to
    specify the sort order using column names instead of column indices
    and converting using the ``from_ordering`` method.

    Examples
    --------

    In other APIs, sort order is specified by names, such as:

    >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')]

    For Parquet, the column index must be used instead:

    >>> import pyarrow.parquet as pq
    >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)]
    [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)]

    Convert the sort_order into the list of sorting columns with
    ``from_ordering`` (note that the schema must be provided as well):

    >>> import pyarrow as pa
    >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))])
    >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order)
    >>> sorting_columns
    (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False))

    Convert back to the sort order with ``to_ordering``:

    >>> pq.SortingColumn.to_ordering(schema, sorting_columns)
    ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end')

    See Also
    --------
    RowGroupMetaData.sorting_columns
    """
    cdef int column_index
    cdef c_bool descending
    cdef c_bool nulls_first

    def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False):
        self.column_index = column_index
        self.descending = descending
        self.nulls_first = nulls_first

    @classmethod
    def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'):
        """
        Create a tuple of SortingColumn objects from the same arguments as
        :class:`pyarrow.compute.SortOptions`.

        Parameters
        ----------
        schema : Schema
            Schema of the input data.
        sort_keys : Sequence of (name, order) tuples
            Names of field/column keys (str) to sort the input on,
            along with the order each field/column is sorted in.
            Accepted values for `order` are "ascending", "descending".
        null_placement : {'at_start', 'at_end'}, default 'at_end'
            Where null values should appear in the sort order.

        Returns
        -------
        sorting_columns : tuple of SortingColumn
        """
        if null_placement == 'at_start':
            nulls_first = True
        elif null_placement == 'at_end':
            nulls_first = False
        else:
            raise ValueError('null_placement must be "at_start" or "at_end"')

        col_map = _name_to_index_map(schema)

        sorting_columns = []

        for sort_key in sort_keys:
            if isinstance(sort_key, str):
                name = sort_key
                descending = False
            elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and
                    isinstance(sort_key[0], str) and
                    isinstance(sort_key[1], str)):
                name, descending = sort_key
                if descending == "descending":
                    descending = True
                elif descending == "ascending":
                    descending = False
                else:
                    raise ValueError("Invalid sort key direction: {0}"
                                     .format(descending))
            else:
                raise ValueError("Invalid sort key: {0}".format(sort_key))

            try:
                column_index = col_map[name]
            except KeyError:
                raise ValueError("Sort key name '{0}' not found in schema:\n{1}"
                                 .format(name, schema))

            sorting_columns.append(
                cls(column_index, descending=descending, nulls_first=nulls_first)
            )

        return tuple(sorting_columns)

    @staticmethod
    def to_ordering(Schema schema, sorting_columns):
        """
        Convert a tuple of SortingColumn objects to the same format as
        :class:`pyarrow.compute.SortOptions`.

        Parameters
        ----------
        schema : Schema
            Schema of the input data.
        sorting_columns : tuple of SortingColumn
            Columns to sort the input on.

        Returns
        -------
        sort_keys : tuple of (name, order) tuples
        null_placement : {'at_start', 'at_end'}
        """
        col_map = {i: name for name, i in _name_to_index_map(schema).items()}

        sort_keys = []
        nulls_first = None

        for sorting_column in sorting_columns:
            name = col_map[sorting_column.column_index]
            if sorting_column.descending:
                order = "descending"
            else:
                order = "ascending"
            sort_keys.append((name, order))
            if nulls_first is None:
                nulls_first = sorting_column.nulls_first
            elif nulls_first != sorting_column.nulls_first:
                raise ValueError("Sorting columns have inconsistent null placement")

        if nulls_first:
            null_placement = "at_start"
        else:
            null_placement = "at_end"

        return tuple(sort_keys), null_placement

    def __repr__(self):
        return """{}(column_index={}, descending={}, nulls_first={})""".format(
            self.__class__.__name__,
            self.column_index, self.descending, self.nulls_first)

    def __eq__(self, SortingColumn other):
        return (self.column_index == other.column_index and
                self.descending == other.descending and
                self.nulls_first == other.nulls_first)

    def __hash__(self):
        return hash((self.column_index, self.descending, self.nulls_first))

    @property
    def column_index(self):
        """Index of column data is sorted by (int)."""
        return self.column_index

    @property
    def descending(self):
        """Whether column is sorted in descending order (bool)."""
        return self.descending

    @property
    def nulls_first(self):
        """Whether null values appear before valid values (bool)."""
        return self.nulls_first


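# Sketch: declaring a sort order when writing (assumes 'table' is a
# pyarrow.Table already sorted by 'id', and a pyarrow version whose
# ParquetWriter accepts a sorting_columns argument, as the docstring
# above describes):
#
#   sorting = pq.SortingColumn.from_ordering(table.schema,
#                                            [('id', 'ascending')])
#   pq.write_table(table, 'sorted.parquet', sorting_columns=sorting)
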
cdef class RowGroupMetaData(_Weakrefable):
    """Metadata for a single row group."""

    def __cinit__(self, FileMetaData parent, int index):
        if index < 0 or index >= parent.num_row_groups:
            raise IndexError('{0} out of bounds'.format(index))
        self.up_metadata = parent._metadata.RowGroup(index)
        self.metadata = self.up_metadata.get()
        self.parent = parent
        self.index = index

    def __reduce__(self):
        return RowGroupMetaData, (self.parent, self.index)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, RowGroupMetaData other):
        """
        Return whether the two row group metadata objects are equal.

        Parameters
        ----------
        other : RowGroupMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.metadata.Equals(deref(other.metadata))

    def column(self, int i):
        """
        Get column metadata at given index.

        Parameters
        ----------
        i : int
            Index of column to get metadata for.

        Returns
        -------
        ColumnChunkMetaData
            Metadata for column within this chunk.
        """
        if i < 0 or i >= self.num_columns:
            raise IndexError('{0} out of bounds'.format(i))
        chunk = ColumnChunkMetaData()
        chunk.init(self, i)
        return chunk

    def __repr__(self):
        return """{0}
  num_columns: {1}
  num_rows: {2}
  total_byte_size: {3}
  sorting_columns: {4}""".format(object.__repr__(self),
                                 self.num_columns,
                                 self.num_rows,
                                 self.total_byte_size,
                                 self.sorting_columns)

    def to_dict(self):
        """
        Get dictionary representation of the row group metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        columns = []
        d = dict(
            num_columns=self.num_columns,
            num_rows=self.num_rows,
            total_byte_size=self.total_byte_size,
            columns=columns,
            sorting_columns=[col.to_dict() for col in self.sorting_columns]
        )
        for i in range(self.num_columns):
            columns.append(self.column(i).to_dict())
        return d

    @property
    def num_columns(self):
        """Number of columns in this row group (int)."""
        return self.metadata.num_columns()

    @property
    def num_rows(self):
        """Number of rows in this row group (int)."""
        return self.metadata.num_rows()

    @property
    def total_byte_size(self):
        """Total byte size of all the uncompressed column data in this row group (int)."""
        return self.metadata.total_byte_size()

    @property
    def sorting_columns(self):
        """Columns the row group is sorted by (tuple of :class:`SortingColumn`)."""
        out = []
        cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns()
        for sorting_col in sorting_columns:
            out.append(SortingColumn(
                sorting_col.column_idx,
                sorting_col.descending,
                sorting_col.nulls_first
            ))
        return tuple(out)


def _reconstruct_filemetadata(Buffer serialized):
    cdef:
        FileMetaData metadata = FileMetaData.__new__(FileMetaData)
        CBuffer *buffer = serialized.buffer.get()
        uint32_t metadata_len = <uint32_t>buffer.size()

    metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len))

    return metadata


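# Sketch: checking how rows are spread across row groups (same 'meta'):
#
#   for i in range(meta.num_row_groups):
#       rg = meta.row_group(i)
#       print(i, rg.num_rows, rg.total_byte_size)
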
cdef class FileMetaData(_Weakrefable):
    """Parquet metadata for a single file."""

    def __cinit__(self):
        pass

    def __reduce__(self):
        cdef:
            NativeFile sink = BufferOutputStream()
            COutputStream* c_sink = sink.get_output_stream().get()
        with nogil:
            self._metadata.WriteTo(c_sink)

        cdef Buffer buffer = sink.getvalue()
        return _reconstruct_filemetadata, (buffer,)

    def __hash__(self):
        return hash((self.schema,
                     self.num_rows,
                     self.num_row_groups,
                     self.format_version,
                     self.serialized_size))

    def __repr__(self):
        return """{0}
  created_by: {1}
  num_columns: {2}
  num_rows: {3}
  num_row_groups: {4}
  format_version: {5}
  serialized_size: {6}""".format(object.__repr__(self),
                                 self.created_by, self.num_columns,
                                 self.num_rows, self.num_row_groups,
                                 self.format_version,
                                 self.serialized_size)

    def to_dict(self):
        """
        Get dictionary representation of the file metadata.

        Returns
        -------
        dict
            Dictionary with a key for each attribute of this class.
        """
        row_groups = []
        d = dict(
            created_by=self.created_by,
            num_columns=self.num_columns,
            num_rows=self.num_rows,
            num_row_groups=self.num_row_groups,
            row_groups=row_groups,
            format_version=self.format_version,
            serialized_size=self.serialized_size
        )
        for i in range(self.num_row_groups):
            row_groups.append(self.row_group(i).to_dict())
        return d

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, FileMetaData other not None):
        """
        Return whether the two file metadata objects are equal.

        Parameters
        ----------
        other : FileMetaData
            Metadata to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self._metadata.Equals(deref(other._metadata))

    @property
    def schema(self):
        """Schema of the file (:class:`ParquetSchema`)."""
        if self._schema is None:
            self._schema = ParquetSchema(self)
        return self._schema

    @property
    def serialized_size(self):
        """Size of the original thrift encoded metadata footer (int)."""
        return self._metadata.size()

    @property
    def num_columns(self):
        """Number of columns in file (int)."""
        return self._metadata.num_columns()

    @property
    def num_rows(self):
        """Total number of rows in file (int)."""
        return self._metadata.num_rows()

    @property
    def num_row_groups(self):
        """Number of row groups in file (int)."""
        return self._metadata.num_row_groups()

    @property
    def format_version(self):
        """
        Parquet format version used in file (str, such as '1.0', '2.4').

        If version is missing or unparsable, will default to assuming '2.6'.
        """
        cdef ParquetVersion version = self._metadata.version()
        if version == ParquetVersion_V1:
            return '1.0'
        elif version == ParquetVersion_V2_0:
            return 'pseudo-2.0'
        elif version == ParquetVersion_V2_4:
            return '2.4'
        elif version == ParquetVersion_V2_6:
            return '2.6'
        else:
            warnings.warn('Unrecognized file version, assuming 2.6: {}'
                          .format(version))
            return '2.6'

    @property
    def created_by(self):
        """
        String describing source of the parquet file (str).

        This typically includes library name and version number. For example, Arrow 7.0's
        writer returns 'parquet-cpp-arrow version 7.0.0'.
        """
        return frombytes(self._metadata.created_by())

    @property
    def metadata(self):
        """Additional metadata as key value pairs (dict[bytes, bytes])."""
        cdef:
            unordered_map[c_string, c_string] metadata
            const CKeyValueMetadata* underlying_metadata
        underlying_metadata = self._metadata.key_value_metadata().get()
        if underlying_metadata != NULL:
            underlying_metadata.ToUnorderedMap(&metadata)
            return metadata
        else:
            return None

    def row_group(self, int i):
        """
        Get metadata for row group at index i.

        Parameters
        ----------
        i : int
            Row group index to get.

        Returns
        -------
        row_group_metadata : RowGroupMetaData
        """
        return RowGroupMetaData(self, i)

    def set_file_path(self, path):
        """
        Set ColumnChunk file paths to the given value.

        This method modifies the ``file_path`` field of each ColumnChunk
        in the FileMetaData to be a particular value.

        Parameters
        ----------
        path : str
            The file path to set on all ColumnChunks.
        """
        cdef:
            c_string c_path = tobytes(path)
        self._metadata.set_file_path(c_path)

    def append_row_groups(self, FileMetaData other):
        """
        Append row groups from other FileMetaData object.

        Parameters
        ----------
        other : FileMetaData
            Other metadata to append row groups from.
        """
        cdef shared_ptr[CFileMetaData] c_metadata

        c_metadata = other.sp_metadata
        self._metadata.AppendRowGroups(deref(c_metadata))

    def write_metadata_file(self, where):
        """
        Write the metadata to a metadata-only Parquet file.

        Parameters
        ----------
        where : path or file-like object
            Where to write the metadata.  Should be a writable path on
            the local filesystem, or a writable file-like object.
        """
        cdef:
            shared_ptr[COutputStream] sink
            c_string c_where

        try:
            where = _stringify_path(where)
        except TypeError:
            get_writer(where, &sink)
        else:
            c_where = tobytes(where)
            with nogil:
                sink = GetResultValue(FileOutputStream.Open(c_where))

        with nogil:
            check_status(
                WriteMetaDataFile(deref(self._metadata), sink.get()))


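# Sketch: merging per-file metadata into a dataset-level '_metadata' file
# using the methods above (the part file names are hypothetical):
#
#   m0 = pq.read_metadata('part-0.parquet')
#   m0.set_file_path('part-0.parquet')
#   m1 = pq.read_metadata('part-1.parquet')
#   m1.set_file_path('part-1.parquet')
#   m0.append_row_groups(m1)
#   m0.write_metadata_file('_metadata')
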
cdef class ParquetSchema(_Weakrefable):
    """A Parquet schema."""

    def __cinit__(self, FileMetaData container):
        self.parent = container
        self.schema = container._metadata.schema()

    def __repr__(self):
        return "{0}\n{1}".format(
            object.__repr__(self),
            frombytes(self.schema.ToString(), safe=True))

    def __reduce__(self):
        return ParquetSchema, (self.parent,)

    def __len__(self):
        return self.schema.num_columns()

    def __getitem__(self, i):
        return self.column(i)

    def __hash__(self):
        return hash(self.schema.ToString())

    @property
    def names(self):
        """Name of each field (list of str)."""
        return [self[i].name for i in range(len(self))]

    def to_arrow_schema(self):
        """
        Convert Parquet schema to effective Arrow schema.

        Returns
        -------
        schema : Schema
        """
        cdef shared_ptr[CSchema] sp_arrow_schema

        with nogil:
            check_status(FromParquetSchema(
                self.schema, default_arrow_reader_properties(),
                self.parent._metadata.key_value_metadata(),
                &sp_arrow_schema))

        return pyarrow_wrap_schema(sp_arrow_schema)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def equals(self, ParquetSchema other):
        """
        Return whether the two schemas are equal.

        Parameters
        ----------
        other : ParquetSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.schema.Equals(deref(other.schema))

    def column(self, i):
        """
        Return the schema for a single column.

        Parameters
        ----------
        i : int
            Index of column in schema.

        Returns
        -------
        column_schema : ColumnSchema
        """
        if i < 0 or i >= len(self):
            raise IndexError('{0} out of bounds'.format(i))

        return ColumnSchema(self, i)


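# Sketch: converting the Parquet schema to its Arrow equivalent:
#
#   pschema = meta.schema            # ParquetSchema
#   arrow_schema = pschema.to_arrow_schema()
#   print(arrow_schema.names)
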
cdef class ColumnSchema(_Weakrefable):
    """Schema for a single column."""
    cdef:
        int index
        ParquetSchema parent
        const ColumnDescriptor* descr

    def __cinit__(self, ParquetSchema schema, int index):
        self.parent = schema
        self.index = index  # for pickling support
        self.descr = schema.schema.Column(index)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def __reduce__(self):
        return ColumnSchema, (self.parent, self.index)

    def equals(self, ColumnSchema other):
        """
        Return whether the two column schemas are equal.

        Parameters
        ----------
        other : ColumnSchema
            Schema to compare against.

        Returns
        -------
        are_equal : bool
        """
        return self.descr.Equals(deref(other.descr))

    def __repr__(self):
        physical_type = self.physical_type
        converted_type = self.converted_type
        if converted_type == 'DECIMAL':
            converted_type = 'DECIMAL({0}, {1})'.format(self.precision,
                                                        self.scale)
        elif physical_type == 'FIXED_LEN_BYTE_ARRAY':
            converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})'
                              .format(self.length))

        return """<ParquetColumnSchema>
  name: {0}
  path: {1}
  max_definition_level: {2}
  max_repetition_level: {3}
  physical_type: {4}
  logical_type: {5}
  converted_type (legacy): {6}""".format(self.name, self.path,
                                         self.max_definition_level,
                                         self.max_repetition_level,
                                         physical_type,
                                         str(self.logical_type),
                                         converted_type)

    @property
    def name(self):
        """Name of field (str)."""
        return frombytes(self.descr.name())

    @property
    def path(self):
        """Nested path to field, separated by periods (str)."""
        return frombytes(self.descr.path().get().ToDotString())

    @property
    def max_definition_level(self):
        """Maximum definition level (int)."""
        return self.descr.max_definition_level()

    @property
    def max_repetition_level(self):
        """Maximum repetition level (int)."""
        return self.descr.max_repetition_level()

    @property
    def physical_type(self):
        """Name of physical type (str)."""
        return physical_type_name_from_enum(self.descr.physical_type())

    @property
    def logical_type(self):
        """Logical type of column (:class:`ParquetLogicalType`)."""
        return wrap_logical_type(self.descr.logical_type())

    @property
    def converted_type(self):
        """Legacy converted type (str or None)."""
        return converted_type_name_from_enum(self.descr.converted_type())

    # FIXED_LEN_BYTE_ARRAY attribute
    @property
    def length(self):
        """Array length if fixed length byte array type, None otherwise (int or None)."""
        return self.descr.type_length()

    # Decimal attributes
    @property
    def precision(self):
        """Precision if decimal type, None otherwise (int or None)."""
        return self.descr.type_precision()

    @property
    def scale(self):
        """Scale if decimal type, None otherwise (int or None)."""
        return self.descr.type_scale()


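# Sketch: reading decimal parameters from a column schema (assumes the
# hypothetical file's first column is a decimal):
#
#   col = pschema.column(0)
#   if col.converted_type == 'DECIMAL':
#       print(col.precision, col.scale)
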
cdef physical_type_name_from_enum(ParquetType type_):
    return {
        ParquetType_BOOLEAN: 'BOOLEAN',
        ParquetType_INT32: 'INT32',
        ParquetType_INT64: 'INT64',
        ParquetType_INT96: 'INT96',
        ParquetType_FLOAT: 'FLOAT',
        ParquetType_DOUBLE: 'DOUBLE',
        ParquetType_BYTE_ARRAY: 'BYTE_ARRAY',
        ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY',
    }.get(type_, 'UNKNOWN')


cdef logical_type_name_from_enum(ParquetLogicalTypeId type_):
    return {
        ParquetLogicalType_UNDEFINED: 'UNDEFINED',
        ParquetLogicalType_STRING: 'STRING',
        ParquetLogicalType_MAP: 'MAP',
        ParquetLogicalType_LIST: 'LIST',
        ParquetLogicalType_ENUM: 'ENUM',
        ParquetLogicalType_DECIMAL: 'DECIMAL',
        ParquetLogicalType_DATE: 'DATE',
        ParquetLogicalType_TIME: 'TIME',
        ParquetLogicalType_TIMESTAMP: 'TIMESTAMP',
        ParquetLogicalType_INT: 'INT',
        ParquetLogicalType_JSON: 'JSON',
        ParquetLogicalType_BSON: 'BSON',
        ParquetLogicalType_UUID: 'UUID',
        ParquetLogicalType_NONE: 'NONE',
    }.get(type_, 'UNKNOWN')


cdef converted_type_name_from_enum(ParquetConvertedType type_):
    return {
        ParquetConvertedType_NONE: 'NONE',
        ParquetConvertedType_UTF8: 'UTF8',
        ParquetConvertedType_MAP: 'MAP',
        ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE',
        ParquetConvertedType_LIST: 'LIST',
        ParquetConvertedType_ENUM: 'ENUM',
        ParquetConvertedType_DECIMAL: 'DECIMAL',
        ParquetConvertedType_DATE: 'DATE',
        ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS',
        ParquetConvertedType_TIME_MICROS: 'TIME_MICROS',
        ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS',
        ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS',
        ParquetConvertedType_UINT_8: 'UINT_8',
        ParquetConvertedType_UINT_16: 'UINT_16',
        ParquetConvertedType_UINT_32: 'UINT_32',
        ParquetConvertedType_UINT_64: 'UINT_64',
        ParquetConvertedType_INT_8: 'INT_8',
        ParquetConvertedType_INT_16: 'INT_16',
        ParquetConvertedType_INT_32: 'INT_32',
        ParquetConvertedType_INT_64: 'INT_64',
        ParquetConvertedType_JSON: 'JSON',
        ParquetConvertedType_BSON: 'BSON',
        ParquetConvertedType_INTERVAL: 'INTERVAL',
    }.get(type_, 'UNKNOWN')


cdef encoding_name_from_enum(ParquetEncoding encoding_):
    return {
        ParquetEncoding_PLAIN: 'PLAIN',
        ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY',
        ParquetEncoding_RLE: 'RLE',
        ParquetEncoding_BIT_PACKED: 'BIT_PACKED',
        ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED',
        ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY',
        ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY',
        ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY',
        ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT',
    }.get(encoding_, 'UNKNOWN')


cdef encoding_enum_from_name(str encoding_name):
    enc = {
        'PLAIN': ParquetEncoding_PLAIN,
        'BIT_PACKED': ParquetEncoding_BIT_PACKED,
        'RLE': ParquetEncoding_RLE,
        'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT,
        'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED,
        'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY,
        'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY,
        'RLE_DICTIONARY': 'dict',
        'PLAIN_DICTIONARY': 'dict',
    }.get(encoding_name, None)
    if enc is None:
        raise ValueError(f"Unsupported column encoding: {encoding_name!r}")
    elif enc == 'dict':
        raise ValueError(f"{encoding_name!r} is already used by default.")
    else:
        return enc


cdef compression_name_from_enum(ParquetCompression compression_):
    return {
        ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED',
        ParquetCompression_SNAPPY: 'SNAPPY',
        ParquetCompression_GZIP: 'GZIP',
        ParquetCompression_LZO: 'LZO',
        ParquetCompression_BROTLI: 'BROTLI',
        ParquetCompression_LZ4: 'LZ4',
        ParquetCompression_ZSTD: 'ZSTD',
    }.get(compression_, 'UNKNOWN')


cdef int check_compression_name(name) except -1:
    if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4',
                            'ZSTD'}:
        raise ArrowException("Unsupported compression: " + name)
    return 0


cdef ParquetCompression compression_from_name(name):
    name = name.upper()
    if name == 'SNAPPY':
        return ParquetCompression_SNAPPY
    elif name == 'GZIP':
        return ParquetCompression_GZIP
    elif name == 'LZO':
        return ParquetCompression_LZO
    elif name == 'BROTLI':
        return ParquetCompression_BROTLI
    elif name == 'LZ4':
        return ParquetCompression_LZ4
    elif name == 'ZSTD':
        return ParquetCompression_ZSTD
    else:
        return ParquetCompression_UNCOMPRESSED


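# The helpers above bridge parquet-cpp enums and the strings exposed by the
# metadata classes: compression_name_from_enum turns
# ParquetCompression_SNAPPY into the 'SNAPPY' reported by
# ColumnChunkMetaData.compression, while compression_from_name maps
# user-facing names (validated case-insensitively by check_compression_name,
# which raises on unsupported codecs) back to enums for the writer. The
# *_name_from_enum lookups and compression_from_name deliberately fall back
# to 'UNKNOWN' / UNCOMPRESSED rather than raising.
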
cdef class ParquetReader(_Weakrefable):
    cdef:
        object source
        CMemoryPool* pool
        UniquePtrNoGIL[FileReader] reader
        FileMetaData _metadata
        shared_ptr[CRandomAccessFile] rd_handle

    cdef public:
        _column_idx_map

    def __cinit__(self, MemoryPool memory_pool=None):
        self.pool = maybe_unbox_memory_pool(memory_pool)
        self._metadata = None

    def open(self, object source not None, *, bint use_memory_map=False,
             read_dictionary=None, FileMetaData metadata=None,
             int buffer_size=0, bint pre_buffer=False,
             coerce_int96_timestamp_unit=None,
             FileDecryptionProperties decryption_properties=None,
             thrift_string_size_limit=None,
             thrift_container_size_limit=None,
             page_checksum_verification=False):
        """
        Open a parquet file for reading.

        Parameters
        ----------
        source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        use_memory_map : bool, default False
        read_dictionary : iterable[int or str], optional
        metadata : FileMetaData, optional
        buffer_size : int, default 0
        pre_buffer : bool, default False
        coerce_int96_timestamp_unit : str, optional
        decryption_properties : FileDecryptionProperties, optional
        thrift_string_size_limit : int, optional
        thrift_container_size_limit : int, optional
        page_checksum_verification : bool, default False
        """
        cdef:
            shared_ptr[CFileMetaData] c_metadata
            CReaderProperties properties = default_reader_properties()
            ArrowReaderProperties arrow_props = (
                default_arrow_reader_properties())
            FileReaderBuilder builder

        if metadata is not None:
            c_metadata = metadata.sp_metadata

        if buffer_size > 0:
            properties.enable_buffered_stream()
            properties.set_buffer_size(buffer_size)
        elif buffer_size == 0:
            properties.disable_buffered_stream()
        else:
            raise ValueError('Buffer size must be larger than zero')

        if thrift_string_size_limit is not None:
            if thrift_string_size_limit <= 0:
                raise ValueError("thrift_string_size_limit "
                                 "must be larger than zero")
            properties.set_thrift_string_size_limit(thrift_string_size_limit)
        if thrift_container_size_limit is not None:
            if thrift_container_size_limit <= 0:
                raise ValueError("thrift_container_size_limit "
                                 "must be larger than zero")
            properties.set_thrift_container_size_limit(
                thrift_container_size_limit)

        if decryption_properties is not None:
            properties.file_decryption_properties(
                decryption_properties.unwrap())

        arrow_props.set_pre_buffer(pre_buffer)

        properties.set_page_checksum_verification(page_checksum_verification)

        if coerce_int96_timestamp_unit is None:
            # use the default defined in default_arrow_reader_properties()
            pass
        else:
            arrow_props.set_coerce_int96_timestamp_unit(
                string_to_timeunit(coerce_int96_timestamp_unit))

        self.source = source
        get_reader(source, use_memory_map, &self.rd_handle)

        with nogil:
            check_status(builder.Open(self.rd_handle, properties, c_metadata))

        # Set up metadata
        with nogil:
            c_metadata = builder.raw_reader().metadata()
        self._metadata = result = FileMetaData()
        result.init(c_metadata)

        if read_dictionary is not None:
            self._set_read_dictionary(read_dictionary, &arrow_props)

        with nogil:
            check_status(builder.memory_pool(self.pool)
                         .properties(arrow_props)
                         .Build(&self.reader))

    cdef _set_read_dictionary(self, read_dictionary,
                              ArrowReaderProperties* props):
        for column in read_dictionary:
            if not isinstance(column, int):
                column = self.column_name_idx(column)
            props.set_read_dictionary(column, True)

    @property
    def column_paths(self):
        cdef:
            FileMetaData container = self.metadata
            const CFileMetaData* metadata = container._metadata
            vector[c_string] path
            int i = 0

        paths = []
        for i in range(0, metadata.num_columns()):
            path = (metadata.schema().Column(i)
                    .path().get().ToDotVector())
            paths.append([frombytes(x) for x in path])

        return paths

    @property
    def metadata(self):
        return self._metadata

    @property
    def schema_arrow(self):
        cdef shared_ptr[CSchema] out
        with nogil:
            check_status(self.reader.get().GetSchema(&out))
        return pyarrow_wrap_schema(out)

    @property
    def num_row_groups(self):
        return self.reader.get().num_row_groups()

    def set_use_threads(self, bint use_threads):
        """
        Parameters
        ----------
        use_threads : bool
        """
        self.reader.get().set_use_threads(use_threads)

    def set_batch_size(self, int64_t batch_size):
        """
        Parameters
        ----------
        batch_size : int64
        """
        self.reader.get().set_batch_size(batch_size)

    def iter_batches(self, int64_t batch_size, row_groups, column_indices=None,
                     bint use_threads=True):
        """
        Parameters
        ----------
        batch_size : int64
        row_groups : list[int]
        column_indices : list[int], optional
        use_threads : bool, default True

        Yields
        ------
        next : RecordBatch
        """
        cdef:
            vector[int] c_row_groups
            vector[int] c_column_indices
            shared_ptr[CRecordBatch] record_batch
            UniquePtrNoGIL[CRecordBatchReader] recordbatchreader

        self.set_batch_size(batch_size)

        if use_threads:
            self.set_use_threads(use_threads)

        for row_group in row_groups:
            c_row_groups.push_back(row_group)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)
            with nogil:
                check_status(
                    self.reader.get().GetRecordBatchReader(
                        c_row_groups, c_column_indices, &recordbatchreader
                    )
                )
        else:
            with nogil:
                check_status(
                    self.reader.get().GetRecordBatchReader(
                        c_row_groups, &recordbatchreader
                    )
                )

        while True:
            with nogil:
                check_status(
                    recordbatchreader.get().ReadNext(&record_batch)
                )
            if record_batch.get() == NULL:
                break

            yield pyarrow_wrap_batch(record_batch)

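    # Sketch: ParquetReader is the low-level reader wrapped by
    # pyarrow.parquet.ParquetFile; most code should use that instead.
    # Direct use might look like ('example.parquet' is hypothetical):
    #
    #   reader = ParquetReader()
    #   reader.open('example.parquet', buffer_size=64*1024, pre_buffer=True)
    #   for batch in reader.iter_batches(batch_size=8192, row_groups=[0]):
    #       pass  # each 'batch' is a pyarrow.RecordBatch
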
    def read_row_group(self, int i, column_indices=None,
                       bint use_threads=True):
        """
        Parameters
        ----------
        i : int
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        return self.read_row_groups([i], column_indices, use_threads)

    def read_row_groups(self, row_groups not None, column_indices=None,
                        bint use_threads=True):
        """
        Parameters
        ----------
        row_groups : list[int]
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        cdef:
            shared_ptr[CTable] ctable
            vector[int] c_row_groups
            vector[int] c_column_indices

        self.set_use_threads(use_threads)

        for row_group in row_groups:
            c_row_groups.push_back(row_group)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

            with nogil:
                check_status(self.reader.get()
                             .ReadRowGroups(c_row_groups, c_column_indices,
                                            &ctable))
        else:
            # Read all columns
            with nogil:
                check_status(self.reader.get()
                             .ReadRowGroups(c_row_groups, &ctable))
        return pyarrow_wrap_table(ctable)

    def read_all(self, column_indices=None, bint use_threads=True):
        """
        Parameters
        ----------
        column_indices : list[int], optional
        use_threads : bool, default True

        Returns
        -------
        table : pyarrow.Table
        """
        cdef:
            shared_ptr[CTable] ctable
            vector[int] c_column_indices

        self.set_use_threads(use_threads)

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

            with nogil:
                check_status(self.reader.get()
                             .ReadTable(c_column_indices, &ctable))
        else:
            # Read all columns
            with nogil:
                check_status(self.reader.get()
                             .ReadTable(&ctable))
        return pyarrow_wrap_table(ctable)

    def scan_contents(self, column_indices=None, batch_size=65536):
        """
        Parameters
        ----------
        column_indices : list[int], optional
        batch_size : int32, default 65536

        Returns
        -------
        num_rows : int64
        """
        cdef:
            vector[int] c_column_indices
            int32_t c_batch_size
            int64_t c_num_rows

        if column_indices is not None:
            for index in column_indices:
                c_column_indices.push_back(index)

        c_batch_size = batch_size

        with nogil:
            check_status(self.reader.get()
                         .ScanContents(c_column_indices, c_batch_size,
                                       &c_num_rows))

        return c_num_rows

def column_name_idx(self, column_name):
|
1719 |
+
"""
|
1720 |
+
Find the index of a column by its name.
|
1721 |
+
|
1722 |
+
Parameters
|
1723 |
+
----------
|
1724 |
+
column_name : str
|
1725 |
+
Name of the column; separation of nesting levels is done via ".".
|
1726 |
+
|
1727 |
+
Returns
|
1728 |
+
-------
|
1729 |
+
column_idx : int
|
1730 |
+
Integer index of the column in the schema.
|
1731 |
+
"""
|
1732 |
+
cdef:
|
1733 |
+
FileMetaData container = self.metadata
|
1734 |
+
const CFileMetaData* metadata = container._metadata
|
1735 |
+
int i = 0
|
1736 |
+
|
1737 |
+
if self._column_idx_map is None:
|
1738 |
+
self._column_idx_map = {}
|
1739 |
+
for i in range(0, metadata.num_columns()):
|
1740 |
+
col_bytes = tobytes(metadata.schema().Column(i)
|
1741 |
+
.path().get().ToDotString())
|
1742 |
+
self._column_idx_map[col_bytes] = i
|
1743 |
+
|
1744 |
+
return self._column_idx_map[tobytes(column_name)]
|
1745 |
+
|
1746 |
+
def read_column(self, int column_index):
|
1747 |
+
"""
|
1748 |
+
Read the column at the specified index.
|
1749 |
+
|
1750 |
+
Parameters
|
1751 |
+
----------
|
1752 |
+
column_index : int
|
1753 |
+
Index of the column.
|
1754 |
+
|
1755 |
+
Returns
|
1756 |
+
-------
|
1757 |
+
column : pyarrow.ChunkedArray
|
1758 |
+
"""
|
1759 |
+
cdef shared_ptr[CChunkedArray] out
|
1760 |
+
with nogil:
|
1761 |
+
check_status(self.reader.get()
|
1762 |
+
.ReadColumn(column_index, &out))
|
1763 |
+
return pyarrow_wrap_chunked_array(out)
|
1764 |
+
|
1765 |
+
def close(self):
|
1766 |
+
if not self.closed:
|
1767 |
+
with nogil:
|
1768 |
+
check_status(self.rd_handle.get().Close())
|
1769 |
+
|
1770 |
+
@property
|
1771 |
+
def closed(self):
|
1772 |
+
if self.rd_handle == NULL:
|
1773 |
+
return True
|
1774 |
+
with nogil:
|
1775 |
+
closed = self.rd_handle.get().closed()
|
1776 |
+
return closed
|
1777 |
+
|
1778 |
+
|
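For orientation, a minimal sketch of how this reader surfaces through the public `pyarrow.parquet` API, which wraps the methods above; `example.parquet` and the column names are hypothetical:

    import pyarrow.parquet as pq

    pf = pq.ParquetFile("example.parquet")      # hypothetical input file
    print(pf.num_row_groups, pf.schema_arrow)   # backed by the properties above

    # iter_batches streams the file in bounded-memory RecordBatch chunks
    for batch in pf.iter_batches(batch_size=64_000, columns=["a", "b"]):
        ...  # process each pyarrow.RecordBatch

    table = pf.read_row_group(0)                # one row group as a Table
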
cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column):
    cdef CSortingColumn c_sorting_column

    c_sorting_column.column_idx = sorting_column.column_index
    c_sorting_column.descending = sorting_column.descending
    c_sorting_column.nulls_first = sorting_column.nulls_first

    return c_sorting_column


cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *:
    if not (isinstance(sorting_columns, Sequence)
            and all(isinstance(col, SortingColumn) for col in sorting_columns)):
        raise ValueError(
            "'sorting_columns' must be a list of `SortingColumn`")

    cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col)
                                                     for col in sorting_columns]

    return c_sorting_columns

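A hedged sketch of attaching sort metadata from the public API, assuming a pyarrow version that exposes `pyarrow.parquet.SortingColumn`; the data must already be sorted, since the writer only records the ordering:

    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.table({"x": [3, 1, 2]}).sort_by("x")
    # SortingColumn(0) declares that row groups are sorted by column index 0,
    # ascending, nulls last (the defaults mirrored by the converter above)
    pq.write_table(table, "sorted.parquet",     # hypothetical output path
                   sorting_columns=[pq.SortingColumn(0)])
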
cdef shared_ptr[WriterProperties] _create_writer_properties(
        use_dictionary=None,
        compression=None,
        version=None,
        write_statistics=None,
        data_page_size=None,
        compression_level=None,
        use_byte_stream_split=False,
        column_encoding=None,
        data_page_version=None,
        FileEncryptionProperties encryption_properties=None,
        write_batch_size=None,
        dictionary_pagesize_limit=None,
        write_page_index=False,
        write_page_checksum=False,
        sorting_columns=None) except *:
    """General writer properties"""
    cdef:
        shared_ptr[WriterProperties] properties
        WriterProperties.Builder props

    # data_page_version

    if data_page_version is not None:
        if data_page_version == "1.0":
            props.data_page_version(ParquetDataPageVersion_V1)
        elif data_page_version == "2.0":
            props.data_page_version(ParquetDataPageVersion_V2)
        else:
            raise ValueError("Unsupported Parquet data page version: {0}"
                             .format(data_page_version))

    # version

    if version is not None:
        if version == "1.0":
            props.version(ParquetVersion_V1)
        elif version in ("2.0", "pseudo-2.0"):
            warnings.warn(
                "Parquet format '2.0' pseudo version is deprecated, use "
                "'2.4' or '2.6' for fine-grained feature selection",
                FutureWarning, stacklevel=2)
            props.version(ParquetVersion_V2_0)
        elif version == "2.4":
            props.version(ParquetVersion_V2_4)
        elif version == "2.6":
            props.version(ParquetVersion_V2_6)
        else:
            raise ValueError("Unsupported Parquet format version: {0}"
                             .format(version))

    # compression

    if isinstance(compression, basestring):
        check_compression_name(compression)
        props.compression(compression_from_name(compression))
    elif compression is not None:
        for column, codec in compression.items():
            check_compression_name(codec)
            props.compression(tobytes(column), compression_from_name(codec))

    if isinstance(compression_level, int):
        props.compression_level(compression_level)
    elif compression_level is not None:
        for column, level in compression_level.items():
            props.compression_level(tobytes(column), level)

    # use_dictionary

    if isinstance(use_dictionary, bool):
        if use_dictionary:
            props.enable_dictionary()
            if column_encoding is not None:
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")
        else:
            props.disable_dictionary()
    elif use_dictionary is not None:
        # Deactivate dictionary encoding by default
        props.disable_dictionary()
        for column in use_dictionary:
            props.enable_dictionary(tobytes(column))
            if (column_encoding is not None and
                    column_encoding.get(column) is not None):
                raise ValueError(
                    "To use 'column_encoding' set 'use_dictionary' to False")

    # write_statistics

    if isinstance(write_statistics, bool):
        if write_statistics:
            props.enable_statistics()
        else:
            props.disable_statistics()
    elif write_statistics is not None:
        # Deactivate statistics by default and enable for specified columns
        props.disable_statistics()
        for column in write_statistics:
            props.enable_statistics(tobytes(column))

    # sorting_columns

    if sorting_columns is not None:
        props.set_sorting_columns(_convert_sorting_columns(sorting_columns))

    # use_byte_stream_split

    if isinstance(use_byte_stream_split, bool):
        if use_byte_stream_split:
            if column_encoding is not None:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed "
                    "together with 'column_encoding'")
            else:
                props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT)
    elif use_byte_stream_split is not None:
        for column in use_byte_stream_split:
            if column_encoding is None:
                column_encoding = {column: 'BYTE_STREAM_SPLIT'}
            elif column_encoding.get(column, None) is None:
                column_encoding[column] = 'BYTE_STREAM_SPLIT'
            else:
                raise ValueError(
                    "'use_byte_stream_split' cannot be passed "
                    "together with 'column_encoding'")

    # column_encoding
    # encoding map - encode individual columns

    if column_encoding is not None:
        if isinstance(column_encoding, dict):
            for column, _encoding in column_encoding.items():
                props.encoding(tobytes(column),
                               encoding_enum_from_name(_encoding))
        elif isinstance(column_encoding, str):
            props.encoding(encoding_enum_from_name(column_encoding))
        else:
            raise TypeError(
                "'column_encoding' should be a dictionary or a string")

    if data_page_size is not None:
        props.data_pagesize(data_page_size)

    if write_batch_size is not None:
        props.write_batch_size(write_batch_size)

    if dictionary_pagesize_limit is not None:
        props.dictionary_pagesize_limit(dictionary_pagesize_limit)

    # encryption

    if encryption_properties is not None:
        props.encryption(
            (<FileEncryptionProperties>encryption_properties).unwrap())

    # For backwards compatibility reasons we cap the maximum row group size
    # at 64Mi rows. This could be changed in the future, though it would be
    # a breaking change.
    #
    # The user can always specify a smaller row group size (and the default
    # is smaller) when calling write_table. If the call to write_table uses
    # a size larger than this then it will be latched to this value.
    props.max_row_group_length(_MAX_ROW_GROUP_SIZE)

    # checksum

    if write_page_checksum:
        props.enable_page_checksum()
    else:
        props.disable_page_checksum()

    # page index

    if write_page_index:
        props.enable_write_page_index()
    else:
        props.disable_write_page_index()

    properties = props.build()

    return properties

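These builder knobs are reachable from Python as keyword arguments of `pyarrow.parquet.write_table` (and `ParquetWriter`); a small sketch with hypothetical path and column names:

    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.table({"id": [1, 2, 3], "payload": ["a", "b", "c"]})
    pq.write_table(
        table, "tuned.parquet",                           # hypothetical path
        version="2.6",                                    # Parquet format version
        compression={"id": "snappy", "payload": "zstd"},  # per-column codecs
        compression_level={"payload": 7},                 # codec-specific level
        use_dictionary=["payload"],                       # dictionary-encode only this column
        write_statistics=True,
    )
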
cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties(
        use_deprecated_int96_timestamps=False,
        coerce_timestamps=None,
        allow_truncated_timestamps=False,
        writer_engine_version=None,
        use_compliant_nested_type=True,
        store_schema=True) except *:
    """Arrow writer properties"""
    cdef:
        shared_ptr[ArrowWriterProperties] arrow_properties
        ArrowWriterProperties.Builder arrow_props

    # Store the original Arrow schema so things like dictionary types can
    # be automatically reconstructed
    if store_schema:
        arrow_props.store_schema()

    # int96 support

    if use_deprecated_int96_timestamps:
        arrow_props.enable_deprecated_int96_timestamps()
    else:
        arrow_props.disable_deprecated_int96_timestamps()

    # coerce_timestamps

    if coerce_timestamps == 'ms':
        arrow_props.coerce_timestamps(TimeUnit_MILLI)
    elif coerce_timestamps == 'us':
        arrow_props.coerce_timestamps(TimeUnit_MICRO)
    elif coerce_timestamps is not None:
        raise ValueError('Invalid value for coerce_timestamps: {0}'
                         .format(coerce_timestamps))

    # allow_truncated_timestamps

    if allow_truncated_timestamps:
        arrow_props.allow_truncated_timestamps()
    else:
        arrow_props.disallow_truncated_timestamps()

    # use_compliant_nested_type

    if use_compliant_nested_type:
        arrow_props.enable_compliant_nested_types()
    else:
        arrow_props.disable_compliant_nested_types()

    # writer_engine_version

    if writer_engine_version == "V1":
        warnings.warn("V1 parquet writer engine is a no-op. Use V2.")
        arrow_props.set_engine_version(ArrowWriterEngineVersion.V1)
    elif writer_engine_version != "V2":
        raise ValueError("Unsupported Writer Engine Version: {0}"
                         .format(writer_engine_version))

    arrow_properties = arrow_props.build()

    return arrow_properties

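A short sketch of the timestamp options above as they appear on `pyarrow.parquet.write_table`; nanosecond data is coerced to microseconds, and `allow_truncated_timestamps` suppresses the error when sub-microsecond precision is dropped (`ts.parquet` is a hypothetical path):

    import pyarrow as pa
    import pyarrow.parquet as pq

    t = pa.table({"ts": pa.array([1_000_000_001], type=pa.timestamp("ns"))})
    pq.write_table(t, "ts.parquet",
                   coerce_timestamps="us",
                   allow_truncated_timestamps=True)
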
cdef _name_to_index_map(Schema arrow_schema):
    cdef:
        shared_ptr[CSchema] sp_arrow_schema
        shared_ptr[SchemaDescriptor] sp_parquet_schema
        shared_ptr[WriterProperties] props = _create_writer_properties()
        shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=False,
            coerce_timestamps=None,
            allow_truncated_timestamps=False,
            writer_engine_version="V2"
        )

    sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema)

    with nogil:
        check_status(ToParquetSchema(
            sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema))

    out = dict()

    cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get()

    for i in range(parquet_schema.num_columns()):
        name = frombytes(parquet_schema.Column(i).path().get().ToDotString())
        out[name] = i

    return out

cdef class ParquetWriter(_Weakrefable):
    cdef:
        unique_ptr[FileWriter] writer
        shared_ptr[COutputStream] sink
        bint own_sink

    cdef readonly:
        object use_dictionary
        object use_deprecated_int96_timestamps
        object use_byte_stream_split
        object column_encoding
        object coerce_timestamps
        object allow_truncated_timestamps
        object compression
        object compression_level
        object data_page_version
        object use_compliant_nested_type
        object version
        object write_statistics
        object writer_engine_version
        int row_group_size
        int64_t data_page_size
        FileEncryptionProperties encryption_properties
        int64_t write_batch_size
        int64_t dictionary_pagesize_limit
        object store_schema

    def __cinit__(self, where, Schema schema not None, use_dictionary=None,
                  compression=None, version=None,
                  write_statistics=None,
                  MemoryPool memory_pool=None,
                  use_deprecated_int96_timestamps=False,
                  coerce_timestamps=None,
                  data_page_size=None,
                  allow_truncated_timestamps=False,
                  compression_level=None,
                  use_byte_stream_split=False,
                  column_encoding=None,
                  writer_engine_version=None,
                  data_page_version=None,
                  use_compliant_nested_type=True,
                  encryption_properties=None,
                  write_batch_size=None,
                  dictionary_pagesize_limit=None,
                  store_schema=True,
                  write_page_index=False,
                  write_page_checksum=False,
                  sorting_columns=None):
        cdef:
            shared_ptr[WriterProperties] properties
            shared_ptr[ArrowWriterProperties] arrow_properties
            c_string c_where
            CMemoryPool* pool

        try:
            where = _stringify_path(where)
        except TypeError:
            get_writer(where, &self.sink)
            self.own_sink = False
        else:
            c_where = tobytes(where)
            with nogil:
                self.sink = GetResultValue(FileOutputStream.Open(c_where))
            self.own_sink = True

        properties = _create_writer_properties(
            use_dictionary=use_dictionary,
            compression=compression,
            version=version,
            write_statistics=write_statistics,
            data_page_size=data_page_size,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            column_encoding=column_encoding,
            data_page_version=data_page_version,
            encryption_properties=encryption_properties,
            write_batch_size=write_batch_size,
            dictionary_pagesize_limit=dictionary_pagesize_limit,
            write_page_index=write_page_index,
            write_page_checksum=write_page_checksum,
            sorting_columns=sorting_columns,
        )
        arrow_properties = _create_arrow_writer_properties(
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            coerce_timestamps=coerce_timestamps,
            allow_truncated_timestamps=allow_truncated_timestamps,
            writer_engine_version=writer_engine_version,
            use_compliant_nested_type=use_compliant_nested_type,
            store_schema=store_schema,
        )

        pool = maybe_unbox_memory_pool(memory_pool)
        with nogil:
            self.writer = move(GetResultValue(
                FileWriter.Open(deref(schema.schema), pool,
                                self.sink, properties, arrow_properties)))

    def close(self):
        with nogil:
            check_status(self.writer.get().Close())
            if self.own_sink:
                check_status(self.sink.get().Close())

    def write_table(self, Table table, row_group_size=None):
        cdef:
            CTable* ctable = table.table
            int64_t c_row_group_size

        if row_group_size is None or row_group_size == -1:
            c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE)
        elif row_group_size == 0:
            raise ValueError('Row group size cannot be 0')
        else:
            c_row_group_size = row_group_size

        with nogil:
            check_status(self.writer.get()
                         .WriteTable(deref(ctable), c_row_group_size))

    @property
    def metadata(self):
        cdef:
            shared_ptr[CFileMetaData] metadata
            FileMetaData result
        with nogil:
            metadata = self.writer.get().metadata()
        if metadata:
            result = FileMetaData()
            result.init(metadata)
            return result
        raise RuntimeError(
            'file metadata is only available after writer close')
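For orientation, the public `pyarrow.parquet.ParquetWriter` wraps this class and supports incremental writes, one row group per `write_table` call; a minimal sketch with a hypothetical output path:

    import pyarrow as pa
    import pyarrow.parquet as pq

    schema = pa.schema([("id", pa.int64())])
    with pq.ParquetWriter("incremental.parquet", schema,
                          compression="zstd") as writer:
        for start in (0, 5):
            writer.write_table(
                pa.table({"id": list(range(start, start + 5))}, schema=schema))
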
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd
ADDED
@@ -0,0 +1,56 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++
# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libparquet_encryption cimport *
from pyarrow._parquet cimport (ParquetCipher,
                               CFileEncryptionProperties,
                               CFileDecryptionProperties,
                               FileEncryptionProperties,
                               FileDecryptionProperties,
                               ParquetCipher_AES_GCM_V1,
                               ParquetCipher_AES_GCM_CTR_V1)
from pyarrow.lib cimport _Weakrefable

cdef class CryptoFactory(_Weakrefable):
    cdef shared_ptr[CPyCryptoFactory] factory
    cdef init(self, callable_client_factory)
    cdef inline shared_ptr[CPyCryptoFactory] unwrap(self)

cdef class EncryptionConfiguration(_Weakrefable):
    cdef shared_ptr[CEncryptionConfiguration] configuration
    cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil

cdef class DecryptionConfiguration(_Weakrefable):
    cdef shared_ptr[CDecryptionConfiguration] configuration
    cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil

cdef class KmsConnectionConfig(_Weakrefable):
    cdef shared_ptr[CKmsConnectionConfig] configuration
    cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil

    @staticmethod
    cdef wrap(const CKmsConnectionConfig& config)


cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *
cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *
cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *
cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *
llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx
ADDED
@@ -0,0 +1,484 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++

from datetime import timedelta

from cython.operator cimport dereference as deref
from libcpp.memory cimport shared_ptr
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.lib cimport _Weakrefable
from pyarrow.lib import tobytes, frombytes


cdef ParquetCipher cipher_from_name(name):
    name = name.upper()
    if name == 'AES_GCM_V1':
        return ParquetCipher_AES_GCM_V1
    elif name == 'AES_GCM_CTR_V1':
        return ParquetCipher_AES_GCM_CTR_V1
    else:
        raise ValueError(f'Invalid cipher name: {name!r}')


cdef cipher_to_name(ParquetCipher cipher):
    if ParquetCipher_AES_GCM_V1 == cipher:
        return 'AES_GCM_V1'
    elif ParquetCipher_AES_GCM_CTR_V1 == cipher:
        return 'AES_GCM_CTR_V1'
    else:
        raise ValueError('Invalid cipher value: {0}'.format(cipher))

cdef class EncryptionConfiguration(_Weakrefable):
    """Configuration of the encryption, such as which columns to encrypt"""
    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, footer_key, *, column_keys=None,
                 encryption_algorithm=None,
                 plaintext_footer=None, double_wrapping=None,
                 cache_lifetime=None, internal_key_material=None,
                 data_key_length_bits=None):
        self.configuration.reset(
            new CEncryptionConfiguration(tobytes(footer_key)))
        if column_keys is not None:
            self.column_keys = column_keys
        if encryption_algorithm is not None:
            self.encryption_algorithm = encryption_algorithm
        if plaintext_footer is not None:
            self.plaintext_footer = plaintext_footer
        if double_wrapping is not None:
            self.double_wrapping = double_wrapping
        if cache_lifetime is not None:
            self.cache_lifetime = cache_lifetime
        if internal_key_material is not None:
            self.internal_key_material = internal_key_material
        if data_key_length_bits is not None:
            self.data_key_length_bits = data_key_length_bits

    @property
    def footer_key(self):
        """ID of the master key for footer encryption/signing"""
        return frombytes(self.configuration.get().footer_key)

    @property
    def column_keys(self):
        """
        List of columns to encrypt, with master key IDs.
        """
        column_keys_str = frombytes(self.configuration.get().column_keys)
        # Convert from "masterKeyID:colName,colName;masterKeyID:colName..."
        # (see HIVE-21848) to dictionary of master key ID to column name lists
        column_keys_to_key_list_str = dict(subString.replace(" ", "").split(
            ":") for subString in column_keys_str.split(";"))
        column_keys_dict = {k: v.split(
            ",") for k, v in column_keys_to_key_list_str.items()}
        return column_keys_dict

    @column_keys.setter
    def column_keys(self, dict value):
        if value is not None:
            # convert a dictionary such as
            # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}''
            # to the string defined by the spec
            # 'key1: col1 , col2; key2: col3 , col4'
            column_keys = "; ".join(
                ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()])
            self.configuration.get().column_keys = tobytes(column_keys)

    @property
    def encryption_algorithm(self):
        """Parquet encryption algorithm.
        Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1"."""
        return cipher_to_name(self.configuration.get().encryption_algorithm)

    @encryption_algorithm.setter
    def encryption_algorithm(self, value):
        cipher = cipher_from_name(value)
        self.configuration.get().encryption_algorithm = cipher

    @property
    def plaintext_footer(self):
        """Write files with plaintext footer."""
        return self.configuration.get().plaintext_footer

    @plaintext_footer.setter
    def plaintext_footer(self, value):
        self.configuration.get().plaintext_footer = value

    @property
    def double_wrapping(self):
        """Use double wrapping - where data encryption keys (DEKs) are
        encrypted with key encryption keys (KEKs), which in turn are
        encrypted with master keys.
        If set to false, use single wrapping - where DEKs are
        encrypted directly with master keys."""
        return self.configuration.get().double_wrapping

    @double_wrapping.setter
    def double_wrapping(self, value):
        self.configuration.get().double_wrapping = value

    @property
    def cache_lifetime(self):
        """Lifetime of cached entities (key encryption keys,
        local wrapping keys, KMS client objects)."""
        return timedelta(
            seconds=self.configuration.get().cache_lifetime_seconds)

    @cache_lifetime.setter
    def cache_lifetime(self, value):
        if not isinstance(value, timedelta):
            raise TypeError("cache_lifetime should be a timedelta")
        self.configuration.get().cache_lifetime_seconds = value.total_seconds()

    @property
    def internal_key_material(self):
        """Store key material inside Parquet file footers; this mode doesn't
        produce additional files. If set to false, key material is stored in
        separate files in the same folder, which enables key rotation for
        immutable Parquet files."""
        return self.configuration.get().internal_key_material

    @internal_key_material.setter
    def internal_key_material(self, value):
        self.configuration.get().internal_key_material = value

    @property
    def data_key_length_bits(self):
        """Length of data encryption keys (DEKs), randomly generated by parquet key
        management tools. Can be 128, 192 or 256 bits."""
        return self.configuration.get().data_key_length_bits

    @data_key_length_bits.setter
    def data_key_length_bits(self, value):
        self.configuration.get().data_key_length_bits = value

    cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil:
        return self.configuration

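A hedged sketch of building this configuration through the public `pyarrow.parquet.encryption` module; the key IDs and column names are hypothetical and must correspond to master keys known to your KMS:

    from datetime import timedelta
    import pyarrow.parquet.encryption as pe

    encryption_config = pe.EncryptionConfiguration(
        footer_key="footer_key",                            # hypothetical key ID
        column_keys={"col_key": ["secret_a", "secret_b"]},  # hypothetical columns
        encryption_algorithm="AES_GCM_V1",
        cache_lifetime=timedelta(minutes=5),
        data_key_length_bits=256,
    )
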
cdef class DecryptionConfiguration(_Weakrefable):
    """Configuration of the decryption, such as cache timeout."""
    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, *, cache_lifetime=None):
        self.configuration.reset(new CDecryptionConfiguration())
        # Apply the keyword argument instead of silently ignoring it
        if cache_lifetime is not None:
            self.cache_lifetime = cache_lifetime

    @property
    def cache_lifetime(self):
        """Lifetime of cached entities (key encryption keys,
        local wrapping keys, KMS client objects)."""
        return timedelta(
            seconds=self.configuration.get().cache_lifetime_seconds)

    @cache_lifetime.setter
    def cache_lifetime(self, value):
        self.configuration.get().cache_lifetime_seconds = value.total_seconds()

    cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil:
        return self.configuration


cdef class KmsConnectionConfig(_Weakrefable):
    """Configuration of the connection to the Key Management Service (KMS)"""
    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, *, kms_instance_id=None, kms_instance_url=None,
                 key_access_token=None, custom_kms_conf=None):
        self.configuration.reset(new CKmsConnectionConfig())
        if kms_instance_id is not None:
            self.kms_instance_id = kms_instance_id
        if kms_instance_url is not None:
            self.kms_instance_url = kms_instance_url
        if key_access_token is None:
            self.key_access_token = b'DEFAULT'
        else:
            self.key_access_token = key_access_token
        if custom_kms_conf is not None:
            self.custom_kms_conf = custom_kms_conf

    @property
    def kms_instance_id(self):
        """ID of the KMS instance that will be used for encryption
        (if multiple KMS instances are available)."""
        return frombytes(self.configuration.get().kms_instance_id)

    @kms_instance_id.setter
    def kms_instance_id(self, value):
        self.configuration.get().kms_instance_id = tobytes(value)

    @property
    def kms_instance_url(self):
        """URL of the KMS instance."""
        return frombytes(self.configuration.get().kms_instance_url)

    @kms_instance_url.setter
    def kms_instance_url(self, value):
        self.configuration.get().kms_instance_url = tobytes(value)

    @property
    def key_access_token(self):
        """Authorization token that will be passed to KMS."""
        return frombytes(self.configuration.get()
                         .refreshable_key_access_token.get().value())

    @key_access_token.setter
    def key_access_token(self, value):
        self.refresh_key_access_token(value)

    @property
    def custom_kms_conf(self):
        """A dictionary with KMS-type-specific configuration"""
        custom_kms_conf = {
            frombytes(k): frombytes(v)
            for k, v in self.configuration.get().custom_kms_conf
        }
        return custom_kms_conf

    @custom_kms_conf.setter
    def custom_kms_conf(self, dict value):
        if value is not None:
            for k, v in value.items():
                if isinstance(k, str) and isinstance(v, str):
                    self.configuration.get().custom_kms_conf[tobytes(k)] = \
                        tobytes(v)
                else:
                    raise TypeError("Expected custom_kms_conf to be " +
                                    "a dictionary of strings")

    def refresh_key_access_token(self, value):
        cdef:
            shared_ptr[CKeyAccessToken] c_key_access_token = \
                self.configuration.get().refreshable_key_access_token

        c_key_access_token.get().Refresh(tobytes(value))

    cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil:
        return self.configuration

    @staticmethod
    cdef wrap(const CKmsConnectionConfig& config):
        result = KmsConnectionConfig()
        result.configuration = make_shared[CKmsConnectionConfig](move(config))
        return result


# Callback definitions for CPyKmsClientVtable
cdef void _cb_wrap_key(
        handler, const c_string& key_bytes,
        const c_string& master_key_identifier, c_string* out) except *:
    mkid_str = frombytes(master_key_identifier)
    wrapped_key = handler.wrap_key(key_bytes, mkid_str)
    out[0] = tobytes(wrapped_key)


cdef void _cb_unwrap_key(
        handler, const c_string& wrapped_key,
        const c_string& master_key_identifier, c_string* out) except *:
    mkid_str = frombytes(master_key_identifier)
    wk_str = frombytes(wrapped_key)
    key = handler.unwrap_key(wk_str, mkid_str)
    out[0] = tobytes(key)


cdef class KmsClient(_Weakrefable):
    """The abstract base class for KmsClient implementations."""
    cdef:
        shared_ptr[CKmsClient] client

    def __init__(self):
        self.init()

    cdef init(self):
        cdef:
            CPyKmsClientVtable vtable = CPyKmsClientVtable()

        vtable.wrap_key = _cb_wrap_key
        vtable.unwrap_key = _cb_unwrap_key

        self.client.reset(new CPyKmsClient(self, vtable))

    def wrap_key(self, key_bytes, master_key_identifier):
        """Wrap a key - encrypt it with the master key."""
        raise NotImplementedError()

    def unwrap_key(self, wrapped_key, master_key_identifier):
        """Unwrap a key - decrypt it with the master key."""
        raise NotImplementedError()

    cdef inline shared_ptr[CKmsClient] unwrap(self) nogil:
        return self.client

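Concrete clients subclass `KmsClient` and implement the two methods above. A toy, insecure sketch in the spirit of the in-memory example from the Arrow documentation, where master keys arrive via `custom_kms_conf` and base64 concatenation stands in for real KMS cryptography:

    import base64
    import pyarrow.parquet.encryption as pe

    class InMemoryKmsClient(pe.KmsClient):
        """Toy KMS client for local testing only -- not secure."""

        def __init__(self, kms_connection_config):
            super().__init__()
            self.master_keys_map = kms_connection_config.custom_kms_conf

        def wrap_key(self, key_bytes, master_key_identifier):
            master = self.master_keys_map[master_key_identifier].encode("utf-8")
            return base64.b64encode(master + key_bytes)

        def unwrap_key(self, wrapped_key, master_key_identifier):
            master = self.master_keys_map[master_key_identifier].encode("utf-8")
            return base64.b64decode(wrapped_key)[len(master):]
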
# Callback definition for CPyKmsClientFactoryVtable
cdef void _cb_create_kms_client(
        handler,
        const CKmsConnectionConfig& kms_connection_config,
        shared_ptr[CKmsClient]* out) except *:
    connection_config = KmsConnectionConfig.wrap(kms_connection_config)

    result = handler(connection_config)
    if not isinstance(result, KmsClient):
        raise TypeError(
            "callable must return KmsClient instances, but got {}".format(
                type(result)))

    out[0] = (<KmsClient> result).unwrap()


cdef class CryptoFactory(_Weakrefable):
    """A factory that produces the low-level FileEncryptionProperties and
    FileDecryptionProperties objects from the high-level parameters."""
    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, kms_client_factory):
        """Create CryptoFactory.

        Parameters
        ----------
        kms_client_factory : a callable that accepts KmsConnectionConfig
            and returns a KmsClient
        """
        self.factory.reset(new CPyCryptoFactory())

        if callable(kms_client_factory):
            self.init(kms_client_factory)
        else:
            raise TypeError("Parameter kms_client_factory must be a callable")

    cdef init(self, callable_client_factory):
        cdef:
            CPyKmsClientFactoryVtable vtable
            shared_ptr[CPyKmsClientFactory] kms_client_factory

        vtable.create_kms_client = _cb_create_kms_client
        kms_client_factory.reset(
            new CPyKmsClientFactory(callable_client_factory, vtable))
        # A KmsClientFactory object must be registered
        # via this method before calling any of
        # file_encryption_properties()/file_decryption_properties() methods.
        self.factory.get().RegisterKmsClientFactory(
            static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory](
                kms_client_factory))

    def file_encryption_properties(self,
                                   KmsConnectionConfig kms_connection_config,
                                   EncryptionConfiguration encryption_config):
        """Create file encryption properties.

        Parameters
        ----------
        kms_connection_config : KmsConnectionConfig
            Configuration of connection to KMS

        encryption_config : EncryptionConfiguration
            Configuration of the encryption, such as which columns to encrypt

        Returns
        -------
        file_encryption_properties : FileEncryptionProperties
            File encryption properties.
        """
        cdef:
            CResult[shared_ptr[CFileEncryptionProperties]] \
                file_encryption_properties_result
        with nogil:
            file_encryption_properties_result = \
                self.factory.get().SafeGetFileEncryptionProperties(
                    deref(kms_connection_config.unwrap().get()),
                    deref(encryption_config.unwrap().get()))
        file_encryption_properties = GetResultValue(
            file_encryption_properties_result)
        return FileEncryptionProperties.wrap(file_encryption_properties)

    def file_decryption_properties(
            self,
            KmsConnectionConfig kms_connection_config,
            DecryptionConfiguration decryption_config=None):
        """Create file decryption properties.

        Parameters
        ----------
        kms_connection_config : KmsConnectionConfig
            Configuration of connection to KMS

        decryption_config : DecryptionConfiguration, default None
            Configuration of the decryption, such as cache timeout.
            Can be None.

        Returns
        -------
        file_decryption_properties : FileDecryptionProperties
            File decryption properties.
        """
        cdef:
            CDecryptionConfiguration c_decryption_config
            CResult[shared_ptr[CFileDecryptionProperties]] \
                c_file_decryption_properties
        if decryption_config is None:
            c_decryption_config = CDecryptionConfiguration()
        else:
            c_decryption_config = deref(decryption_config.unwrap().get())
        with nogil:
            c_file_decryption_properties = \
                self.factory.get().SafeGetFileDecryptionProperties(
                    deref(kms_connection_config.unwrap().get()),
                    c_decryption_config)
        file_decryption_properties = GetResultValue(
            c_file_decryption_properties)
        return FileDecryptionProperties.wrap(file_decryption_properties)

    def remove_cache_entries_for_token(self, access_token):
        self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token))

    def remove_cache_entries_for_all_tokens(self):
        self.factory.get().RemoveCacheEntriesForAllTokens()

    cdef inline shared_ptr[CPyCryptoFactory] unwrap(self):
        return self.factory


cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *:
    if isinstance(crypto_factory, CryptoFactory):
        pycf = (<CryptoFactory> crypto_factory).unwrap()
        return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf)
    raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory))


cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *:
    if isinstance(kmsconnectionconfig, KmsConnectionConfig):
        return (<KmsConnectionConfig> kmsconnectionconfig).unwrap()
    raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig))


cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *:
    if isinstance(encryptionconfig, EncryptionConfiguration):
        return (<EncryptionConfiguration> encryptionconfig).unwrap()
    raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig))


cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *:
    if isinstance(decryptionconfig, DecryptionConfiguration):
        return (<DecryptionConfiguration> decryptionconfig).unwrap()
    raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig))
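Wiring it together, hedged: reusing the hypothetical `InMemoryKmsClient`, `encryption_config`, and `table` from the sketches above, a `CryptoFactory` turns the high-level configuration into the low-level properties consumed by the Parquet writer and reader:

    import pyarrow.parquet as pq
    import pyarrow.parquet.encryption as pe

    kms_config = pe.KmsConnectionConfig(
        custom_kms_conf={"footer_key": "0123456789112345",    # hypothetical
                         "col_key": "1234567890123450"})      # key material
    crypto_factory = pe.CryptoFactory(lambda config: InMemoryKmsClient(config))

    pq.write_table(
        table, "encrypted.parquet",
        encryption_properties=crypto_factory.file_encryption_properties(
            kms_config, encryption_config))

    decrypted = pq.ParquetFile(
        "encrypted.parquet",
        decryption_properties=crypto_factory.file_decryption_properties(kms_config)
    ).read()
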
llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (84.2 kB).
llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd
ADDED
@@ -0,0 +1,33 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++
# cython: language_level = 3

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport CStatus


ctypedef CStatus cb_test_func()

cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil:

    cdef cppclass CTestCase "arrow::py::testing::TestCase":
        c_string name
        cb_test_func func

    vector[CTestCase] GetCppTestCases()
llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.pyx
ADDED
@@ -0,0 +1,467 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: language_level = 3

from cython cimport binding

from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata,
                          pyarrow_unwrap_metadata)
from pyarrow.lib import frombytes, tobytes, KeyValueMetadata
from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_fs cimport *
from pyarrow._fs cimport FileSystem


cpdef enum S3LogLevel:
    Off = <int8_t> CS3LogLevel_Off
    Fatal = <int8_t> CS3LogLevel_Fatal
    Error = <int8_t> CS3LogLevel_Error
    Warn = <int8_t> CS3LogLevel_Warn
    Info = <int8_t> CS3LogLevel_Info
    Debug = <int8_t> CS3LogLevel_Debug
    Trace = <int8_t> CS3LogLevel_Trace


def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1):
    """
    Initialize S3 support

    Parameters
    ----------
    log_level : S3LogLevel
        level of logging
    num_event_loop_threads : int, default 1
        how many threads to use for the AWS SDK's I/O event loop

    Examples
    --------
    >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP
    """
    cdef CS3GlobalOptions options
    options.log_level = <CS3LogLevel> log_level
    options.num_event_loop_threads = num_event_loop_threads
    check_status(CInitializeS3(options))


def ensure_s3_initialized():
    """
    Initialize S3 (with default options) if not already initialized
    """
    check_status(CEnsureS3Initialized())


def finalize_s3():
    check_status(CFinalizeS3())


def ensure_s3_finalized():
    """
    Finalize S3 if already initialized
    """
    check_status(CEnsureS3Finalized())


def resolve_s3_region(bucket):
    """
    Resolve the S3 region of a bucket.

    Parameters
    ----------
    bucket : str
        An S3 bucket name

    Returns
    -------
    region : str
        An S3 region name

    Examples
    --------
    >>> fs.resolve_s3_region('voltrondata-labs-datasets')
    'us-east-2'
    """
    cdef:
        c_string c_bucket
        c_string c_region

    ensure_s3_initialized()

    c_bucket = tobytes(bucket)
    with nogil:
        c_region = GetResultValue(ResolveS3BucketRegion(c_bucket))

    return frombytes(c_region)

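A small sketch of the module-level helpers above; explicit initialization is optional (it also happens lazily), but lets you tune SDK logging and event-loop threading up front:

    from pyarrow import fs

    fs.initialize_s3(fs.S3LogLevel.Error, num_event_loop_threads=2)
    print(fs.resolve_s3_region("voltrondata-labs-datasets"))  # -> 'us-east-2'
    fs.finalize_s3()  # optional explicit teardown, e.g. at process exit
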
class S3RetryStrategy:
    """
    Base class for AWS retry strategies for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """

    def __init__(self, max_attempts=3):
        self.max_attempts = max_attempts


class AwsStandardS3RetryStrategy(S3RetryStrategy):
    """
    Represents an AWS Standard retry strategy for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """
    pass


class AwsDefaultS3RetryStrategy(S3RetryStrategy):
    """
    Represents an AWS Default retry strategy for use with S3.

    Parameters
    ----------
    max_attempts : int, default 3
        The maximum number of retry attempts to attempt before failing.
    """
    pass


cdef class S3FileSystem(FileSystem):
    """
    S3-backed FileSystem implementation

    AWS access_key and secret_key can be provided explicitly.

    If role_arn is provided instead of access_key and secret_key, temporary
    credentials will be fetched by issuing a request to STS to assume the
    specified role.

    If neither access_key nor secret_key are provided, and role_arn is also
    not provided, credentials are established automatically.
    S3FileSystem will try the following methods, in order:

    * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables
    * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config``
    * for nodes on Amazon EC2, the EC2 Instance Metadata Service

    Note: S3 buckets are special and the operations available on them may be
    limited or more expensive than desired.

    When S3FileSystem creates new buckets (assuming allow_bucket_creation is
    True), it does not pass any non-default settings. In AWS S3, the bucket and
    all objects will not be publicly visible, and will have no bucket policies
    and no resource tags. To have more control over how buckets are created,
    use a different API to create them.

    Parameters
    ----------
    access_key : str, default None
        AWS Access Key ID. Pass None to use the standard AWS environment
        variables and/or configuration file.
    secret_key : str, default None
        AWS Secret Access key. Pass None to use the standard AWS environment
        variables and/or configuration file.
    session_token : str, default None
        AWS Session Token. An optional session token, required if access_key
        and secret_key are temporary credentials from STS.
    anonymous : boolean, default False
        Whether to connect anonymously if access_key and secret_key are None.
        If true, will not attempt to look up credentials using standard AWS
        configuration methods.
    role_arn : str, default None
        AWS Role ARN. If provided instead of access_key and secret_key,
        temporary credentials will be fetched by assuming this role.
    session_name : str, default None
        An optional identifier for the assumed role session.
    external_id : str, default None
        An optional unique identifier that might be required when you assume
        a role in another account.
    load_frequency : int, default 900
        The frequency (in seconds) with which temporary credentials from an
        assumed role session will be refreshed.
    region : str, default None
        AWS region to connect to. If not set, the AWS SDK will attempt to
        determine the region using heuristics such as environment variables,
        configuration profile, EC2 metadata, or default to 'us-east-1' when SDK
        version <1.8. One can also use :func:`pyarrow.fs.resolve_s3_region` to
        automatically resolve the region from a bucket name.
    request_timeout : double, default None
        Socket read timeouts on Windows and macOS, in seconds.
        If omitted, the AWS SDK default value is used (typically 3 seconds).
        This option is ignored on non-Windows, non-macOS systems.
    connect_timeout : double, default None
        Socket connection timeout, in seconds.
        If omitted, the AWS SDK default value is used (typically 1 second).
    scheme : str, default 'https'
        S3 connection transport scheme.
    endpoint_override : str, default None
        Override region with a connect string such as "localhost:9000"
    background_writes : boolean, default True
        Whether file writes will be issued in the background, without
        blocking.
    default_metadata : mapping or pyarrow.KeyValueMetadata, default None
        Default metadata for open_output_stream. This will be ignored if
        non-empty metadata is passed to open_output_stream.
    proxy_options : dict or str, default None
        If a proxy is used, provide the options here. Supported options are:
        'scheme' (str: 'http' or 'https'; required), 'host' (str; required),
        'port' (int; required), 'username' (str; optional),
        'password' (str; optional).
        A proxy URI (str) can also be provided, in which case these options
        will be derived from the provided URI.
        The following are equivalent::

            S3FileSystem(proxy_options='http://username:password@localhost:8020')
            S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost',
                                        'port': 8020, 'username': 'username',
                                        'password': 'password'})
    allow_bucket_creation : bool, default False
        Whether to allow CreateDir at the bucket-level. This option may also be
        passed in a URI query parameter.
    allow_bucket_deletion : bool, default False
        Whether to allow DeleteDir at the bucket-level. This option may also be
        passed in a URI query parameter.
    retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3)
        The retry strategy to use with S3; fail after max_attempts. Available
        strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy.
|
248 |
+
force_virtual_addressing : bool, default False
|
249 |
+
Whether to use virtual addressing of buckets.
|
250 |
+
If true, then virtual addressing is always enabled.
|
251 |
+
If false, then virtual addressing is only enabled if `endpoint_override` is empty.
|
252 |
+
This can be used for non-AWS backends that only support virtual hosted-style access.
|
253 |
+
|
254 |
+
Examples
|
255 |
+
--------
|
256 |
+
>>> from pyarrow import fs
|
257 |
+
>>> s3 = fs.S3FileSystem(region='us-west-2')
|
258 |
+
>>> s3.get_file_info(fs.FileSelector(
|
259 |
+
... 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True
|
260 |
+
... ))
|
261 |
+
[<FileInfo for 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN/.zarray...
|
262 |
+
|
263 |
+
For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
|
264 |
+
"""
|
265 |
+
|
266 |
+
cdef:
|
267 |
+
CS3FileSystem* s3fs
|
268 |
+
|
269 |
+
def __init__(self, *, access_key=None, secret_key=None, session_token=None,
|
270 |
+
bint anonymous=False, region=None, request_timeout=None,
|
271 |
+
connect_timeout=None, scheme=None, endpoint_override=None,
|
272 |
+
bint background_writes=True, default_metadata=None,
|
273 |
+
role_arn=None, session_name=None, external_id=None,
|
274 |
+
load_frequency=900, proxy_options=None,
|
275 |
+
allow_bucket_creation=False, allow_bucket_deletion=False,
|
276 |
+
retry_strategy: S3RetryStrategy = AwsStandardS3RetryStrategy(
|
277 |
+
max_attempts=3),
|
278 |
+
force_virtual_addressing=False):
|
279 |
+
cdef:
|
280 |
+
optional[CS3Options] options
|
281 |
+
shared_ptr[CS3FileSystem] wrapped
|
282 |
+
|
283 |
+
# Need to do this before initializing `options` as the S3Options
|
284 |
+
# constructor has a debug check against use after S3 finalization.
|
285 |
+
ensure_s3_initialized()
|
286 |
+
|
287 |
+
if access_key is not None and secret_key is None:
|
288 |
+
raise ValueError(
|
289 |
+
'In order to initialize with explicit credentials both '
|
290 |
+
'access_key and secret_key must be provided, '
|
291 |
+
'`secret_key` is not set.'
|
292 |
+
)
|
293 |
+
elif access_key is None and secret_key is not None:
|
294 |
+
raise ValueError(
|
295 |
+
'In order to initialize with explicit credentials both '
|
296 |
+
'access_key and secret_key must be provided, '
|
297 |
+
'`access_key` is not set.'
|
298 |
+
)
|
299 |
+
|
300 |
+
elif session_token is not None and (access_key is None or
|
301 |
+
secret_key is None):
|
302 |
+
raise ValueError(
|
303 |
+
'In order to initialize a session with temporary credentials, '
|
304 |
+
'both secret_key and access_key must be provided in addition '
|
305 |
+
'to session_token.'
|
306 |
+
)
|
307 |
+
|
308 |
+
elif (access_key is not None or secret_key is not None):
|
309 |
+
if anonymous:
|
310 |
+
raise ValueError(
|
311 |
+
'Cannot pass anonymous=True together with access_key '
|
312 |
+
'and secret_key.')
|
313 |
+
|
314 |
+
if role_arn:
|
315 |
+
raise ValueError(
|
316 |
+
'Cannot provide role_arn with access_key and secret_key')
|
317 |
+
|
318 |
+
if session_token is None:
|
319 |
+
session_token = ""
|
320 |
+
|
321 |
+
options = CS3Options.FromAccessKey(
|
322 |
+
tobytes(access_key),
|
323 |
+
tobytes(secret_key),
|
324 |
+
tobytes(session_token)
|
325 |
+
)
|
326 |
+
elif anonymous:
|
327 |
+
if role_arn:
|
328 |
+
raise ValueError(
|
329 |
+
'Cannot provide role_arn with anonymous=True')
|
330 |
+
|
331 |
+
options = CS3Options.Anonymous()
|
332 |
+
elif role_arn:
|
333 |
+
if session_name is None:
|
334 |
+
session_name = ''
|
335 |
+
if external_id is None:
|
336 |
+
external_id = ''
|
337 |
+
|
338 |
+
options = CS3Options.FromAssumeRole(
|
339 |
+
tobytes(role_arn),
|
340 |
+
tobytes(session_name),
|
341 |
+
tobytes(external_id),
|
342 |
+
load_frequency
|
343 |
+
)
|
344 |
+
else:
|
345 |
+
options = CS3Options.Defaults()
|
346 |
+
|
347 |
+
if region is not None:
|
348 |
+
options.value().region = tobytes(region)
|
349 |
+
if request_timeout is not None:
|
350 |
+
options.value().request_timeout = request_timeout
|
351 |
+
if connect_timeout is not None:
|
352 |
+
options.value().connect_timeout = connect_timeout
|
353 |
+
if scheme is not None:
|
354 |
+
options.value().scheme = tobytes(scheme)
|
355 |
+
if endpoint_override is not None:
|
356 |
+
options.value().endpoint_override = tobytes(endpoint_override)
|
357 |
+
if background_writes is not None:
|
358 |
+
options.value().background_writes = background_writes
|
359 |
+
if default_metadata is not None:
|
360 |
+
if not isinstance(default_metadata, KeyValueMetadata):
|
361 |
+
default_metadata = KeyValueMetadata(default_metadata)
|
362 |
+
options.value().default_metadata = pyarrow_unwrap_metadata(
|
363 |
+
default_metadata)
|
364 |
+
|
365 |
+
if proxy_options is not None:
|
366 |
+
if isinstance(proxy_options, dict):
|
367 |
+
options.value().proxy_options.scheme = tobytes(
|
368 |
+
proxy_options["scheme"])
|
369 |
+
options.value().proxy_options.host = tobytes(
|
370 |
+
proxy_options["host"])
|
371 |
+
options.value().proxy_options.port = proxy_options["port"]
|
372 |
+
proxy_username = proxy_options.get("username", None)
|
373 |
+
if proxy_username:
|
374 |
+
options.value().proxy_options.username = tobytes(
|
375 |
+
proxy_username)
|
376 |
+
proxy_password = proxy_options.get("password", None)
|
377 |
+
if proxy_password:
|
378 |
+
options.value().proxy_options.password = tobytes(
|
379 |
+
proxy_password)
|
380 |
+
elif isinstance(proxy_options, str):
|
381 |
+
options.value().proxy_options = GetResultValue(
|
382 |
+
CS3ProxyOptions.FromUriString(tobytes(proxy_options)))
|
383 |
+
else:
|
384 |
+
raise TypeError(
|
385 |
+
"'proxy_options': expected 'dict' or 'str', "
|
386 |
+
f"got {type(proxy_options)} instead.")
|
387 |
+
|
388 |
+
options.value().allow_bucket_creation = allow_bucket_creation
|
389 |
+
options.value().allow_bucket_deletion = allow_bucket_deletion
|
390 |
+
options.value().force_virtual_addressing = force_virtual_addressing
|
391 |
+
|
392 |
+
if isinstance(retry_strategy, AwsStandardS3RetryStrategy):
|
393 |
+
options.value().retry_strategy = CS3RetryStrategy.GetAwsStandardRetryStrategy(
|
394 |
+
retry_strategy.max_attempts)
|
395 |
+
elif isinstance(retry_strategy, AwsDefaultS3RetryStrategy):
|
396 |
+
options.value().retry_strategy = CS3RetryStrategy.GetAwsDefaultRetryStrategy(
|
397 |
+
retry_strategy.max_attempts)
|
398 |
+
else:
|
399 |
+
raise ValueError(f'Invalid retry_strategy {retry_strategy!r}')
|
400 |
+
|
401 |
+
with nogil:
|
402 |
+
wrapped = GetResultValue(CS3FileSystem.Make(options.value()))
|
403 |
+
|
404 |
+
self.init(<shared_ptr[CFileSystem]> wrapped)
|
405 |
+
|
406 |
+
cdef init(self, const shared_ptr[CFileSystem]& wrapped):
|
407 |
+
FileSystem.init(self, wrapped)
|
408 |
+
self.s3fs = <CS3FileSystem*> wrapped.get()
|
409 |
+
|
410 |
+
@staticmethod
|
411 |
+
@binding(True) # Required for cython < 3
|
412 |
+
def _reconstruct(kwargs):
|
413 |
+
# __reduce__ doesn't allow passing named arguments directly to the
|
414 |
+
# reconstructor, hence this wrapper.
|
415 |
+
return S3FileSystem(**kwargs)
|
416 |
+
|
417 |
+
def __reduce__(self):
|
418 |
+
cdef CS3Options opts = self.s3fs.options()
|
419 |
+
|
420 |
+
# if creds were explicitly provided, then use them
|
421 |
+
# else obtain them as they were last time.
|
422 |
+
if opts.credentials_kind == CS3CredentialsKind_Explicit:
|
423 |
+
access_key = frombytes(opts.GetAccessKey())
|
424 |
+
secret_key = frombytes(opts.GetSecretKey())
|
425 |
+
session_token = frombytes(opts.GetSessionToken())
|
426 |
+
else:
|
427 |
+
access_key = None
|
428 |
+
secret_key = None
|
429 |
+
session_token = None
|
430 |
+
|
431 |
+
return (
|
432 |
+
S3FileSystem._reconstruct, (dict(
|
433 |
+
access_key=access_key,
|
434 |
+
secret_key=secret_key,
|
435 |
+
session_token=session_token,
|
436 |
+
anonymous=(opts.credentials_kind ==
|
437 |
+
CS3CredentialsKind_Anonymous),
|
438 |
+
region=frombytes(opts.region),
|
439 |
+
scheme=frombytes(opts.scheme),
|
440 |
+
connect_timeout=opts.connect_timeout,
|
441 |
+
request_timeout=opts.request_timeout,
|
442 |
+
endpoint_override=frombytes(opts.endpoint_override),
|
443 |
+
role_arn=frombytes(opts.role_arn),
|
444 |
+
session_name=frombytes(opts.session_name),
|
445 |
+
external_id=frombytes(opts.external_id),
|
446 |
+
load_frequency=opts.load_frequency,
|
447 |
+
background_writes=opts.background_writes,
|
448 |
+
allow_bucket_creation=opts.allow_bucket_creation,
|
449 |
+
allow_bucket_deletion=opts.allow_bucket_deletion,
|
450 |
+
default_metadata=pyarrow_wrap_metadata(opts.default_metadata),
|
451 |
+
proxy_options={'scheme': frombytes(opts.proxy_options.scheme),
|
452 |
+
'host': frombytes(opts.proxy_options.host),
|
453 |
+
'port': opts.proxy_options.port,
|
454 |
+
'username': frombytes(
|
455 |
+
opts.proxy_options.username),
|
456 |
+
'password': frombytes(
|
457 |
+
opts.proxy_options.password)},
|
458 |
+
force_virtual_addressing=opts.force_virtual_addressing,
|
459 |
+
),)
|
460 |
+
)
|
461 |
+
|
462 |
+
@property
|
463 |
+
def region(self):
|
464 |
+
"""
|
465 |
+
The AWS region this filesystem connects to.
|
466 |
+
"""
|
467 |
+
return frombytes(self.s3fs.region())
|
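Since `__reduce__` above serializes the filesystem as a dict of its options, `S3FileSystem` instances can be pickled and shipped to worker processes. A minimal sketch of that round-trip, assuming a pyarrow build with S3 support (constructing the object does not contact AWS):

    import pickle
    from pyarrow import fs

    s3 = fs.S3FileSystem(region='us-east-2', anonymous=True)
    restored = pickle.loads(pickle.dumps(s3))  # goes through S3FileSystem._reconstruct
    assert restored.region == 'us-east-2'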
llmeval-env/lib/python3.10/site-packages/pyarrow/acero.py
ADDED
@@ -0,0 +1,395 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# ---------------------------------------------------------------------
# Implement Internal ExecPlan bindings

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from pyarrow.lib import Table
from pyarrow.compute import Expression, field

try:
    from pyarrow._acero import (  # noqa
        Declaration,
        ExecNodeOptions,
        TableSourceNodeOptions,
        FilterNodeOptions,
        ProjectNodeOptions,
        AggregateNodeOptions,
        OrderByNodeOptions,
        HashJoinNodeOptions,
        AsofJoinNodeOptions,
    )
except ImportError as exc:
    raise ImportError(
        f"The pyarrow installation is not built with support for 'acero' ({str(exc)})"
    ) from None


try:
    import pyarrow.dataset as ds
    from pyarrow._dataset import ScanNodeOptions
except ImportError:
    class DatasetModuleStub:
        class Dataset:
            pass

        class InMemoryDataset:
            pass
    ds = DatasetModuleStub


def _dataset_to_decl(dataset, use_threads=True):
    decl = Declaration("scan", ScanNodeOptions(dataset, use_threads=use_threads))

    # Get rid of special dataset columns
    # "__fragment_index", "__batch_index", "__last_in_fragment", "__filename"
    projections = [field(f) for f in dataset.schema.names]
    decl = Declaration.from_sequence(
        [decl, Declaration("project", ProjectNodeOptions(projections))]
    )

    filter_expr = dataset._scan_options.get("filter")
    if filter_expr is not None:
        # Filters applied in CScanNodeOptions are "best effort" for the scan node itself
        # so we always need to inject an additional Filter node to apply them for real.
        decl = Declaration.from_sequence(
            [decl, Declaration("filter", FilterNodeOptions(filter_expr))]
        )

    return decl


def _perform_join(join_type, left_operand, left_keys,
                  right_operand, right_keys,
                  left_suffix=None, right_suffix=None,
                  use_threads=True, coalesce_keys=False,
                  output_type=Table):
    """
    Perform a join of two tables or datasets.

    The result will be an output table with the result of the join operation.

    Parameters
    ----------
    join_type : str
        One of the supported join types.
    left_operand : Table or Dataset
        The left operand for the join operation.
    left_keys : str or list[str]
        The left key (or keys) on which the join operation should be performed.
    right_operand : Table or Dataset
        The right operand for the join operation.
    right_keys : str or list[str]
        The right key (or keys) on which the join operation should be performed.
    left_suffix : str, default None
        Which suffix to add to left column names. This prevents confusion
        when the columns in left and right operands have colliding names.
    right_suffix : str, default None
        Which suffix to add to the right column names. This prevents confusion
        when the columns in left and right operands have colliding names.
    use_threads : bool, default True
        Whether to use multithreading or not.
    coalesce_keys : bool, default False
        Whether duplicated join keys should be omitted from one of the sides
        in the join result.
    output_type : Table or InMemoryDataset
        The output type for the exec plan result.

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    # Prepare the left and right table keys to send them to the C++ function
    left_keys_order = {}
    if not isinstance(left_keys, (tuple, list)):
        left_keys = [left_keys]
    for idx, key in enumerate(left_keys):
        left_keys_order[key] = idx

    right_keys_order = {}
    if not isinstance(right_keys, (list, tuple)):
        right_keys = [right_keys]
    for idx, key in enumerate(right_keys):
        right_keys_order[key] = idx

    # By default expose all columns on both left and right table
    left_columns = left_operand.schema.names
    right_columns = right_operand.schema.names

    # Pick the join type
    if join_type == "left semi" or join_type == "left anti":
        right_columns = []
    elif join_type == "right semi" or join_type == "right anti":
        left_columns = []
    elif join_type == "inner" or join_type == "left outer":
        right_columns = [
            col for col in right_columns if col not in right_keys_order
        ]
    elif join_type == "right outer":
        left_columns = [
            col for col in left_columns if col not in left_keys_order
        ]

    # Turn the columns to vectors of FieldRefs
    # and set aside indices of keys.
    left_column_keys_indices = {}
    for idx, colname in enumerate(left_columns):
        if colname in left_keys:
            left_column_keys_indices[colname] = idx
    right_column_keys_indices = {}
    for idx, colname in enumerate(right_columns):
        if colname in right_keys:
            right_column_keys_indices[colname] = idx

    # Add the join node to the execplan
    if isinstance(left_operand, ds.Dataset):
        left_source = _dataset_to_decl(left_operand, use_threads=use_threads)
    else:
        left_source = Declaration("table_source", TableSourceNodeOptions(left_operand))
    if isinstance(right_operand, ds.Dataset):
        right_source = _dataset_to_decl(right_operand, use_threads=use_threads)
    else:
        right_source = Declaration(
            "table_source", TableSourceNodeOptions(right_operand)
        )

    if coalesce_keys:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys, left_columns, right_columns,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    else:
        join_opts = HashJoinNodeOptions(
            join_type, left_keys, right_keys,
            output_suffix_for_left=left_suffix or "",
            output_suffix_for_right=right_suffix or "",
        )
    decl = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source]
    )

    if coalesce_keys and join_type == "full outer":
        # In case of full outer joins, the join operation will output all columns
        # so that we can coalesce the keys and exclude duplicates in a subsequent
        # projection.
        left_columns_set = set(left_columns)
        right_columns_set = set(right_columns)
        # Where the right table columns start.
        right_operand_index = len(left_columns)
        projected_col_names = []
        projections = []
        for idx, col in enumerate(left_columns + right_columns):
            if idx < len(left_columns) and col in left_column_keys_indices:
                # Include keys only once and coalesce left+right table keys.
                projected_col_names.append(col)
                # Get the index of the right key that is being paired
                # with this left key. We do so by retrieving the name
                # of the right key that is in the same position in the provided keys
                # and then looking up the index for that name in the right table.
                right_key_index = right_column_keys_indices[
                    right_keys[left_keys_order[col]]]
                projections.append(
                    Expression._call("coalesce", [
                        Expression._field(idx), Expression._field(
                            right_operand_index+right_key_index)
                    ])
                )
            elif idx >= right_operand_index and col in right_column_keys_indices:
                # Do not include right table keys, as they would lead to duplicated keys.
                continue
            else:
                # For all the other columns include them as they are.
                # Just recompute the suffixes that the join produced as the projection
                # would lose them otherwise.
                if (
                    left_suffix and idx < right_operand_index
                    and col in right_columns_set
                ):
                    col += left_suffix
                if (
                    right_suffix and idx >= right_operand_index
                    and col in left_columns_set
                ):
                    col += right_suffix
                projected_col_names.append(col)
                projections.append(
                    Expression._field(idx)
                )
        projection = Declaration(
            "project", ProjectNodeOptions(projections, projected_col_names)
        )
        decl = Declaration.from_sequence([decl, projection])

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _perform_join_asof(left_operand, left_on, left_by,
                       right_operand, right_on, right_by,
                       tolerance, use_threads=True,
                       output_type=Table):
    """
    Perform an asof join of two tables or datasets.

    The result will be an output table with the result of the join operation.

    Parameters
    ----------
    left_operand : Table or Dataset
        The left operand for the join operation.
    left_on : str
        The left key on which the join operation should be performed.
    left_by : str or list[str]
        The left key (or keys) on which rows must match exactly.
    right_operand : Table or Dataset
        The right operand for the join operation.
    right_on : str
        The right key on which the join operation should be performed.
    right_by : str or list[str]
        The right key (or keys) on which rows must match exactly.
    tolerance : int
        The tolerance to use for the asof join. The tolerance is interpreted in
        the same units as the "on" key.
    output_type : Table or InMemoryDataset
        The output type for the exec plan result.

    Returns
    -------
    result_table : Table or InMemoryDataset
    """
    if not isinstance(left_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}")
    if not isinstance(right_operand, (Table, ds.Dataset)):
        raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}")

    if not isinstance(left_by, (tuple, list)):
        left_by = [left_by]
    if not isinstance(right_by, (tuple, list)):
        right_by = [right_by]

    # AsofJoin does not return on or by columns for right_operand.
    right_columns = [
        col for col in right_operand.schema.names
        if col not in [right_on] + right_by
    ]
    columns_collisions = set(left_operand.schema.names) & set(right_columns)
    if columns_collisions:
        raise ValueError(
            "Columns {} present in both tables. AsofJoin does not support "
            "column collisions.".format(columns_collisions),
        )

    # Add the join node to the execplan
    if isinstance(left_operand, ds.Dataset):
        left_source = _dataset_to_decl(left_operand, use_threads=use_threads)
    else:
        left_source = Declaration(
            "table_source", TableSourceNodeOptions(left_operand),
        )
    if isinstance(right_operand, ds.Dataset):
        right_source = _dataset_to_decl(right_operand, use_threads=use_threads)
    else:
        right_source = Declaration(
            "table_source", TableSourceNodeOptions(right_operand)
        )

    join_opts = AsofJoinNodeOptions(
        left_on, left_by, right_on, right_by, tolerance
    )
    decl = Declaration(
        "asofjoin", options=join_opts, inputs=[left_source, right_source]
    )

    result_table = decl.to_table(use_threads=use_threads)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _filter_table(table, expression):
    """Filter rows of a table based on the provided expression.

    The result will be an output table with only the rows matching
    the provided expression.

    Parameters
    ----------
    table : Table or Dataset
        Table or Dataset that should be filtered.
    expression : Expression
        The expression on which rows should be filtered.

    Returns
    -------
    Table
    """
    decl = Declaration.from_sequence([
        Declaration("table_source", options=TableSourceNodeOptions(table)),
        Declaration("filter", options=FilterNodeOptions(expression))
    ])
    return decl.to_table(use_threads=True)


def _sort_source(table_or_dataset, sort_keys, output_type=Table, **kwargs):

    if isinstance(table_or_dataset, ds.Dataset):
        data_source = _dataset_to_decl(table_or_dataset, use_threads=True)
    else:
        data_source = Declaration(
            "table_source", TableSourceNodeOptions(table_or_dataset)
        )

    order_by = Declaration("order_by", OrderByNodeOptions(sort_keys, **kwargs))

    decl = Declaration.from_sequence([data_source, order_by])
    result_table = decl.to_table(use_threads=True)

    if output_type == Table:
        return result_table
    elif output_type == ds.InMemoryDataset:
        return ds.InMemoryDataset(result_table)
    else:
        raise TypeError("Unsupported output type")


def _group_by(table, aggregates, keys, use_threads=True):

    decl = Declaration.from_sequence([
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("aggregate", AggregateNodeOptions(aggregates, keys=keys))
    ])
    return decl.to_table(use_threads=use_threads)
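The helpers above all follow the same shape: build source Declarations, chain node Declarations after them, then call `to_table()`. A minimal sketch of the hash-join pipeline that `_perform_join` assembles, written against the same public `pyarrow.acero` building blocks (assumes a pyarrow build with Acero support; the tables are illustrative):

    import pyarrow as pa
    from pyarrow.acero import (
        Declaration, TableSourceNodeOptions, HashJoinNodeOptions,
    )

    left = pa.table({"key": [1, 2, 3], "a": ["x", "y", "z"]})
    right = pa.table({"key": [2, 3, 4], "b": [20, 30, 40]})

    decl = Declaration(
        "hashjoin",
        options=HashJoinNodeOptions("inner", left_keys="key", right_keys="key"),
        inputs=[
            Declaration("table_source", TableSourceNodeOptions(left)),
            Declaration("table_source", TableSourceNodeOptions(right)),
        ],
    )
    print(decl.to_table())  # matching rows only; both key columns are kept

Unlike this raw node, `_perform_join` additionally prunes or coalesces the key columns depending on `join_type` and `coalesce_keys`.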
llmeval-env/lib/python3.10/site-packages/pyarrow/compat.pxi
ADDED
@@ -0,0 +1,71 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.


def encode_file_path(path):
    if isinstance(path, str):
        # POSIX systems can handle utf-8. UTF8 is converted to utf16-le in
        # libarrow
        encoded_path = path.encode('utf-8')
    else:
        encoded_path = path

    # Windows file systems require utf-16le for file names; Arrow C++ libraries
    # will convert utf8 to utf16
    return encoded_path


# Starting with Python 3.7, dicts are guaranteed to be insertion-ordered.
ordered_dict = dict


try:
    import cloudpickle as pickle
except ImportError:
    import pickle


def tobytes(o):
    """
    Encode a unicode or bytes string to bytes.

    Parameters
    ----------
    o : str or bytes
        Input string.
    """
    if isinstance(o, str):
        return o.encode('utf8')
    else:
        return o


def frombytes(o, *, safe=False):
    """
    Decode the given bytestring to unicode.

    Parameters
    ----------
    o : bytes-like
        Input object.
    safe : bool, default False
        If true, replace undecodable bytes with a replacement character
        instead of raising.
    """
    if safe:
        return o.decode('utf8', errors='replace')
    else:
        return o.decode('utf8')
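These helpers normalize the str/bytes boundary between Python and the Arrow C++ layer. A pure-Python illustration of their behavior (the definitions above live in a Cython include, but act like plain functions):

    text = 'région'
    data = text.encode('utf8')          # what tobytes(text) returns
    assert data.decode('utf8') == text  # what frombytes(data) returns
    # frombytes(data, safe=True) substitutes U+FFFD instead of raising:
    assert b'\xff'.decode('utf8', errors='replace') == '\ufffd'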
llmeval-env/lib/python3.10/site-packages/pyarrow/compute.py
ADDED
@@ -0,0 +1,731 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from pyarrow._compute import (  # noqa
    Function,
    FunctionOptions,
    FunctionRegistry,
    HashAggregateFunction,
    HashAggregateKernel,
    Kernel,
    ScalarAggregateFunction,
    ScalarAggregateKernel,
    ScalarFunction,
    ScalarKernel,
    VectorFunction,
    VectorKernel,
    # Option classes
    ArraySortOptions,
    AssumeTimezoneOptions,
    CastOptions,
    CountOptions,
    CumulativeOptions,
    CumulativeSumOptions,
    DayOfWeekOptions,
    DictionaryEncodeOptions,
    RunEndEncodeOptions,
    ElementWiseAggregateOptions,
    ExtractRegexOptions,
    FilterOptions,
    IndexOptions,
    JoinOptions,
    ListSliceOptions,
    MakeStructOptions,
    MapLookupOptions,
    MatchSubstringOptions,
    ModeOptions,
    NullOptions,
    PadOptions,
    PairwiseOptions,
    PartitionNthOptions,
    QuantileOptions,
    RandomOptions,
    RankOptions,
    ReplaceSliceOptions,
    ReplaceSubstringOptions,
    RoundBinaryOptions,
    RoundOptions,
    RoundTemporalOptions,
    RoundToMultipleOptions,
    ScalarAggregateOptions,
    SelectKOptions,
    SetLookupOptions,
    SliceOptions,
    SortOptions,
    SplitOptions,
    SplitPatternOptions,
    StrftimeOptions,
    StrptimeOptions,
    StructFieldOptions,
    TakeOptions,
    TDigestOptions,
    TrimOptions,
    Utf8NormalizeOptions,
    VarianceOptions,
    WeekOptions,
    # Functions
    call_function,
    function_registry,
    get_function,
    list_functions,
    # Udf
    call_tabular_function,
    register_scalar_function,
    register_tabular_function,
    register_aggregate_function,
    register_vector_function,
    UdfContext,
    # Expressions
    Expression,
)

from collections import namedtuple
import inspect
from textwrap import dedent
import warnings

import pyarrow as pa
from pyarrow import _compute_docstrings
from pyarrow.vendored import docscrape


def _get_arg_names(func):
    return func._doc.arg_names


_OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',))


def _scrape_options_class_doc(options_class):
    if not options_class.__doc__:
        return None
    doc = docscrape.NumpyDocString(options_class.__doc__)
    return _OptionsClassDoc(doc['Parameters'])


def _decorate_compute_function(wrapper, exposed_name, func, options_class):
    # Decorate the given compute function wrapper with useful metadata
    # and documentation.
    cpp_doc = func._doc

    wrapper.__arrow_compute_function__ = dict(
        name=func.name,
        arity=func.arity,
        options_class=cpp_doc.options_class,
        options_required=cpp_doc.options_required)
    wrapper.__name__ = exposed_name
    wrapper.__qualname__ = exposed_name

    doc_pieces = []

    # 1. One-line summary
    summary = cpp_doc.summary
    if not summary:
        arg_str = "arguments" if func.arity > 1 else "argument"
        summary = ("Call compute function {!r} with the given {}"
                   .format(func.name, arg_str))

    doc_pieces.append(f"{summary}.\n\n")

    # 2. Multi-line description
    description = cpp_doc.description
    if description:
        doc_pieces.append(f"{description}\n\n")

    doc_addition = _compute_docstrings.function_doc_additions.get(func.name)

    # 3. Parameter description
    doc_pieces.append(dedent("""\
        Parameters
        ----------
        """))

    # 3a. Compute function parameters
    arg_names = _get_arg_names(func)
    for arg_name in arg_names:
        if func.kind in ('vector', 'scalar_aggregate'):
            arg_type = 'Array-like'
        else:
            arg_type = 'Array-like or scalar-like'
        doc_pieces.append(f"{arg_name} : {arg_type}\n")
        doc_pieces.append("    Argument to compute function.\n")

    # 3b. Compute function option values
    if options_class is not None:
        options_class_doc = _scrape_options_class_doc(options_class)
        if options_class_doc:
            for p in options_class_doc.params:
                doc_pieces.append(f"{p.name} : {p.type}\n")
                for s in p.desc:
                    doc_pieces.append(f"    {s}\n")
        else:
            warnings.warn(f"Options class {options_class.__name__} "
                          f"does not have a docstring", RuntimeWarning)
            options_sig = inspect.signature(options_class)
            for p in options_sig.parameters.values():
                doc_pieces.append(dedent("""\
                {0} : optional
                    Parameter for {1} constructor. Either `options`
                    or `{0}` can be passed, but not both at the same time.
                """.format(p.name, options_class.__name__)))
        doc_pieces.append(dedent(f"""\
            options : pyarrow.compute.{options_class.__name__}, optional
                Alternative way of passing options.
            """))

    doc_pieces.append(dedent("""\
        memory_pool : pyarrow.MemoryPool, optional
            If not passed, will allocate memory from the default memory pool.
        """))

    # 4. Custom addition (e.g. examples)
    if doc_addition is not None:
        doc_pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n")))

    wrapper.__doc__ = "".join(doc_pieces)
    return wrapper


def _get_options_class(func):
    class_name = func._doc.options_class
    if not class_name:
        return None
    try:
        return globals()[class_name]
    except KeyError:
        warnings.warn("Python binding for {} not exposed"
                      .format(class_name), RuntimeWarning)
        return None


def _handle_options(name, options_class, options, args, kwargs):
    if args or kwargs:
        if options is not None:
            raise TypeError(
                "Function {!r} called with both an 'options' argument "
                "and additional arguments"
                .format(name))
        return options_class(*args, **kwargs)

    if options is not None:
        if isinstance(options, dict):
            return options_class(**options)
        elif isinstance(options, options_class):
            return options
        raise TypeError(
            "Function {!r} expected a {} parameter, got {}"
            .format(name, options_class, type(options)))

    return None


def _make_generic_wrapper(func_name, func, options_class, arity):
    if options_class is None:
        def wrapper(*args, memory_pool=None):
            if arity is not Ellipsis and len(args) != arity:
                raise TypeError(
                    f"{func_name} takes {arity} positional argument(s), "
                    f"but {len(args)} were given"
                )
            if args and isinstance(args[0], Expression):
                return Expression._call(func_name, list(args))
            return func.call(args, None, memory_pool)
    else:
        def wrapper(*args, memory_pool=None, options=None, **kwargs):
            if arity is not Ellipsis:
                if len(args) < arity:
                    raise TypeError(
                        f"{func_name} takes {arity} positional argument(s), "
                        f"but {len(args)} were given"
                    )
                option_args = args[arity:]
                args = args[:arity]
            else:
                option_args = ()
            options = _handle_options(func_name, options_class, options,
                                      option_args, kwargs)
            if args and isinstance(args[0], Expression):
                return Expression._call(func_name, list(args), options)
            return func.call(args, options, memory_pool)
    return wrapper


def _make_signature(arg_names, var_arg_names, options_class):
    from inspect import Parameter
    params = []
    for name in arg_names:
        params.append(Parameter(name, Parameter.POSITIONAL_ONLY))
    for name in var_arg_names:
        params.append(Parameter(name, Parameter.VAR_POSITIONAL))
    if options_class is not None:
        options_sig = inspect.signature(options_class)
        for p in options_sig.parameters.values():
            assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD,
                              Parameter.KEYWORD_ONLY)
            if var_arg_names:
                # Cannot have a positional argument after a *args
                p = p.replace(kind=Parameter.KEYWORD_ONLY)
            params.append(p)
        params.append(Parameter("options", Parameter.KEYWORD_ONLY,
                                default=None))
    params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY,
                            default=None))
    return inspect.Signature(params)


def _wrap_function(name, func):
    options_class = _get_options_class(func)
    arg_names = _get_arg_names(func)
    has_vararg = arg_names and arg_names[-1].startswith('*')
    if has_vararg:
        var_arg_names = [arg_names.pop().lstrip('*')]
    else:
        var_arg_names = []

    wrapper = _make_generic_wrapper(
        name, func, options_class, arity=func.arity)
    wrapper.__signature__ = _make_signature(arg_names, var_arg_names,
                                            options_class)
    return _decorate_compute_function(wrapper, name, func, options_class)


def _make_global_functions():
    """
    Make global functions wrapping each compute function.

    Note that some of the automatically-generated wrappers may be overridden
    by custom versions below.
    """
    g = globals()
    reg = function_registry()

    # Avoid clashes with Python keywords
    rewrites = {'and': 'and_',
                'or': 'or_'}

    for cpp_name in reg.list_functions():
        name = rewrites.get(cpp_name, cpp_name)
        func = reg.get_function(cpp_name)
        if func.kind == "hash_aggregate":
            # Hash aggregate functions are not callable,
            # so let's not expose them at module level.
            continue
        if func.kind == "scalar_aggregate" and func.arity == 0:
            # Nullary scalar aggregate functions are not callable
            # directly so let's not expose them at module level.
            continue
        assert name not in g, name
        g[cpp_name] = g[name] = _wrap_function(name, func)


_make_global_functions()
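# The generated wrappers accept options in three equivalent ways, because
# _handle_options above builds the options object from keyword arguments,
# from a dict, or passes an options instance through unchanged. A minimal
# sketch, runnable as a standalone script (not as part of this module):
#
#     import pyarrow as pa
#     import pyarrow.compute as pc
#
#     arr = pa.array([1.234, 5.678])
#     a = pc.round(arr, ndigits=1)                            # keyword form
#     b = pc.round(arr, options=pc.RoundOptions(ndigits=1))   # options object
#     c = pc.round(arr, options={"ndigits": 1})               # options dict
#     assert a.equals(b) and b.equals(c)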
336 |
+
|
337 |
+
|
338 |
+
def cast(arr, target_type=None, safe=None, options=None, memory_pool=None):
|
339 |
+
"""
|
340 |
+
Cast array values to another data type. Can also be invoked as an array
|
341 |
+
instance method.
|
342 |
+
|
343 |
+
Parameters
|
344 |
+
----------
|
345 |
+
arr : Array-like
|
346 |
+
target_type : DataType or str
|
347 |
+
Type to cast to
|
348 |
+
safe : bool, default True
|
349 |
+
Check for overflows or other unsafe conversions
|
350 |
+
options : CastOptions, default None
|
351 |
+
Additional checks pass by CastOptions
|
352 |
+
memory_pool : MemoryPool, optional
|
353 |
+
memory pool to use for allocations during function execution.
|
354 |
+
|
355 |
+
Examples
|
356 |
+
--------
|
357 |
+
>>> from datetime import datetime
|
358 |
+
>>> import pyarrow as pa
|
359 |
+
>>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)])
|
360 |
+
>>> arr.type
|
361 |
+
TimestampType(timestamp[us])
|
362 |
+
|
363 |
+
You can use ``pyarrow.DataType`` objects to specify the target type:
|
364 |
+
|
365 |
+
>>> cast(arr, pa.timestamp('ms'))
|
366 |
+
<pyarrow.lib.TimestampArray object at ...>
|
367 |
+
[
|
368 |
+
2010-01-01 00:00:00.000,
|
369 |
+
2015-01-01 00:00:00.000
|
370 |
+
]
|
371 |
+
|
372 |
+
>>> cast(arr, pa.timestamp('ms')).type
|
373 |
+
TimestampType(timestamp[ms])
|
374 |
+
|
375 |
+
Alternatively, it is also supported to use the string aliases for these
|
376 |
+
types:
|
377 |
+
|
378 |
+
>>> arr.cast('timestamp[ms]')
|
379 |
+
<pyarrow.lib.TimestampArray object at ...>
|
380 |
+
[
|
381 |
+
2010-01-01 00:00:00.000,
|
382 |
+
2015-01-01 00:00:00.000
|
383 |
+
]
|
384 |
+
>>> arr.cast('timestamp[ms]').type
|
385 |
+
TimestampType(timestamp[ms])
|
386 |
+
|
387 |
+
Returns
|
388 |
+
-------
|
389 |
+
casted : Array
|
390 |
+
The cast result as a new Array
|
391 |
+
"""
|
392 |
+
safe_vars_passed = (safe is not None) or (target_type is not None)
|
393 |
+
|
394 |
+
if safe_vars_passed and (options is not None):
|
395 |
+
raise ValueError("Must either pass values for 'target_type' and 'safe'"
|
396 |
+
" or pass a value for 'options'")
|
397 |
+
|
398 |
+
if options is None:
|
399 |
+
target_type = pa.types.lib.ensure_type(target_type)
|
400 |
+
if safe is False:
|
401 |
+
options = CastOptions.unsafe(target_type)
|
402 |
+
else:
|
403 |
+
options = CastOptions.safe(target_type)
|
404 |
+
return call_function("cast", [arr], options, memory_pool)
|
405 |
+
|
406 |
+
|
407 |
+
def index(data, value, start=None, end=None, *, memory_pool=None):
|
408 |
+
"""
|
409 |
+
Find the index of the first occurrence of a given value.
|
410 |
+
|
411 |
+
Parameters
|
412 |
+
----------
|
413 |
+
data : Array-like
|
414 |
+
value : Scalar-like object
|
415 |
+
The value to search for.
|
416 |
+
start : int, optional
|
417 |
+
end : int, optional
|
418 |
+
memory_pool : MemoryPool, optional
|
419 |
+
If not passed, will allocate memory from the default memory pool.
|
420 |
+
|
421 |
+
Returns
|
422 |
+
-------
|
423 |
+
index : int
|
424 |
+
the index, or -1 if not found
|
425 |
+
"""
|
426 |
+
if start is not None:
|
427 |
+
if end is not None:
|
428 |
+
data = data.slice(start, end - start)
|
429 |
+
else:
|
430 |
+
data = data.slice(start)
|
431 |
+
elif end is not None:
|
432 |
+
data = data.slice(0, end)
|
433 |
+
|
434 |
+
if not isinstance(value, pa.Scalar):
|
435 |
+
value = pa.scalar(value, type=data.type)
|
436 |
+
elif data.type != value.type:
|
437 |
+
value = pa.scalar(value.as_py(), type=data.type)
|
438 |
+
options = IndexOptions(value=value)
|
439 |
+
result = call_function('index', [data], options, memory_pool)
|
440 |
+
if start is not None and result.as_py() >= 0:
|
441 |
+
result = pa.scalar(result.as_py() + start, type=pa.int64())
|
442 |
+
return result
|
443 |
+
|
444 |
+
|
445 |
+
def take(data, indices, *, boundscheck=True, memory_pool=None):
|
446 |
+
"""
|
447 |
+
Select values (or records) from array- or table-like data given integer
|
448 |
+
selection indices.
|
449 |
+
|
450 |
+
The result will be of the same type(s) as the input, with elements taken
|
451 |
+
from the input array (or record batch / table fields) at the given
|
452 |
+
indices. If an index is null then the corresponding value in the output
|
453 |
+
will be null.
|
454 |
+
|
455 |
+
Parameters
|
456 |
+
----------
|
457 |
+
data : Array, ChunkedArray, RecordBatch, or Table
|
458 |
+
indices : Array, ChunkedArray
|
459 |
+
Must be of integer type
|
460 |
+
boundscheck : boolean, default True
|
461 |
+
Whether to boundscheck the indices. If False and there is an out of
|
462 |
+
bounds index, will likely cause the process to crash.
|
463 |
+
memory_pool : MemoryPool, optional
|
464 |
+
If not passed, will allocate memory from the default memory pool.
|
465 |
+
|
466 |
+
Returns
|
467 |
+
-------
|
468 |
+
result : depends on inputs
|
469 |
+
Selected values for the given indices
|
470 |
+
|
471 |
+
Examples
|
472 |
+
--------
|
473 |
+
>>> import pyarrow as pa
|
474 |
+
>>> arr = pa.array(["a", "b", "c", None, "e", "f"])
|
475 |
+
>>> indices = pa.array([0, None, 4, 3])
|
476 |
+
>>> arr.take(indices)
|
477 |
+
<pyarrow.lib.StringArray object at ...>
|
478 |
+
[
|
479 |
+
"a",
|
480 |
+
null,
|
481 |
+
"e",
|
482 |
+
null
|
483 |
+
]
|
484 |
+
"""
|
485 |
+
options = TakeOptions(boundscheck=boundscheck)
|
486 |
+
return call_function('take', [data, indices], options, memory_pool)
|
487 |
+
|
488 |
+
|
489 |
+
def fill_null(values, fill_value):
|
490 |
+
"""Replace each null element in values with a corresponding
|
491 |
+
element from fill_value.
|
492 |
+
|
493 |
+
If fill_value is scalar-like, then every null element in values
|
494 |
+
will be replaced with fill_value. If fill_value is array-like,
|
495 |
+
then the i-th element in values will be replaced with the i-th
|
496 |
+
element in fill_value.
|
497 |
+
|
498 |
+
The fill_value's type must be the same as that of values, or it
|
499 |
+
must be able to be implicitly casted to the array's type.
|
500 |
+
|
501 |
+
This is an alias for :func:`coalesce`.
|
502 |
+
|
503 |
+
Parameters
|
504 |
+
----------
|
505 |
+
values : Array, ChunkedArray, or Scalar-like object
|
506 |
+
Each null element is replaced with the corresponding value
|
507 |
+
from fill_value.
|
508 |
+
fill_value : Array, ChunkedArray, or Scalar-like object
|
509 |
+
If not same type as values, will attempt to cast.
|
510 |
+
|
511 |
+
Returns
|
512 |
+
-------
|
513 |
+
result : depends on inputs
|
514 |
+
Values with all null elements replaced
|
515 |
+
|
516 |
+
Examples
|
517 |
+
--------
|
518 |
+
>>> import pyarrow as pa
|
519 |
+
>>> arr = pa.array([1, 2, None, 3], type=pa.int8())
|
520 |
+
>>> fill_value = pa.scalar(5, type=pa.int8())
|
521 |
+
>>> arr.fill_null(fill_value)
|
522 |
+
<pyarrow.lib.Int8Array object at ...>
|
523 |
+
[
|
524 |
+
1,
|
525 |
+
2,
|
526 |
+
5,
|
527 |
+
3
|
528 |
+
]
|
529 |
+
>>> arr = pa.array([1, 2, None, 4, None])
|
530 |
+
>>> arr.fill_null(pa.array([10, 20, 30, 40, 50]))
|
531 |
+
<pyarrow.lib.Int64Array object at ...>
|
532 |
+
[
|
533 |
+
1,
|
534 |
+
2,
|
535 |
+
30,
|
536 |
+
4,
|
537 |
+
50
|
538 |
+
]
|
539 |
+
"""
|
540 |
+
if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)):
|
541 |
+
fill_value = pa.scalar(fill_value, type=values.type)
|
542 |
+
elif values.type != fill_value.type:
|
543 |
+
fill_value = pa.scalar(fill_value.as_py(), type=values.type)
|
544 |
+
|
545 |
+
return call_function("coalesce", [values, fill_value])
|
546 |
+
|
547 |
+
|
548 |
+
def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
    """
    Select the indices of the top-k ordered elements from array- or table-like
    data.

    This is a specialization for :func:`select_k_unstable`. Output is not
    guaranteed to be stable.

    Parameters
    ----------
    values : Array, ChunkedArray, RecordBatch, or Table
        Data to sort and get top indices from.
    k : int
        The number of top elements to keep.
    sort_keys : List-like
        Column key names to order by when input is table-like data.
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    result : Array
        Indices of the top-k ordered elements

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.compute as pc
    >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
    >>> pc.top_k_unstable(arr, k=3)
    <pyarrow.lib.UInt64Array object at ...>
    [
      5,
      4,
      2
    ]
    """
    if sort_keys is None:
        sort_keys = []
    if isinstance(values, (pa.Array, pa.ChunkedArray)):
        sort_keys.append(("dummy", "descending"))
    else:
        sort_keys = map(lambda key_name: (key_name, "descending"), sort_keys)
    options = SelectKOptions(k, sort_keys)
    return call_function("select_k_unstable", [values], options, memory_pool)


def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None):
    """
    Select the indices of the bottom-k ordered elements from
    array- or table-like data.

    This is a specialization for :func:`select_k_unstable`. Output is not
    guaranteed to be stable.

    Parameters
    ----------
    values : Array, ChunkedArray, RecordBatch, or Table
        Data to sort and get bottom indices from.
    k : int
        The number of bottom elements to keep.
    sort_keys : List-like
        Column key names to order by when input is table-like data.
    memory_pool : MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.

    Returns
    -------
    result : Array of indices
        Indices of the bottom-k ordered elements

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.compute as pc
    >>> arr = pa.array(["a", "b", "c", None, "e", "f"])
    >>> pc.bottom_k_unstable(arr, k=3)
    <pyarrow.lib.UInt64Array object at ...>
    [
      0,
      1,
      2
    ]
    """
    if sort_keys is None:
        sort_keys = []
    if isinstance(values, (pa.Array, pa.ChunkedArray)):
        sort_keys.append(("dummy", "ascending"))
    else:
        sort_keys = map(lambda key_name: (key_name, "ascending"), sort_keys)
    options = SelectKOptions(k, sort_keys)
    return call_function("select_k_unstable", [values], options, memory_pool)


def random(n, *, initializer='system', options=None, memory_pool=None):
    """
    Generate numbers in the range [0, 1).

    Generated values are uniformly-distributed, double-precision
    in range [0, 1). Algorithm and seed can be changed via RandomOptions.

    Parameters
    ----------
    n : int
        Number of values to generate, must be greater than or equal to 0
    initializer : int or str
        How to initialize the underlying random generator.
        If an integer is given, it is used as a seed.
        If "system" is given, the random generator is initialized with
        a system-specific source of (hopefully true) randomness.
        Other values are invalid.
    options : pyarrow.compute.RandomOptions, optional
        Alternative way of passing options.
    memory_pool : pyarrow.MemoryPool, optional
        If not passed, will allocate memory from the default memory pool.
    """
    options = RandomOptions(initializer=initializer)
    return call_function("random", [], options, memory_pool, length=n)


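# A minimal usage sketch for random(), not part of the original module.
# Passing an integer initializer seeds the generator, making the output
# reproducible across runs:
#
#   >>> import pyarrow.compute as pc
#   >>> vals = pc.random(3, initializer=42)
#   >>> len(vals), str(vals.type)
#   (3, 'double')

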
def field(*name_or_index):
    """Reference a column of the dataset.

    Stores only the field's name. Type and other information is known only when
    the expression is bound to a dataset having an explicit schema.

    Nested references are allowed by passing multiple names or a tuple of
    names. For example ``('foo', 'bar')`` references the field named "bar"
    inside the field named "foo".

    Parameters
    ----------
    *name_or_index : string, multiple strings, tuple or int
        The name or index of the (possibly nested) field the expression
        references.

    Returns
    -------
    field_expr : Expression
        Reference to the given field

    Examples
    --------
    >>> import pyarrow.compute as pc
    >>> pc.field("a")
    <pyarrow.compute.Expression a>
    >>> pc.field(1)
    <pyarrow.compute.Expression FieldPath(1)>
    >>> pc.field(("a", "b"))
    <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
    >>> pc.field("a", "b")
    <pyarrow.compute.Expression FieldRef.Nested(FieldRef.Name(a) ...
    """
    n = len(name_or_index)
    if n == 1:
        if isinstance(name_or_index[0], (str, int)):
            return Expression._field(name_or_index[0])
        elif isinstance(name_or_index[0], tuple):
            return Expression._nested_field(name_or_index[0])
        else:
            raise TypeError(
                "field reference should be str, multiple str, tuple or "
                f"integer, got {type(name_or_index[0])}"
            )
    # In case of multiple strings not supplied in a tuple
    else:
        return Expression._nested_field(name_or_index)


def scalar(value):
    """Expression representing a scalar value.

    Parameters
    ----------
    value : bool, int, float or string
        Python value of the scalar. Note that only a subset of types are
        currently supported.

    Returns
    -------
    scalar_expr : Expression
        An Expression representing the scalar value
    """
    return Expression._scalar(value)
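# A minimal sketch of how field() and scalar() compose into dataset filter
# expressions (not part of the original module; the column name "a" is
# hypothetical):
#
#   >>> import pyarrow.compute as pc
#   >>> expr = pc.field("a") > pc.scalar(3)
#   >>> print(expr)
#   (a > 3)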
llmeval-env/lib/python3.10/site-packages/pyarrow/feather.py
ADDED
@@ -0,0 +1,277 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


import os

from pyarrow.pandas_compat import _pandas_api  # noqa
from pyarrow.lib import (Codec, Table,  # noqa
                         concat_tables, schema)
import pyarrow.lib as ext
from pyarrow import _feather
from pyarrow._feather import FeatherError  # noqa: F401


class FeatherDataset:
    """
    Encapsulates details of reading a list of Feather files.

    Parameters
    ----------
    path_or_paths : List[str]
        A list of file names
    validate_schema : bool, default True
        Check that individual file schemas are all the same / compatible
    """

    def __init__(self, path_or_paths, validate_schema=True):
        self.paths = path_or_paths
        self.validate_schema = validate_schema

    def read_table(self, columns=None):
        """
        Read multiple feather files as a single pyarrow.Table

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns)
        """
        _fil = read_table(self.paths[0], columns=columns)
        self._tables = [_fil]
        self.schema = _fil.schema

        for path in self.paths[1:]:
            table = read_table(path, columns=columns)
            if self.validate_schema:
                self.validate_schemas(path, table)
            self._tables.append(table)
        return concat_tables(self._tables)

    def validate_schemas(self, piece, table):
        if not self.schema.equals(table.schema):
            raise ValueError('Schema in {!s} was different. \n'
                             '{!s}\n\nvs\n\n{!s}'
                             .format(piece, self.schema,
                                     table.schema))

    def read_pandas(self, columns=None, use_threads=True):
        """
        Read multiple Feather files as a single pandas DataFrame

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the file
        use_threads : bool, default True
            Use multiple threads when converting to pandas

        Returns
        -------
        pandas.DataFrame
            Content of the file as a pandas DataFrame (of columns)
        """
        return self.read_table(columns=columns).to_pandas(
            use_threads=use_threads)


def check_chunked_overflow(name, col):
    if col.num_chunks == 1:
        return

    if col.type in (ext.binary(), ext.string()):
        raise ValueError("Column '{}' exceeds 2GB maximum capacity of "
                         "a Feather binary column. This restriction may be "
                         "lifted in the future".format(name))
    else:
        # TODO(wesm): Not sure when else this might be reached
        raise ValueError("Column '{}' of type {} was chunked on conversion "
                         "to Arrow and cannot be currently written to "
                         "Feather format".format(name, str(col.type)))


_FEATHER_SUPPORTED_CODECS = {'lz4', 'zstd', 'uncompressed'}


def write_feather(df, dest, compression=None, compression_level=None,
                  chunksize=None, version=2):
    """
    Write a pandas.DataFrame to Feather format.

    Parameters
    ----------
    df : pandas.DataFrame or pyarrow.Table
        Data to write out as Feather format.
    dest : str
        Local destination path.
    compression : string, default None
        Can be one of {"zstd", "lz4", "uncompressed"}. The default of None uses
        LZ4 for V2 files if it is available, otherwise uncompressed.
    compression_level : int, default None
        Use a compression level particular to the chosen compressor. If None
        use the default compression level
    chunksize : int, default None
        For V2 files, the internal maximum size of Arrow RecordBatch chunks
        when writing the Arrow IPC file format. None means use the default,
        which is currently 64K
    version : int, default 2
        Feather file version. Version 2 is the current. Version 1 is the more
        limited legacy format
    """
    if _pandas_api.have_pandas:
        if (_pandas_api.has_sparse and
                isinstance(df, _pandas_api.pd.SparseDataFrame)):
            df = df.to_dense()

        if _pandas_api.is_data_frame(df):
            # Feather v1 creates a new column in the resultant Table to
            # store index information if index type is not RangeIndex

            if version == 1:
                preserve_index = False
            elif version == 2:
                preserve_index = None
            else:
                raise ValueError("Version value should either be 1 or 2")

            table = Table.from_pandas(df, preserve_index=preserve_index)

            if version == 1:
                # Version 1 does not support chunking
                for i, name in enumerate(table.schema.names):
                    col = table[i]
                    check_chunked_overflow(name, col)
        else:
            table = df

    if version == 1:
        if len(table.column_names) > len(set(table.column_names)):
            raise ValueError("cannot serialize duplicate column names")

        if compression is not None:
            raise ValueError("Feather V1 files do not support compression "
                             "option")

        if chunksize is not None:
            raise ValueError("Feather V1 files do not support chunksize "
                             "option")
    else:
        if compression is None and Codec.is_available('lz4_frame'):
            compression = 'lz4'
        elif (compression is not None and
              compression not in _FEATHER_SUPPORTED_CODECS):
            raise ValueError('compression="{}" not supported, must be '
                             'one of {}'.format(compression,
                                                _FEATHER_SUPPORTED_CODECS))

    try:
        _feather.write_feather(table, dest, compression=compression,
                               compression_level=compression_level,
                               chunksize=chunksize, version=version)
    except Exception:
        if isinstance(dest, str):
            try:
                os.remove(dest)
            except os.error:
                pass
        raise


def read_feather(source, columns=None, use_threads=True,
                 memory_map=False, **kwargs):
    """
    Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use
    feather.read_table.

    Parameters
    ----------
    source : str file path, or file-like object
        You can use a MemoryMappedFile as source to explicitly use a memory
        map.
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns are
        read.
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads. If false,
        this restriction applies both to the conversion to pandas and to
        the reading from Feather format.
    memory_map : boolean, default False
        Use memory mapping when opening file on disk, when source is a str.
    **kwargs
        Additional keyword arguments passed on to `pyarrow.Table.to_pandas`.

    Returns
    -------
    df : pandas.DataFrame
        The contents of the Feather file as a pandas.DataFrame
    """
    return (read_table(
        source, columns=columns, memory_map=memory_map,
        use_threads=use_threads).to_pandas(use_threads=use_threads, **kwargs))


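# A minimal round-trip sketch for write_feather()/read_feather(), not part
# of the original module; the path "example.feather" is hypothetical:
#
#   >>> import pandas as pd
#   >>> from pyarrow import feather
#   >>> feather.write_feather(pd.DataFrame({"a": [1, 2, 3]}), "example.feather")
#   >>> feather.read_feather("example.feather")["a"].tolist()
#   [1, 2, 3]

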
def read_table(source, columns=None, memory_map=False, use_threads=True):
    """
    Read a pyarrow.Table from Feather format

    Parameters
    ----------
    source : str file path, or file-like object
        You can use a MemoryMappedFile as source to explicitly use a memory
        map.
    columns : sequence, optional
        Only read a specific set of columns. If not provided, all columns are
        read.
    memory_map : boolean, default False
        Use memory mapping when opening file on disk, when source is a str
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads.

    Returns
    -------
    table : pyarrow.Table
        The contents of the Feather file as a pyarrow.Table
    """
    reader = _feather.FeatherReader(
        source, use_memory_map=memory_map, use_threads=use_threads)

    if columns is None:
        return reader.read()

    column_types = [type(column) for column in columns]
    if all(map(lambda t: t == int, column_types)):
        table = reader.read_indices(columns)
    elif all(map(lambda t: t == str, column_types)):
        table = reader.read_names(columns)
    else:
        column_type_names = [t.__name__ for t in column_types]
        raise TypeError("Columns must be indices or names. "
                        "Got columns {} of types {}"
                        .format(columns, column_type_names))

    # Feather v1 already respects the column selection
    if reader.version < 3:
        return table
    # Feather v2 reads with sorted / deduplicated selection
    elif sorted(set(columns)) == columns:
        return table
    else:
        # follow exact order / selection of names
        return table.select(columns)
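# A minimal column-selection sketch for read_table(), not part of the
# original module; "f.feather" and the column names are hypothetical.
# Columns may be given as all names or all indices, but not mixed:
#
#   >>> from pyarrow import feather
#   >>> feather.read_table("f.feather", columns=["b", "a"])  # by name
#   >>> feather.read_table("f.feather", columns=[0, 2])      # by index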
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/api.h
ADDED
@@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Coarse public API while the library is in development

#pragma once

#include "arrow/array.h"                    // IWYU pragma: export
#include "arrow/array/array_run_end.h"      // IWYU pragma: export
#include "arrow/array/concatenate.h"        // IWYU pragma: export
#include "arrow/buffer.h"                   // IWYU pragma: export
#include "arrow/builder.h"                  // IWYU pragma: export
#include "arrow/chunked_array.h"            // IWYU pragma: export
#include "arrow/compare.h"                  // IWYU pragma: export
#include "arrow/config.h"                   // IWYU pragma: export
#include "arrow/datum.h"                    // IWYU pragma: export
#include "arrow/extension_type.h"           // IWYU pragma: export
#include "arrow/memory_pool.h"              // IWYU pragma: export
#include "arrow/pretty_print.h"             // IWYU pragma: export
#include "arrow/record_batch.h"             // IWYU pragma: export
#include "arrow/result.h"                   // IWYU pragma: export
#include "arrow/status.h"                   // IWYU pragma: export
#include "arrow/table.h"                    // IWYU pragma: export
#include "arrow/table_builder.h"            // IWYU pragma: export
#include "arrow/tensor.h"                   // IWYU pragma: export
#include "arrow/type.h"                     // IWYU pragma: export
#include "arrow/util/key_value_metadata.h"  // IWYU pragma: export
#include "arrow/visit_array_inline.h"       // IWYU pragma: export
#include "arrow/visit_scalar_inline.h"      // IWYU pragma: export
#include "arrow/visitor.h"                  // IWYU pragma: export

/// \brief Top-level namespace for Apache Arrow C++ API
namespace arrow {}
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array.h
ADDED
@@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Kitchen-sink public API for arrow::Array data structures. C++ library code
// (especially header files) in Apache Arrow should use more specific headers
// unless it's a file that uses most or all Array types in which case using
// arrow/array.h is fine.

#pragma once

/// \defgroup numeric-arrays Concrete classes for numeric arrays
/// @{
/// @}

/// \defgroup binary-arrays Concrete classes for binary/string arrays
/// @{
/// @}

/// \defgroup nested-arrays Concrete classes for nested arrays
/// @{
/// @}

/// \defgroup run-end-encoded-arrays Concrete classes for run-end encoded arrays
/// @{
/// @}

#include "arrow/array/array_base.h"       // IWYU pragma: keep
#include "arrow/array/array_binary.h"     // IWYU pragma: keep
#include "arrow/array/array_decimal.h"    // IWYU pragma: keep
#include "arrow/array/array_dict.h"       // IWYU pragma: keep
#include "arrow/array/array_nested.h"     // IWYU pragma: keep
#include "arrow/array/array_primitive.h"  // IWYU pragma: keep
#include "arrow/array/array_run_end.h"    // IWYU pragma: keep
#include "arrow/array/data.h"             // IWYU pragma: keep
#include "arrow/array/util.h"             // IWYU pragma: keep
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h
ADDED
@@ -0,0 +1,587 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include "arrow/device.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/span.h"
#include "arrow/util/visibility.h"

namespace arrow {

// ----------------------------------------------------------------------
// Buffer classes

/// \class Buffer
/// \brief Object containing a pointer to a piece of contiguous memory with a
/// particular size.
///
/// Buffers have two related notions of length: size and capacity. Size is
/// the number of bytes that might have valid data. Capacity is the number
/// of bytes that were allocated for the buffer in total.
///
/// The Buffer base class does not own its memory, but subclasses often do.
///
/// The following invariant is always true: Size <= Capacity
class ARROW_EXPORT Buffer {
 public:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Buffer);

  /// \brief Construct from buffer and size without copying memory
  ///
  /// \param[in] data a memory buffer
  /// \param[in] size buffer size
  ///
  /// \note The passed memory must be kept alive through some other means
  Buffer(const uint8_t* data, int64_t size)
      : is_mutable_(false),
        is_cpu_(true),
        data_(data),
        size_(size),
        capacity_(size),
        device_type_(DeviceAllocationType::kCPU) {
    SetMemoryManager(default_cpu_memory_manager());
  }

  Buffer(const uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm,
         std::shared_ptr<Buffer> parent = NULLPTR,
         std::optional<DeviceAllocationType> device_type_override = std::nullopt)
      : is_mutable_(false),
        data_(data),
        size_(size),
        capacity_(size),
        parent_(std::move(parent)) {
    // SetMemoryManager will also set device_type_
    SetMemoryManager(std::move(mm));
    // If a device type is specified, use that instead. Example of when this can be
    // useful: the CudaMemoryManager can set device_type_ to kCUDA, but you can specify
    // device_type_override=kCUDA_HOST as the device type to override it.
    if (device_type_override != std::nullopt) {
      device_type_ = *device_type_override;
    }
  }

  Buffer(uintptr_t address, int64_t size, std::shared_ptr<MemoryManager> mm,
         std::shared_ptr<Buffer> parent = NULLPTR)
      : Buffer(reinterpret_cast<const uint8_t*>(address), size, std::move(mm),
               std::move(parent)) {}

  /// \brief Construct from string_view without copying memory
  ///
  /// \param[in] data a string_view object
  ///
  /// \note The memory viewed by data must not be deallocated in the lifetime of the
  /// Buffer; temporary rvalue strings must be stored in an lvalue somewhere
  explicit Buffer(std::string_view data)
      : Buffer(reinterpret_cast<const uint8_t*>(data.data()),
               static_cast<int64_t>(data.size())) {}

  virtual ~Buffer() = default;

  /// An offset into data that is owned by another buffer, but we want to be
  /// able to retain a valid pointer to it even after other shared_ptr's to the
  /// parent buffer have been destroyed
  ///
  /// This method makes no assertions about alignment or padding of the buffer but
  /// in general we expect buffers to be aligned and padded to 64 bytes. In the future
  /// we might add utility methods to help determine if a buffer satisfies this contract.
  Buffer(const std::shared_ptr<Buffer>& parent, const int64_t offset, const int64_t size)
      : Buffer(parent->data_ + offset, size) {
    parent_ = parent;
    SetMemoryManager(parent->memory_manager_);
  }

  uint8_t operator[](std::size_t i) const { return data_[i]; }

  /// \brief Construct a new std::string with a hexadecimal representation of the buffer.
  /// \return std::string
  std::string ToHexString();

  /// Return true if both buffers are the same size and contain the same bytes
  /// up to the number of compared bytes
  bool Equals(const Buffer& other, int64_t nbytes) const;

  /// Return true if both buffers are the same size and contain the same bytes
  bool Equals(const Buffer& other) const;

  /// Copy a section of the buffer into a new Buffer.
  Result<std::shared_ptr<Buffer>> CopySlice(
      const int64_t start, const int64_t nbytes,
      MemoryPool* pool = default_memory_pool()) const;

  /// Zero bytes in padding, i.e. bytes between size_ and capacity_.
  void ZeroPadding() {
#ifndef NDEBUG
    CheckMutable();
#endif
    // A zero-capacity buffer can have a null data pointer
    if (capacity_ != 0) {
      memset(mutable_data() + size_, 0, static_cast<size_t>(capacity_ - size_));
    }
  }

  /// \brief Construct an immutable buffer that takes ownership of the contents
  /// of an std::string (without copying it).
  ///
  /// \param[in] data a string to own
  /// \return a new Buffer instance
  static std::shared_ptr<Buffer> FromString(std::string data);

  /// \brief Construct an immutable buffer that takes ownership of the contents
  /// of an std::vector (without copying it). Only vectors of TrivialType objects
  /// (integers, floating point numbers, ...) can be wrapped by this function.
  ///
  /// \param[in] vec a vector to own
  /// \return a new Buffer instance
  template <typename T>
  static std::shared_ptr<Buffer> FromVector(std::vector<T> vec) {
    static_assert(std::is_trivial_v<T>,
                  "Buffer::FromVector can only wrap vectors of trivial objects");

    if (vec.empty()) {
      return std::shared_ptr<Buffer>{new Buffer()};
    }

    auto* data = reinterpret_cast<uint8_t*>(vec.data());
    auto size_in_bytes = static_cast<int64_t>(vec.size() * sizeof(T));
    return std::shared_ptr<Buffer>{
        new Buffer{data, size_in_bytes},
        // Keep the vector's buffer alive inside the shared_ptr's destructor until after
        // we have deleted the Buffer. Note we can't use this trick in FromString since
        // std::string's data is inline for short strings so moving invalidates pointers
        // into the string's buffer.
        [vec = std::move(vec)](Buffer* buffer) { delete buffer; }};
  }

  /// \brief Create buffer referencing typed memory with some length without
  /// copying
  /// \param[in] data the typed memory as C array
  /// \param[in] length the number of values in the array
  /// \return a new shared_ptr<Buffer>
  template <typename T, typename SizeType = int64_t>
  static std::shared_ptr<Buffer> Wrap(const T* data, SizeType length) {
    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data),
                                    static_cast<int64_t>(sizeof(T) * length));
  }

  /// \brief Create buffer referencing std::vector with some length without
  /// copying
  /// \param[in] data the vector to be referenced. If this vector is changed,
  /// the buffer may become invalid
  /// \return a new shared_ptr<Buffer>
  template <typename T>
  static std::shared_ptr<Buffer> Wrap(const std::vector<T>& data) {
    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data.data()),
                                    static_cast<int64_t>(sizeof(T) * data.size()));
  }

  /// \brief Copy buffer contents into a new std::string
  /// \return std::string
  /// \note Can throw std::bad_alloc if buffer is large
  std::string ToString() const;

  /// \brief View buffer contents as a std::string_view
  /// \return std::string_view
  explicit operator std::string_view() const {
    return {reinterpret_cast<const char*>(data_), static_cast<size_t>(size_)};
  }

  /// \brief Return a pointer to the buffer's data
  ///
  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
  ///
  /// To get the buffer's data address regardless of its device, call `address()`.
  const uint8_t* data() const {
#ifndef NDEBUG
    CheckCPU();
#endif
    return ARROW_PREDICT_TRUE(is_cpu_) ? data_ : NULLPTR;
  }

  /// \brief Return a pointer to the buffer's data cast to a specific type
  ///
  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
  template <typename T>
  const T* data_as() const {
    return reinterpret_cast<const T*>(data());
  }

  /// \brief Return the buffer's data as a span
  template <typename T>
  util::span<const T> span_as() const {
    return util::span(data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
  }

  /// \brief Return a writable pointer to the buffer's data
  ///
  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
  /// are true). Otherwise, an assertion may be thrown or a null pointer may
  /// be returned.
  ///
  /// To get the buffer's mutable data address regardless of its device, call
  /// `mutable_address()`.
  uint8_t* mutable_data() {
#ifndef NDEBUG
    CheckCPU();
    CheckMutable();
#endif
    return ARROW_PREDICT_TRUE(is_cpu_ && is_mutable_) ? const_cast<uint8_t*>(data_)
                                                      : NULLPTR;
  }

  /// \brief Return a writable pointer to the buffer's data cast to a specific type
  ///
  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
  /// are true). Otherwise, an assertion may be thrown or a null pointer may
  /// be returned.
  template <typename T>
  T* mutable_data_as() {
    return reinterpret_cast<T*>(mutable_data());
  }

  /// \brief Return the buffer's mutable data as a span
  template <typename T>
  util::span<T> mutable_span_as() {
    return util::span(mutable_data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
  }

  /// \brief Return the device address of the buffer's data
  uintptr_t address() const { return reinterpret_cast<uintptr_t>(data_); }

  /// \brief Return a writable device address to the buffer's data
  ///
  /// The buffer has to be a mutable buffer (`is_mutable()` is true).
  /// Otherwise, an assertion may be thrown or 0 may be returned.
  uintptr_t mutable_address() const {
#ifndef NDEBUG
    CheckMutable();
#endif
    return ARROW_PREDICT_TRUE(is_mutable_) ? reinterpret_cast<uintptr_t>(data_) : 0;
  }

  /// \brief Return the buffer's size in bytes
  int64_t size() const { return size_; }

  /// \brief Return the buffer's capacity (number of allocated bytes)
  int64_t capacity() const { return capacity_; }

  /// \brief Whether the buffer is directly CPU-accessible
  ///
  /// If this function returns true, you can read directly from the buffer's
  /// `data()` pointer. Otherwise, you'll have to `View()` or `Copy()` it.
  bool is_cpu() const { return is_cpu_; }

  /// \brief Whether the buffer is mutable
  ///
  /// If this function returns true, you are allowed to modify buffer contents
  /// using the pointer returned by `mutable_data()` or `mutable_address()`.
  bool is_mutable() const { return is_mutable_; }

  const std::shared_ptr<Device>& device() const { return memory_manager_->device(); }

  const std::shared_ptr<MemoryManager>& memory_manager() const { return memory_manager_; }

  DeviceAllocationType device_type() const { return device_type_; }

  std::shared_ptr<Buffer> parent() const { return parent_; }

  /// \brief Get a RandomAccessFile for reading a buffer
  ///
  /// The returned file object reads from this buffer's underlying memory.
  static Result<std::shared_ptr<io::RandomAccessFile>> GetReader(std::shared_ptr<Buffer>);

  /// \brief Get a OutputStream for writing to a buffer
  ///
  /// The buffer must be mutable. The returned stream object writes into the buffer's
  /// underlying memory (but it won't resize it).
  static Result<std::shared_ptr<io::OutputStream>> GetWriter(std::shared_ptr<Buffer>);

  /// \brief Copy buffer
  ///
  /// The buffer contents will be copied into a new buffer allocated by the
  /// given MemoryManager. This function supports cross-device copies.
  static Result<std::shared_ptr<Buffer>> Copy(std::shared_ptr<Buffer> source,
                                              const std::shared_ptr<MemoryManager>& to);

  /// \brief Copy a non-owned buffer
  ///
  /// This is useful for cases where the source memory area is externally managed
  /// (its lifetime not tied to the source Buffer), otherwise please use Copy().
  static Result<std::unique_ptr<Buffer>> CopyNonOwned(
      const Buffer& source, const std::shared_ptr<MemoryManager>& to);

  /// \brief View buffer
  ///
  /// Return a Buffer that reflects this buffer, seen potentially from another
  /// device, without making an explicit copy of the contents. The underlying
  /// mechanism is typically implemented by the kernel or device driver, and may
  /// involve lazy caching of parts of the buffer contents on the destination
  /// device's memory.
  ///
  /// If a non-copy view is unsupported for the buffer on the given device,
  /// nullptr is returned. An error can be returned if some low-level
  /// operation fails (such as an out-of-memory condition).
  static Result<std::shared_ptr<Buffer>> View(std::shared_ptr<Buffer> source,
                                              const std::shared_ptr<MemoryManager>& to);

  /// \brief View or copy buffer
  ///
  /// Try to view buffer contents on the given MemoryManager's device, but
  /// fall back to copying if a no-copy view isn't supported.
  static Result<std::shared_ptr<Buffer>> ViewOrCopy(
      std::shared_ptr<Buffer> source, const std::shared_ptr<MemoryManager>& to);

  virtual std::shared_ptr<Device::SyncEvent> device_sync_event() const { return NULLPTR; }

 protected:
  bool is_mutable_;
  bool is_cpu_;
  const uint8_t* data_;
  int64_t size_;
  int64_t capacity_;
  DeviceAllocationType device_type_;

  // null by default, but may be set
  std::shared_ptr<Buffer> parent_;

 private:
  // private so that subclasses are forced to call SetMemoryManager()
  std::shared_ptr<MemoryManager> memory_manager_;

 protected:
  Buffer();

  void CheckMutable() const;
  void CheckCPU() const;

  void SetMemoryManager(std::shared_ptr<MemoryManager> mm) {
    memory_manager_ = std::move(mm);
    is_cpu_ = memory_manager_->is_cpu();
    device_type_ = memory_manager_->device()->device_type();
  }
};

/// \defgroup buffer-slicing-functions Functions for slicing buffers
///
/// @{

/// \brief Construct a view on a buffer at the given offset and length.
///
/// This function cannot fail and does not check for errors (except in debug builds)
static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
                                                  const int64_t offset,
                                                  const int64_t length) {
  return std::make_shared<Buffer>(buffer, offset, length);
}

/// \brief Construct a view on a buffer at the given offset, up to the buffer's end.
///
/// This function cannot fail and does not check for errors (except in debug builds)
static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
                                                  const int64_t offset) {
  int64_t length = buffer->size() - offset;
  return SliceBuffer(buffer, offset, length);
}

/// \brief Input-checking version of SliceBuffer
///
/// An Invalid Status is returned if the requested slice falls out of bounds.
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
                                                int64_t offset);
/// \brief Input-checking version of SliceBuffer
///
/// An Invalid Status is returned if the requested slice falls out of bounds.
/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
                                                int64_t offset, int64_t length);

/// \brief Like SliceBuffer, but construct a mutable buffer slice.
///
/// If the parent buffer is not mutable, behavior is undefined (it may abort
/// in debug builds).
ARROW_EXPORT
std::shared_ptr<Buffer> SliceMutableBuffer(const std::shared_ptr<Buffer>& buffer,
                                           const int64_t offset, const int64_t length);

/// \brief Like SliceBuffer, but construct a mutable buffer slice.
///
/// If the parent buffer is not mutable, behavior is undefined (it may abort
/// in debug builds).
static inline std::shared_ptr<Buffer> SliceMutableBuffer(
    const std::shared_ptr<Buffer>& buffer, const int64_t offset) {
  int64_t length = buffer->size() - offset;
  return SliceMutableBuffer(buffer, offset, length);
}

/// \brief Input-checking version of SliceMutableBuffer
///
/// An Invalid Status is returned if the requested slice falls out of bounds.
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
    const std::shared_ptr<Buffer>& buffer, int64_t offset);
/// \brief Input-checking version of SliceMutableBuffer
///
/// An Invalid Status is returned if the requested slice falls out of bounds.
/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
    const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length);

/// @}

/// \class MutableBuffer
/// \brief A Buffer whose contents can be mutated. May or may not own its data.
class ARROW_EXPORT MutableBuffer : public Buffer {
 public:
  MutableBuffer(uint8_t* data, const int64_t size) : Buffer(data, size) {
    is_mutable_ = true;
  }

  MutableBuffer(uint8_t* data, const int64_t size, std::shared_ptr<MemoryManager> mm)
      : Buffer(data, size, std::move(mm)) {
    is_mutable_ = true;
  }

  MutableBuffer(const std::shared_ptr<Buffer>& parent, const int64_t offset,
                const int64_t size);

  /// \brief Create buffer referencing typed memory with some length
  /// \param[in] data the typed memory as C array
  /// \param[in] length the number of values in the array
  /// \return a new shared_ptr<Buffer>
  template <typename T, typename SizeType = int64_t>
  static std::shared_ptr<Buffer> Wrap(T* data, SizeType length) {
    return std::make_shared<MutableBuffer>(reinterpret_cast<uint8_t*>(data),
                                           static_cast<int64_t>(sizeof(T) * length));
  }

 protected:
  MutableBuffer() : Buffer(NULLPTR, 0) {}
};

/// \class ResizableBuffer
/// \brief A mutable buffer that can be resized
class ARROW_EXPORT ResizableBuffer : public MutableBuffer {
 public:
  /// Change buffer reported size to indicated size, allocating memory if
  /// necessary. This will ensure that the capacity of the buffer is a multiple
  /// of 64 bytes as defined in Layout.md.
  /// Consider using ZeroPadding afterwards, to conform to the Arrow layout
  /// specification.
  ///
  /// @param new_size The new size for the buffer.
  /// @param shrink_to_fit Whether to shrink the capacity if new size < current size
  virtual Status Resize(const int64_t new_size, bool shrink_to_fit) = 0;
  Status Resize(const int64_t new_size) {
    return Resize(new_size, /*shrink_to_fit=*/true);
  }

  /// Ensure that buffer has enough memory allocated to fit the indicated
  /// capacity (and meets the 64 byte padding requirement in Layout.md).
  /// It does not change buffer's reported size and doesn't zero the padding.
  virtual Status Reserve(const int64_t new_capacity) = 0;

  template <class T>
  Status TypedResize(const int64_t new_nb_elements, bool shrink_to_fit = true) {
    return Resize(sizeof(T) * new_nb_elements, shrink_to_fit);
  }

  template <class T>
  Status TypedReserve(const int64_t new_nb_elements) {
    return Reserve(sizeof(T) * new_nb_elements);
  }

 protected:
  ResizableBuffer(uint8_t* data, int64_t size) : MutableBuffer(data, size) {}
  ResizableBuffer(uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm)
      : MutableBuffer(data, size, std::move(mm)) {}
};

/// \defgroup buffer-allocation-functions Functions for allocating buffers
///
/// @{

/// \brief Allocate a fixed size mutable buffer from a memory pool, zero its padding.
///
/// \param[in] size size of buffer to allocate
/// \param[in] pool a memory pool
ARROW_EXPORT
Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size,
                                               MemoryPool* pool = NULLPTR);
ARROW_EXPORT
Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size, int64_t alignment,
                                               MemoryPool* pool = NULLPTR);

/// \brief Allocate a resizeable buffer from a memory pool, zero its padding.
///
/// \param[in] size size of buffer to allocate
/// \param[in] pool a memory pool
ARROW_EXPORT
Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
    const int64_t size, MemoryPool* pool = NULLPTR);
ARROW_EXPORT
Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
    const int64_t size, const int64_t alignment, MemoryPool* pool = NULLPTR);

/// \brief Allocate a bitmap buffer from a memory pool.
/// No guarantee on values is provided.
///
/// \param[in] length size in bits of bitmap to allocate
/// \param[in] pool memory pool to allocate memory from
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> AllocateBitmap(int64_t length,
                                               MemoryPool* pool = NULLPTR);

/// \brief Allocate a zero-initialized bitmap buffer from a memory pool
///
/// \param[in] length size in bits of bitmap to allocate
/// \param[in] pool memory pool to allocate memory from
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length,
                                                    MemoryPool* pool = NULLPTR);

ARROW_EXPORT
Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length, int64_t alignment,
                                                    MemoryPool* pool = NULLPTR);

/// \brief Concatenate multiple buffers into a single buffer
///
/// \param[in] buffers to be concatenated
/// \param[in] pool memory pool to allocate the new buffer from
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> ConcatenateBuffers(const BufferVector& buffers,
                                                   MemoryPool* pool = NULLPTR);

/// @}

}  // namespace arrow
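The Python-level pyarrow.Buffer wraps this arrow::Buffer class, so the slicing and equality semantics documented above can be exercised from Python. A minimal sketch, assuming only the public pyarrow API (the byte payload is arbitrary):

    import pyarrow as pa

    buf = pa.py_buffer(b"abcdefgh")      # zero-copy view over a bytes object
    view = buf.slice(2, 4)               # like SliceBuffer: shares memory, no copy
    assert view.to_pybytes() == b"cdef"
    assert buf.size == 8 and view.size == 4
    assert view.equals(pa.py_buffer(b"cdef"))  # byte-wise comparison, as in Equals()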
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h
ADDED
@@ -0,0 +1,484 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>

#include "arrow/buffer.h"
#include "arrow/status.h"
#include "arrow/util/bit_util.h"
#include "arrow/util/bitmap_generate.h"
#include "arrow/util/bitmap_ops.h"
#include "arrow/util/macros.h"
#include "arrow/util/ubsan.h"
#include "arrow/util/visibility.h"

namespace arrow {

// ----------------------------------------------------------------------
// Buffer builder classes

/// \class BufferBuilder
/// \brief A class for incrementally building a contiguous chunk of in-memory
/// data
class ARROW_EXPORT BufferBuilder {
 public:
  explicit BufferBuilder(MemoryPool* pool = default_memory_pool(),
                         int64_t alignment = kDefaultBufferAlignment)
      : pool_(pool),
        data_(/*ensure never null to make ubsan happy and avoid check penalties below*/
              util::MakeNonNull<uint8_t>()),
        capacity_(0),
        size_(0),
        alignment_(alignment) {}

  /// \brief Constructs a new Builder that will start using
  /// the provided buffer until Finish/Reset are called.
  /// The buffer is not resized.
  explicit BufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
                         MemoryPool* pool = default_memory_pool(),
                         int64_t alignment = kDefaultBufferAlignment)
      : buffer_(std::move(buffer)),
        pool_(pool),
        data_(buffer_->mutable_data()),
        capacity_(buffer_->capacity()),
        size_(buffer_->size()),
        alignment_(alignment) {}

  /// \brief Resize the buffer to the nearest multiple of 64 bytes
  ///
  /// \param new_capacity the new capacity of the builder. Will be
  /// rounded up to a multiple of 64 bytes for padding
  /// \param shrink_to_fit if new capacity is smaller than the existing,
  /// reallocate internal buffer. Set to false to avoid reallocations when
  /// shrinking the builder.
  /// \return Status
  Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
    if (buffer_ == NULLPTR) {
      ARROW_ASSIGN_OR_RAISE(buffer_,
                            AllocateResizableBuffer(new_capacity, alignment_, pool_));
    } else {
      ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit));
    }
    capacity_ = buffer_->capacity();
    data_ = buffer_->mutable_data();
    return Status::OK();
  }

  /// \brief Ensure that the builder can accommodate the additional number of bytes
  /// without the need to perform allocations
  ///
  /// \param[in] additional_bytes number of additional bytes to make space for
  /// \return Status
  Status Reserve(const int64_t additional_bytes) {
    auto min_capacity = size_ + additional_bytes;
    if (min_capacity <= capacity_) {
      return Status::OK();
    }
    return Resize(GrowByFactor(capacity_, min_capacity), false);
  }

  /// \brief Return a capacity expanded by the desired growth factor
  static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) {
    // Doubling capacity except for large Reserve requests. 2x growth strategy
    // (versus 1.5x) seems to have slightly better performance when using
    // jemalloc, but significantly better performance when using the system
    // allocator. See ARROW-6450 for further discussion
    return std::max(new_capacity, current_capacity * 2);
  }

  /// \brief Append the given data to the buffer
  ///
  /// The buffer is automatically expanded if necessary.
  Status Append(const void* data, const int64_t length) {
    if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) {
      ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false));
    }
    UnsafeAppend(data, length);
    return Status::OK();
  }

  /// \brief Append the given data to the buffer
  ///
  /// The buffer is automatically expanded if necessary.
  Status Append(std::string_view v) { return Append(v.data(), v.size()); }

  /// \brief Append copies of a value to the buffer
  ///
  /// The buffer is automatically expanded if necessary.
  Status Append(const int64_t num_copies, uint8_t value) {
    ARROW_RETURN_NOT_OK(Reserve(num_copies));
    UnsafeAppend(num_copies, value);
    return Status::OK();
  }

  // Advance pointer and zero out memory
  Status Advance(const int64_t length) { return Append(length, 0); }

  // Advance pointer, but don't allocate or zero memory
  void UnsafeAdvance(const int64_t length) { size_ += length; }

  // Unsafe methods don't check existing size
  void UnsafeAppend(const void* data, const int64_t length) {
    memcpy(data_ + size_, data, static_cast<size_t>(length));
    size_ += length;
  }

  void UnsafeAppend(std::string_view v) {
    UnsafeAppend(v.data(), static_cast<int64_t>(v.size()));
  }

  void UnsafeAppend(const int64_t num_copies, uint8_t value) {
    memset(data_ + size_, value, static_cast<size_t>(num_copies));
    size_ += num_copies;
  }

  /// \brief Return result of builder as a Buffer object.
  ///
  /// The builder is reset and can be reused afterwards.
  ///
  /// \param[out] out the finalized Buffer object
  /// \param shrink_to_fit if the buffer size is smaller than its capacity,
  /// reallocate to fit more tightly in memory. Set to false to avoid
  /// a reallocation, at the expense of potentially more memory consumption.
  /// \return Status
  Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
    ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit));
    if (size_ != 0) buffer_->ZeroPadding();
    *out = buffer_;
    if (*out == NULLPTR) {
      ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_));
    }
    Reset();
    return Status::OK();
  }

  Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
    std::shared_ptr<Buffer> out;
    ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
    return out;
  }

  /// \brief Like Finish, but override the final buffer size
  ///
  /// This is useful after writing data directly into the builder memory
  /// without calling the Append methods (basically, when using BufferBuilder
  /// mostly for memory allocation).
  Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
                                                   bool shrink_to_fit = true) {
    size_ = final_length;
    return Finish(shrink_to_fit);
  }

  void Reset() {
    buffer_ = NULLPTR;
    capacity_ = size_ = 0;
  }

  /// \brief Set size to a smaller value without modifying builder
  /// contents. For reusable BufferBuilder classes
  /// \param[in] position must be non-negative and less than or equal
  /// to the current length()
  void Rewind(int64_t position) { size_ = position; }

  int64_t capacity() const { return capacity_; }
  int64_t length() const { return size_; }
  const uint8_t* data() const { return data_; }
  uint8_t* mutable_data() { return data_; }
  template <typename T>
  const T* data_as() const {
    return reinterpret_cast<const T*>(data_);
  }
  template <typename T>
  T* mutable_data_as() {
    return reinterpret_cast<T*>(data_);
  }

 private:
  std::shared_ptr<ResizableBuffer> buffer_;
  MemoryPool* pool_;
  uint8_t* data_;
  int64_t capacity_;
  int64_t size_;
  int64_t alignment_;
};
+
|
226 |
+
template <typename T, typename Enable = void>
|
227 |
+
class TypedBufferBuilder;
|
228 |
+
|
229 |
+
/// \brief A BufferBuilder for building a buffer of arithmetic elements
|
230 |
+
template <typename T>
|
231 |
+
class TypedBufferBuilder<
|
232 |
+
T, typename std::enable_if<std::is_arithmetic<T>::value ||
|
233 |
+
std::is_standard_layout<T>::value>::type> {
|
234 |
+
public:
|
235 |
+
explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
|
236 |
+
int64_t alignment = kDefaultBufferAlignment)
|
237 |
+
: bytes_builder_(pool, alignment) {}
|
238 |
+
|
239 |
+
explicit TypedBufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
|
240 |
+
MemoryPool* pool = default_memory_pool())
|
241 |
+
: bytes_builder_(std::move(buffer), pool) {}
|
242 |
+
|
243 |
+
explicit TypedBufferBuilder(BufferBuilder builder)
|
244 |
+
: bytes_builder_(std::move(builder)) {}
|
245 |
+
|
246 |
+
BufferBuilder* bytes_builder() { return &bytes_builder_; }
|
247 |
+
|
248 |
+
Status Append(T value) {
|
249 |
+
return bytes_builder_.Append(reinterpret_cast<uint8_t*>(&value), sizeof(T));
|
250 |
+
}
|
251 |
+
|
252 |
+
Status Append(const T* values, int64_t num_elements) {
|
253 |
+
return bytes_builder_.Append(reinterpret_cast<const uint8_t*>(values),
|
254 |
+
num_elements * sizeof(T));
|
255 |
+
}
|
256 |
+
|
257 |
+
Status Append(const int64_t num_copies, T value) {
|
258 |
+
ARROW_RETURN_NOT_OK(Reserve(num_copies + length()));
|
259 |
+
UnsafeAppend(num_copies, value);
|
260 |
+
return Status::OK();
|
261 |
+
}
|
262 |
+
|
263 |
+
void UnsafeAppend(T value) {
|
264 |
+
bytes_builder_.UnsafeAppend(reinterpret_cast<uint8_t*>(&value), sizeof(T));
|
265 |
+
}
|
266 |
+
|
267 |
+
void UnsafeAppend(const T* values, int64_t num_elements) {
|
268 |
+
bytes_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values),
|
269 |
+
num_elements * sizeof(T));
|
270 |
+
}
|
271 |
+
|
272 |
+
template <typename Iter>
|
273 |
+
void UnsafeAppend(Iter values_begin, Iter values_end) {
|
274 |
+
auto num_elements = static_cast<int64_t>(std::distance(values_begin, values_end));
|
275 |
+
auto data = mutable_data() + length();
|
276 |
+
bytes_builder_.UnsafeAdvance(num_elements * sizeof(T));
|
277 |
+
std::copy(values_begin, values_end, data);
|
278 |
+
}
|
279 |
+
|
280 |
+
void UnsafeAppend(const int64_t num_copies, T value) {
|
281 |
+
auto data = mutable_data() + length();
|
282 |
+
bytes_builder_.UnsafeAdvance(num_copies * sizeof(T));
|
283 |
+
std::fill(data, data + num_copies, value);
|
284 |
+
}
|
285 |
+
|
286 |
+
Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
|
287 |
+
return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit);
|
288 |
+
}
|
289 |
+
|
290 |
+
Status Reserve(const int64_t additional_elements) {
|
291 |
+
return bytes_builder_.Reserve(additional_elements * sizeof(T));
|
292 |
+
}
|
293 |
+
|
294 |
+
Status Advance(const int64_t length) {
|
295 |
+
return bytes_builder_.Advance(length * sizeof(T));
|
296 |
+
}
|
297 |
+
|
298 |
+
Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
|
299 |
+
return bytes_builder_.Finish(out, shrink_to_fit);
|
300 |
+
}
|
301 |
+
|
302 |
+
Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
|
303 |
+
std::shared_ptr<Buffer> out;
|
304 |
+
ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
|
305 |
+
return out;
|
306 |
+
}
|
307 |
+
|
308 |
+
/// \brief Like Finish, but override the final buffer size
|
309 |
+
///
|
310 |
+
/// This is useful after writing data directly into the builder memory
|
311 |
+
/// without calling the Append methods (basically, when using TypedBufferBuilder
|
312 |
+
/// only for memory allocation).
|
313 |
+
Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
|
314 |
+
bool shrink_to_fit = true) {
|
315 |
+
return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit);
|
316 |
+
}
|
317 |
+
|
318 |
+
void Reset() { bytes_builder_.Reset(); }
|
319 |
+
|
320 |
+
int64_t length() const { return bytes_builder_.length() / sizeof(T); }
|
321 |
+
int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); }
|
322 |
+
const T* data() const { return reinterpret_cast<const T*>(bytes_builder_.data()); }
|
323 |
+
T* mutable_data() { return reinterpret_cast<T*>(bytes_builder_.mutable_data()); }
|
324 |
+
|
325 |
+
private:
|
326 |
+
BufferBuilder bytes_builder_;
|
327 |
+
};
|
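
The typed wrapper speaks in elements rather than bytes: Reserve(n) makes room
for n values of T, and length() is an element count. A hedged sketch (the
helper name is illustrative):

#include <arrow/buffer_builder.h>
#include <arrow/result.h>

arrow::Result<std::shared_ptr<arrow::Buffer>> BuildInt32Buffer() {
  arrow::TypedBufferBuilder<int32_t> builder;
  ARROW_RETURN_NOT_OK(builder.Reserve(3));  // room for 3 elements, not 3 bytes
  builder.UnsafeAppend(int32_t{1});         // Unsafe*: no bounds check after Reserve
  builder.UnsafeAppend(int32_t{2});
  builder.UnsafeAppend(int32_t{3});
  // The finished buffer holds 3 * sizeof(int32_t) bytes.
  return builder.Finish();
}
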
/// \brief A BufferBuilder for building a buffer containing a bitmap
template <>
class TypedBufferBuilder<bool> {
 public:
  explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
                              int64_t alignment = kDefaultBufferAlignment)
      : bytes_builder_(pool, alignment) {}

  explicit TypedBufferBuilder(BufferBuilder builder)
      : bytes_builder_(std::move(builder)) {}

  BufferBuilder* bytes_builder() { return &bytes_builder_; }

  Status Append(bool value) {
    ARROW_RETURN_NOT_OK(Reserve(1));
    UnsafeAppend(value);
    return Status::OK();
  }

  Status Append(const uint8_t* valid_bytes, int64_t num_elements) {
    ARROW_RETURN_NOT_OK(Reserve(num_elements));
    UnsafeAppend(valid_bytes, num_elements);
    return Status::OK();
  }

  Status Append(const int64_t num_copies, bool value) {
    ARROW_RETURN_NOT_OK(Reserve(num_copies));
    UnsafeAppend(num_copies, value);
    return Status::OK();
  }

  void UnsafeAppend(bool value) {
    bit_util::SetBitTo(mutable_data(), bit_length_, value);
    if (!value) {
      ++false_count_;
    }
    ++bit_length_;
  }

  /// \brief Append bits from an array of bytes (one value per byte)
  void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) {
    if (num_elements == 0) return;
    int64_t i = 0;
    internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
      bool value = bytes[i++];
      false_count_ += !value;
      return value;
    });
    bit_length_ += num_elements;
  }

  /// \brief Append bits from a packed bitmap
  void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) {
    if (num_elements == 0) return;
    internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_);
    false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements);
    bit_length_ += num_elements;
  }

  void UnsafeAppend(const int64_t num_copies, bool value) {
    bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value);
    false_count_ += num_copies * !value;
    bit_length_ += num_copies;
  }

  template <bool count_falses, typename Generator>
  void UnsafeAppend(const int64_t num_elements, Generator&& gen) {
    if (num_elements == 0) return;

    if (count_falses) {
      internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
        bool value = gen();
        false_count_ += !value;
        return value;
      });
    } else {
      internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements,
                                     std::forward<Generator>(gen));
    }
    bit_length_ += num_elements;
  }

  Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
    const int64_t old_byte_capacity = bytes_builder_.capacity();
    ARROW_RETURN_NOT_OK(
        bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit));
    // Resize() may have chosen a larger capacity (e.g. for padding),
    // so ask it again before calling memset().
    const int64_t new_byte_capacity = bytes_builder_.capacity();
    if (new_byte_capacity > old_byte_capacity) {
      // The additional buffer space is 0-initialized for convenience,
      // so that other methods can simply bump the length.
      memset(mutable_data() + old_byte_capacity, 0,
             static_cast<size_t>(new_byte_capacity - old_byte_capacity));
    }
    return Status::OK();
  }

  Status Reserve(const int64_t additional_elements) {
    return Resize(
        BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements),
        false);
  }

  Status Advance(const int64_t length) {
    ARROW_RETURN_NOT_OK(Reserve(length));
    bit_length_ += length;
    false_count_ += length;
    return Status::OK();
  }

  Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
    // set bytes_builder_.size_ == byte size of data
    bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) -
                                 bytes_builder_.length());
    bit_length_ = false_count_ = 0;
    return bytes_builder_.Finish(out, shrink_to_fit);
  }

  Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
    std::shared_ptr<Buffer> out;
    ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
    return out;
  }

  /// \brief Like Finish, but override the final buffer size
  ///
  /// This is useful after writing data directly into the builder memory
  /// without calling the Append methods (basically, when using TypedBufferBuilder
  /// only for memory allocation).
  Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
                                                   bool shrink_to_fit = true) {
    const auto final_byte_length = bit_util::BytesForBits(final_length);
    bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length());
    bit_length_ = false_count_ = 0;
    return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit);
  }

  void Reset() {
    bytes_builder_.Reset();
    bit_length_ = false_count_ = 0;
  }

  int64_t length() const { return bit_length_; }
  int64_t capacity() const { return bytes_builder_.capacity() * 8; }
  const uint8_t* data() const { return bytes_builder_.data(); }
  uint8_t* mutable_data() { return bytes_builder_.mutable_data(); }
  int64_t false_count() const { return false_count_; }

 private:
  BufferBuilder bytes_builder_;
  int64_t bit_length_ = 0;
  int64_t false_count_ = 0;
};

}  // namespace arrow
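
The bool specialization packs one bit per value and tracks false_count_
incrementally, which is how Arrow validity bitmaps get their null counts
cheaply. A hedged sketch (the helper name is illustrative):

#include <arrow/buffer_builder.h>
#include <arrow/result.h>

arrow::Result<std::shared_ptr<arrow::Buffer>> BuildValidityBitmap() {
  arrow::TypedBufferBuilder<bool> builder;
  ARROW_RETURN_NOT_OK(builder.Append(true));
  ARROW_RETURN_NOT_OK(builder.Append(false));            // e.g. one null slot
  ARROW_RETURN_NOT_OK(builder.Append(/*num_copies=*/5, true));
  int64_t nulls = builder.false_count();                 // 1, tracked during appends
  (void)nulls;
  return builder.Finish();  // 7 bits, packed LSB-first into one byte
}
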
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h
ADDED
@@ -0,0 +1,33 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/array/builder_adaptive.h"   // IWYU pragma: keep
#include "arrow/array/builder_base.h"       // IWYU pragma: keep
#include "arrow/array/builder_binary.h"     // IWYU pragma: keep
#include "arrow/array/builder_decimal.h"    // IWYU pragma: keep
#include "arrow/array/builder_dict.h"       // IWYU pragma: keep
#include "arrow/array/builder_nested.h"     // IWYU pragma: keep
#include "arrow/array/builder_primitive.h"  // IWYU pragma: keep
#include "arrow/array/builder_run_end.h"    // IWYU pragma: keep
#include "arrow/array/builder_time.h"       // IWYU pragma: keep
#include "arrow/array/builder_union.h"      // IWYU pragma: keep
#include "arrow/status.h"
#include "arrow/util/visibility.h"
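
builder.h declares nothing itself; it exists so user code can pull in every
concrete array builder with one include. A minimal sketch of the kind of code
that includes it (Int64Builder comes from builder_primitive.h; the helper name
is illustrative):

#include <arrow/builder.h>

arrow::Result<std::shared_ptr<arrow::Array>> BuildInt64Array() {
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.Append(1));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_RETURN_NOT_OK(builder.Append(3));
  return builder.Finish();  // an Array of length 3 with one null
}
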
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h
ADDED
@@ -0,0 +1,164 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <atomic>
#include <cassert>
#include <cstdint>
#include <vector>

#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"

namespace arrow::internal {

struct ChunkLocation {
  /// \brief Index of the chunk in the array of chunks
  ///
  /// The value is always in the range `[0, chunks.size()]`. `chunks.size()` is used
  /// to represent out-of-bounds locations.
  int64_t chunk_index = 0;

  /// \brief Index of the value in the chunk
  ///
  /// The value is undefined if chunk_index >= chunks.size()
  int64_t index_in_chunk = 0;
};

/// \brief A utility that incrementally resolves logical indices into
/// physical indices in a chunked array.
struct ARROW_EXPORT ChunkResolver {
 private:
  /// \brief Array containing `chunks.size() + 1` offsets.
  ///
  /// `offsets_[i]` is the starting logical index of chunk `i`. `offsets_[0]` is always 0
  /// and `offsets_[chunks.size()]` is the logical length of the chunked array.
  std::vector<int64_t> offsets_;

  /// \brief Cache of the index of the last resolved chunk.
  ///
  /// \invariant `cached_chunk_ in [0, chunks.size()]`
  mutable std::atomic<int64_t> cached_chunk_;

 public:
  explicit ChunkResolver(const ArrayVector& chunks) noexcept;
  explicit ChunkResolver(const std::vector<const Array*>& chunks) noexcept;
  explicit ChunkResolver(const RecordBatchVector& batches) noexcept;

  ChunkResolver(ChunkResolver&& other) noexcept;
  ChunkResolver& operator=(ChunkResolver&& other) noexcept;

  ChunkResolver(const ChunkResolver& other) noexcept;
  ChunkResolver& operator=(const ChunkResolver& other) noexcept;

  /// \brief Resolve a logical index to a ChunkLocation.
  ///
  /// The returned ChunkLocation contains the chunk index and the within-chunk index
  /// equivalent to the logical index.
  ///
  /// \pre index >= 0
  /// \post location.chunk_index in [0, chunks.size()]
  /// \param index The logical index to resolve
  /// \return ChunkLocation with a valid chunk_index if index is within
  ///         bounds, or with chunk_index == chunks.size() if logical index is
  ///         `>= chunked_array.length()`.
  inline ChunkLocation Resolve(int64_t index) const {
    const auto cached_chunk = cached_chunk_.load(std::memory_order_relaxed);
    const auto chunk_index =
        ResolveChunkIndex</*StoreCachedChunk=*/true>(index, cached_chunk);
    return {chunk_index, index - offsets_[chunk_index]};
  }

  /// \brief Resolve a logical index to a ChunkLocation.
  ///
  /// The returned ChunkLocation contains the chunk index and the within-chunk index
  /// equivalent to the logical index.
  ///
  /// \pre index >= 0
  /// \post location.chunk_index in [0, chunks.size()]
  /// \param index The logical index to resolve
  /// \param hint ChunkLocation{} or the last ChunkLocation returned by
  ///             this ChunkResolver.
  /// \return ChunkLocation with a valid chunk_index if index is within
  ///         bounds, or with chunk_index == chunks.size() if logical index is
  ///         `>= chunked_array.length()`.
  inline ChunkLocation ResolveWithChunkIndexHint(int64_t index,
                                                 ChunkLocation hint) const {
    assert(hint.chunk_index < static_cast<int64_t>(offsets_.size()));
    const auto chunk_index =
        ResolveChunkIndex</*StoreCachedChunk=*/false>(index, hint.chunk_index);
    return {chunk_index, index - offsets_[chunk_index]};
  }

 private:
  template <bool StoreCachedChunk>
  inline int64_t ResolveChunkIndex(int64_t index, int64_t cached_chunk) const {
    // It is common for algorithms sequentially processing arrays to make consecutive
    // accesses at a relatively small distance from each other, hence often falling in
    // the same chunk.
    //
    // This is guaranteed when merging (assuming each side of the merge uses its
    // own resolver), and is the most common case in recursive invocations of
    // partitioning.
    const auto num_offsets = static_cast<int64_t>(offsets_.size());
    const int64_t* offsets = offsets_.data();
    if (ARROW_PREDICT_TRUE(index >= offsets[cached_chunk]) &&
        (cached_chunk + 1 == num_offsets || index < offsets[cached_chunk + 1])) {
      return cached_chunk;
    }
    // lo < hi is guaranteed by `num_offsets = chunks.size() + 1`
    const auto chunk_index = Bisect(index, offsets, /*lo=*/0, /*hi=*/num_offsets);
    if constexpr (StoreCachedChunk) {
      assert(chunk_index < static_cast<int64_t>(offsets_.size()));
      cached_chunk_.store(chunk_index, std::memory_order_relaxed);
    }
    return chunk_index;
  }

  /// \brief Find the index of the chunk that contains the logical index.
  ///
  /// Any non-negative index is accepted. When `hi=num_offsets`, the largest
  /// possible return value is `num_offsets-1` which is equal to
  /// `chunks.size()`. This is returned when the logical index is out-of-bounds.
  ///
  /// \pre index >= 0
  /// \pre lo < hi
  /// \pre lo >= 0 && hi <= offsets_.size()
  static inline int64_t Bisect(int64_t index, const int64_t* offsets, int64_t lo,
                               int64_t hi) {
    // Similar to std::upper_bound(), but slightly different as our offsets
    // array always starts with 0.
    auto n = hi - lo;
    // First iteration does not need to check for n > 1
    // (lo < hi is guaranteed by the precondition).
    assert(n > 1 && "lo < hi is a precondition of Bisect");
    do {
      const int64_t m = n >> 1;
      const int64_t mid = lo + m;
      if (index >= offsets[mid]) {
        lo = mid;
        n -= m;
      } else {
        n = m;
      }
    } while (n > 1);
    return lo;
  }
};

}  // namespace arrow::internal
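
The core trick above is the offsets table plus an upper-bound-style bisection,
with the last hit cached for sequential access patterns. A standalone
illustration of just that technique (plain C++, not the Arrow-internal API):

#include <cassert>
#include <cstdint>
#include <vector>

struct Location { int64_t chunk_index; int64_t index_in_chunk; };

// offsets[i] is the starting logical index of chunk i; offsets.back() is the
// total length, so offsets has chunks.size() + 1 entries and starts with 0.
Location Resolve(const std::vector<int64_t>& offsets, int64_t index) {
  assert(index >= 0 && offsets.size() > 1);
  int64_t lo = 0;
  int64_t n = static_cast<int64_t>(offsets.size());
  do {  // find the last offset <= index, as Bisect() does above
    const int64_t m = n >> 1;
    if (index >= offsets[lo + m]) { lo += m; n -= m; } else { n = m; }
  } while (n > 1);
  return {lo, index - offsets[lo]};
}

// For chunk lengths {10, 10, 20} the offsets are {0, 10, 20, 40}:
// Resolve(offsets, 25) == {2, 5}, and Resolve(offsets, 40) == {3, 0},
// i.e. chunk_index == chunks.size() flags an out-of-bounds index.
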
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h
ADDED
@@ -0,0 +1,275 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "arrow/chunk_resolver.h"
#include "arrow/compare.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class DataType;
class MemoryPool;
namespace stl {
template <typename T, typename V>
class ChunkedArrayIterator;
}  // namespace stl

/// \class ChunkedArray
/// \brief A data structure managing a list of primitive Arrow arrays logically
/// as one large array
///
/// Data chunking is treated throughout this project largely as an
/// implementation detail for performance and memory use optimization.
/// ChunkedArray allows Array objects to be collected and interpreted
/// as a single logical array without requiring an expensive concatenation
/// step.
///
/// In some cases, data produced by a function may exceed the capacity of an
/// Array (like BinaryArray or StringArray) and so returning multiple Arrays is
/// the only possibility. In these cases, we recommend returning a ChunkedArray
/// instead of a vector of Arrays or some alternative.
///
/// When data is processed in parallel, it may not be practical or possible to
/// create large contiguous memory allocations and write output into them. With
/// some data types, like binary and string types, it is not possible at all to
/// produce non-chunked array outputs without requiring a concatenation step at
/// the end of processing.
///
/// Application developers may tune chunk sizes based on analysis of
/// performance profiles but many developer-users will not need to be
/// especially concerned with the chunking details.
///
/// Preserving the chunk layout/sizes in processing steps is generally not
/// considered to be a contract in APIs. A function may decide to alter the
/// chunking of its result. Similarly, APIs accepting multiple ChunkedArray
/// inputs should not expect the chunk layout to be the same in each input.
class ARROW_EXPORT ChunkedArray {
 public:
  ChunkedArray(ChunkedArray&&) = default;
  ChunkedArray& operator=(ChunkedArray&&) = default;

  /// \brief Construct a chunked array from a single Array
  explicit ChunkedArray(std::shared_ptr<Array> chunk)
      : ChunkedArray(ArrayVector{std::move(chunk)}) {}

  /// \brief Construct a chunked array from a vector of arrays and an optional data type
  ///
  /// The vector elements must have the same data type.
  /// If the data type is passed explicitly, the vector may be empty.
  /// If the data type is omitted, the vector must be non-empty.
  explicit ChunkedArray(ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);

  // \brief Constructor with basic input validation.
  static Result<std::shared_ptr<ChunkedArray>> Make(
      ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);

  /// \brief Create an empty ChunkedArray of a given type
  ///
  /// The output ChunkedArray will have one chunk with an empty
  /// array of the given type.
  ///
  /// \param[in] type the data type of the empty ChunkedArray
  /// \param[in] pool the memory pool to allocate memory from
  /// \return the resulting ChunkedArray
  static Result<std::shared_ptr<ChunkedArray>> MakeEmpty(
      std::shared_ptr<DataType> type, MemoryPool* pool = default_memory_pool());

  /// \return the total length of the chunked array; computed on construction
  int64_t length() const { return length_; }

  /// \return the total number of nulls among all chunks
  int64_t null_count() const { return null_count_; }

  /// \return the total number of chunks in the chunked array
  int num_chunks() const { return static_cast<int>(chunks_.size()); }

  /// \return chunk a particular chunk from the chunked array
  const std::shared_ptr<Array>& chunk(int i) const { return chunks_[i]; }

  /// \return an ArrayVector of chunks
  const ArrayVector& chunks() const { return chunks_; }

  /// \brief Construct a zero-copy slice of the chunked array with the
  /// indicated offset and length
  ///
  /// \param[in] offset the position of the first element in the constructed
  /// slice
  /// \param[in] length the length of the slice. If there are not enough
  /// elements in the chunked array, the length will be adjusted accordingly
  ///
  /// \return a new object wrapped in std::shared_ptr<ChunkedArray>
  std::shared_ptr<ChunkedArray> Slice(int64_t offset, int64_t length) const;

  /// \brief Slice from offset until end of the chunked array
  std::shared_ptr<ChunkedArray> Slice(int64_t offset) const;

  /// \brief Flatten this chunked array as a vector of chunked arrays, one
  /// for each struct field
  ///
  /// \param[in] pool The pool for buffer allocations, if any
  Result<std::vector<std::shared_ptr<ChunkedArray>>> Flatten(
      MemoryPool* pool = default_memory_pool()) const;

  /// Construct a zero-copy view of this chunked array with the given
  /// type. Calls Array::View on each constituent chunk. Always succeeds if
  /// there are zero chunks
  Result<std::shared_ptr<ChunkedArray>> View(const std::shared_ptr<DataType>& type) const;

  /// \brief Return the type of the chunked array
  const std::shared_ptr<DataType>& type() const { return type_; }

  /// \brief Return a Scalar containing the value of this array at index
  Result<std::shared_ptr<Scalar>> GetScalar(int64_t index) const;

  /// \brief Determine if two chunked arrays are equal.
  ///
  /// Two chunked arrays can be equal only if they have equal datatypes.
  /// However, they may be equal even if they have different chunkings.
  bool Equals(const ChunkedArray& other,
              const EqualOptions& opts = EqualOptions::Defaults()) const;
  /// \brief Determine if two chunked arrays are equal.
  bool Equals(const std::shared_ptr<ChunkedArray>& other,
              const EqualOptions& opts = EqualOptions::Defaults()) const;
  /// \brief Determine if two chunked arrays are approximately equal
  bool ApproxEquals(const ChunkedArray& other,
                    const EqualOptions& = EqualOptions::Defaults()) const;

  /// \return PrettyPrint representation suitable for debugging
  std::string ToString() const;

  /// \brief Perform cheap validation checks to determine obvious inconsistencies
  /// within the chunked array's internal data.
  ///
  /// This is O(k*m) where k is the number of array descendants,
  /// and m is the number of chunks.
  ///
  /// \return Status
  Status Validate() const;

  /// \brief Perform extensive validation checks to determine inconsistencies
  /// within the chunked array's internal data.
  ///
  /// This is O(k*n) where k is the number of array descendants,
  /// and n is the length in elements.
  ///
  /// \return Status
  Status ValidateFull() const;

 protected:
  ArrayVector chunks_;
  std::shared_ptr<DataType> type_;
  int64_t length_;
  int64_t null_count_;

 private:
  template <typename T, typename V>
  friend class ::arrow::stl::ChunkedArrayIterator;
  internal::ChunkResolver chunk_resolver_;
  ARROW_DISALLOW_COPY_AND_ASSIGN(ChunkedArray);
};
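
A hedged usage sketch for the class above: chunks built independently are
stitched into one logical array without copying (AppendValues is the usual
primitive-builder convenience; the helper name is illustrative):

#include <arrow/api.h>

arrow::Result<std::shared_ptr<arrow::ChunkedArray>> MakeChunked() {
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> chunk1, builder.Finish());
  ARROW_RETURN_NOT_OK(builder.AppendValues({4, 5}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> chunk2, builder.Finish());
  // Make() checks that all chunks share one type; length() will report 5.
  return arrow::ChunkedArray::Make({chunk1, chunk2});
}
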
namespace internal {

/// \brief EXPERIMENTAL: Utility for incremental iteration over contiguous
/// pieces of potentially differently-chunked ChunkedArray objects
class ARROW_EXPORT MultipleChunkIterator {
 public:
  MultipleChunkIterator(const ChunkedArray& left, const ChunkedArray& right)
      : left_(left),
        right_(right),
        pos_(0),
        length_(left.length()),
        chunk_idx_left_(0),
        chunk_idx_right_(0),
        chunk_pos_left_(0),
        chunk_pos_right_(0) {}

  bool Next(std::shared_ptr<Array>* next_left, std::shared_ptr<Array>* next_right);

  int64_t position() const { return pos_; }

 private:
  const ChunkedArray& left_;
  const ChunkedArray& right_;

  // The amount of the entire ChunkedArray consumed
  int64_t pos_;

  // Length of the chunked array(s)
  int64_t length_;

  // Current left chunk
  int chunk_idx_left_;

  // Current right chunk
  int chunk_idx_right_;

  // Offset into the current left chunk
  int64_t chunk_pos_left_;

  // Offset into the current right chunk
  int64_t chunk_pos_right_;
};

/// \brief Evaluate binary function on two ChunkedArray objects having possibly
/// different chunk layouts. The passed binary function / functor should have
/// the following signature.
///
///    Status(const Array&, const Array&, int64_t)
///
/// The third argument is the absolute position relative to the start of each
/// ChunkedArray. The function is executed against each contiguous pair of
/// array segments, slicing if necessary.
///
/// For example, if two arrays have chunk sizes
///
///   left: [10, 10, 20]
///   right: [15, 10, 15]
///
/// Then the following invocations take place (pseudocode)
///
///   func(left.chunk[0][0:10], right.chunk[0][0:10], 0)
///   func(left.chunk[1][0:5], right.chunk[0][10:15], 10)
///   func(left.chunk[1][5:10], right.chunk[1][0:5], 15)
///   func(left.chunk[2][0:5], right.chunk[1][5:10], 20)
///   func(left.chunk[2][5:20], right.chunk[2][:], 25)
template <typename Action>
Status ApplyBinaryChunked(const ChunkedArray& left, const ChunkedArray& right,
                          Action&& action) {
  MultipleChunkIterator iterator(left, right);
  std::shared_ptr<Array> left_piece, right_piece;
  while (iterator.Next(&left_piece, &right_piece)) {
    ARROW_RETURN_NOT_OK(action(*left_piece, *right_piece, iterator.position()));
  }
  return Status::OK();
}

}  // namespace internal
}  // namespace arrow
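
A hedged sketch of driving the helper above with a lambda; each callback sees
a pair of equal-length contiguous segments plus the absolute position (this is
internal API, so it is subject to change):

#include <arrow/chunked_array.h>

arrow::Status WalkPairs(const arrow::ChunkedArray& left,
                        const arrow::ChunkedArray& right) {
  return arrow::internal::ApplyBinaryChunked(
      left, right,
      [](const arrow::Array& l, const arrow::Array& r, int64_t position) {
        // l.length() == r.length() by construction of the iterator.
        return l.length() == r.length()
                   ? arrow::Status::OK()
                   : arrow::Status::Invalid("unequal segment at ", position);
      });
}
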
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h
ADDED
@@ -0,0 +1,145 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Functions for comparing Arrow data structures

#pragma once

#include <cstdint>
#include <iosfwd>

#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class DataType;
class Tensor;
class SparseTensor;
struct Scalar;

static constexpr double kDefaultAbsoluteTolerance = 1E-5;

/// A container of options for equality comparisons
class EqualOptions {
 public:
  /// Whether or not NaNs are considered equal.
  bool nans_equal() const { return nans_equal_; }

  /// Return a new EqualOptions object with the "nans_equal" property changed.
  EqualOptions nans_equal(bool v) const {
    auto res = EqualOptions(*this);
    res.nans_equal_ = v;
    return res;
  }

  /// Whether or not zeros with differing signs are considered equal.
  bool signed_zeros_equal() const { return signed_zeros_equal_; }

  /// Return a new EqualOptions object with the "signed_zeros_equal" property changed.
  EqualOptions signed_zeros_equal(bool v) const {
    auto res = EqualOptions(*this);
    res.signed_zeros_equal_ = v;
    return res;
  }

  /// The absolute tolerance for approximate comparisons of floating-point values.
  double atol() const { return atol_; }

  /// Return a new EqualOptions object with the "atol" property changed.
  EqualOptions atol(double v) const {
    auto res = EqualOptions(*this);
    res.atol_ = v;
    return res;
  }

  /// The ostream to which a diff will be formatted if arrays disagree.
  /// If this is null (the default) no diff will be formatted.
  std::ostream* diff_sink() const { return diff_sink_; }

  /// Return a new EqualOptions object with the "diff_sink" property changed.
  /// This option will be ignored if diff formatting of the types of compared arrays is
  /// not supported.
  EqualOptions diff_sink(std::ostream* diff_sink) const {
    auto res = EqualOptions(*this);
    res.diff_sink_ = diff_sink;
    return res;
  }

  static EqualOptions Defaults() { return {}; }

 protected:
  double atol_ = kDefaultAbsoluteTolerance;
  bool nans_equal_ = false;
  bool signed_zeros_equal_ = true;

  std::ostream* diff_sink_ = NULLPTR;
};

/// Returns true if the arrays are exactly equal
ARROW_EXPORT bool ArrayEquals(const Array& left, const Array& right,
                              const EqualOptions& = EqualOptions::Defaults());

/// Returns true if the arrays are approximately equal. For non-floating point
/// types, this is equivalent to ArrayEquals(left, right)
ARROW_EXPORT bool ArrayApproxEquals(const Array& left, const Array& right,
                                    const EqualOptions& = EqualOptions::Defaults());

/// Returns true if the indicated equal-length segments of the arrays are exactly equal
ARROW_EXPORT bool ArrayRangeEquals(const Array& left, const Array& right,
                                   int64_t start_idx, int64_t end_idx,
                                   int64_t other_start_idx,
                                   const EqualOptions& = EqualOptions::Defaults());

/// Returns true if the indicated equal-length segments of the arrays are approximately
/// equal
ARROW_EXPORT bool ArrayRangeApproxEquals(const Array& left, const Array& right,
                                         int64_t start_idx, int64_t end_idx,
                                         int64_t other_start_idx,
                                         const EqualOptions& = EqualOptions::Defaults());

ARROW_EXPORT bool TensorEquals(const Tensor& left, const Tensor& right,
                               const EqualOptions& = EqualOptions::Defaults());

/// EXPERIMENTAL: Returns true if the given sparse tensors are exactly equal
ARROW_EXPORT bool SparseTensorEquals(const SparseTensor& left, const SparseTensor& right,
                                     const EqualOptions& = EqualOptions::Defaults());

/// Returns true if the type metadata are exactly equal
/// \param[in] left a DataType
/// \param[in] right a DataType
/// \param[in] check_metadata whether to compare KeyValueMetadata for child
/// fields
ARROW_EXPORT bool TypeEquals(const DataType& left, const DataType& right,
                             bool check_metadata = true);

/// Returns true if scalars are equal
/// \param[in] left a Scalar
/// \param[in] right a Scalar
/// \param[in] options comparison options
ARROW_EXPORT bool ScalarEquals(const Scalar& left, const Scalar& right,
                               const EqualOptions& options = EqualOptions::Defaults());

/// Returns true if scalars are approximately equal
/// \param[in] left a Scalar
/// \param[in] right a Scalar
/// \param[in] options comparison options
ARROW_EXPORT bool ScalarApproxEquals(
    const Scalar& left, const Scalar& right,
    const EqualOptions& options = EqualOptions::Defaults());

}  // namespace arrow
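
The with-style setters above return modified copies, so options compose into a
single fluent expression. A minimal sketch (helper name illustrative):

#include <arrow/compare.h>

bool ApproxSame(const arrow::Array& a, const arrow::Array& b) {
  auto opts = arrow::EqualOptions::Defaults().atol(1e-3).nans_equal(true);
  return arrow::ArrayApproxEquals(a, b, opts);
}
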
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h
ADDED
@@ -0,0 +1,1717 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Eager evaluation convenience APIs for invoking common functions, including
// necessary memory allocations

#pragma once

#include <optional>
#include <string>
#include <utility>

#include "arrow/compute/function_options.h"
#include "arrow/compute/type_fwd.h"
#include "arrow/datum.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

/// \addtogroup compute-concrete-options
///
/// @{

class ARROW_EXPORT ArithmeticOptions : public FunctionOptions {
 public:
  explicit ArithmeticOptions(bool check_overflow = false);
  static constexpr char const kTypeName[] = "ArithmeticOptions";
  bool check_overflow;
};
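
// Usage sketch (illustrative comment, not part of the upstream header): with
// check_overflow enabled, checked kernels raise an error instead of wrapping.
// `a` and `b` are assumed to be Datums wrapping equal-length Int8 arrays.
//
//   arrow::compute::ArithmeticOptions opts(/*check_overflow=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Add(a, b, opts));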

class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions {
 public:
  explicit ElementWiseAggregateOptions(bool skip_nulls = true);
  static constexpr char const kTypeName[] = "ElementWiseAggregateOptions";
  static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; }
  bool skip_nulls;
};

/// Rounding and tie-breaking modes for round compute functions.
/// Additional details and examples are provided in compute.rst.
enum class RoundMode : int8_t {
  /// Round to nearest integer less than or equal in magnitude (aka "floor")
  DOWN,
  /// Round to nearest integer greater than or equal in magnitude (aka "ceil")
  UP,
  /// Get the integral part without fractional digits (aka "trunc")
  TOWARDS_ZERO,
  /// Round negative values with DOWN rule
  /// and positive values with UP rule (aka "away from zero")
  TOWARDS_INFINITY,
  /// Round ties with DOWN rule (also called "round half towards negative infinity")
  HALF_DOWN,
  /// Round ties with UP rule (also called "round half towards positive infinity")
  HALF_UP,
  /// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity")
  HALF_TOWARDS_ZERO,
  /// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero")
  HALF_TOWARDS_INFINITY,
  /// Round ties to nearest even integer
  HALF_TO_EVEN,
  /// Round ties to nearest odd integer
  HALF_TO_ODD,
};

class ARROW_EXPORT RoundOptions : public FunctionOptions {
 public:
  explicit RoundOptions(int64_t ndigits = 0,
                        RoundMode round_mode = RoundMode::HALF_TO_EVEN);
  static constexpr char const kTypeName[] = "RoundOptions";
  static RoundOptions Defaults() { return RoundOptions(); }
  /// Rounding precision (number of digits to round to)
  int64_t ndigits;
  /// Rounding and tie-breaking mode
  RoundMode round_mode;
};
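
// Usage sketch (illustrative comment only): round a float64 column to two
// decimal places, breaking ties to the nearest even digit. `doubles` is an
// assumed Datum built elsewhere.
//
//   arrow::compute::RoundOptions opts(/*ndigits=*/2,
//                                     arrow::compute::RoundMode::HALF_TO_EVEN);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum rounded,
//                         arrow::compute::Round(doubles, opts));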

class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions {
 public:
  explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN);
  static constexpr char const kTypeName[] = "RoundBinaryOptions";
  static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); }
  /// Rounding and tie-breaking mode
  RoundMode round_mode;
};

enum class CalendarUnit : int8_t {
  NANOSECOND,
  MICROSECOND,
  MILLISECOND,
  SECOND,
  MINUTE,
  HOUR,
  DAY,
  WEEK,
  MONTH,
  QUARTER,
  YEAR
};

class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions {
 public:
  explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY,
                                bool week_starts_monday = true,
                                bool ceil_is_strictly_greater = false,
                                bool calendar_based_origin = false);
  static constexpr char const kTypeName[] = "RoundTemporalOptions";
  static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); }

  /// Number of units to round to
  int multiple;
  /// The unit used for rounding of time
  CalendarUnit unit;
  /// What day does the week start with (Monday=true, Sunday=false)
  bool week_starts_monday;
  /// Enable this flag to return a rounded value that is strictly greater than the input.
  /// For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield 1970-01-01T03:00:00
  /// if set to true and 1970-01-01T00:00:00 if set to false.
  /// This applies to ceiling only.
  bool ceil_is_strictly_greater;
  /// By default time is rounded to a multiple of units since 1970-01-01T00:00:00.
  /// By setting calendar_based_origin to true, time will be rounded to a number
  /// of units since the last greater calendar unit.
  /// For example: rounding to a multiple of days since the beginning of the month or
  /// to hours since the beginning of the day.
  /// Exceptions: week and quarter are not used as greater units, therefore days will
  /// be rounded to the beginning of the month, not the week. The greater unit of week
  /// is year.
  /// Note that ceiling and rounding might change the sorting order of an array near a
  /// greater unit change. For example, rounding YYYY-mm-dd 23:00:00 to 5 hours will
  /// ceil and round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the
  /// other hand, YYYY-mm-dd+1 00:00:00 will ceil, round and floor to
  /// YYYY-mm-dd+1 00:00:00. This can break the order of an already ordered array.
  bool calendar_based_origin;
};
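
// Usage sketch (illustrative comment only): floor timestamps to 15-minute
// buckets counted from the start of each day rather than from the 1970 epoch.
// `timestamps` is an assumed Datum holding a timestamp array.
//
//   arrow::compute::RoundTemporalOptions opts(
//       /*multiple=*/15, arrow::compute::CalendarUnit::MINUTE,
//       /*week_starts_monday=*/true, /*ceil_is_strictly_greater=*/false,
//       /*calendar_based_origin=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum floored,
//                         arrow::compute::FloorTemporal(timestamps, opts));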

class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions {
 public:
  explicit RoundToMultipleOptions(double multiple = 1.0,
                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
  explicit RoundToMultipleOptions(std::shared_ptr<Scalar> multiple,
                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
  static constexpr char const kTypeName[] = "RoundToMultipleOptions";
  static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); }
  /// Rounding scale (multiple to round to).
  ///
  /// Should be a positive numeric scalar of a type compatible with the
  /// argument to be rounded. The cast kernel is used to convert the rounding
  /// multiple to match the result type.
  std::shared_ptr<Scalar> multiple;
  /// Rounding and tie-breaking mode
  RoundMode round_mode;
};
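
// Usage sketch (illustrative comment only): snap prices to the nearest 0.25.
// `prices` is an assumed Datum built elsewhere.
//
//   arrow::compute::RoundToMultipleOptions opts(/*multiple=*/0.25);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum snapped,
//                         arrow::compute::RoundToMultiple(prices, opts));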

/// Options for var_args_join.
class ARROW_EXPORT JoinOptions : public FunctionOptions {
 public:
  /// How to handle null values. (A null separator always results in a null output.)
  enum NullHandlingBehavior {
    /// A null in any input results in a null in the output.
    EMIT_NULL,
    /// Nulls in inputs are skipped.
    SKIP,
    /// Nulls in inputs are replaced with the replacement string.
    REPLACE,
  };
  explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL,
                       std::string null_replacement = "");
  static constexpr char const kTypeName[] = "JoinOptions";
  static JoinOptions Defaults() { return JoinOptions(); }
  NullHandlingBehavior null_handling;
  std::string null_replacement;
};

class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions {
 public:
  explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false);
  MatchSubstringOptions();
  static constexpr char const kTypeName[] = "MatchSubstringOptions";

  /// The exact substring (or regex, depending on kernel) to look for inside input values.
  std::string pattern;
  /// Whether to perform a case-insensitive match.
  bool ignore_case;
};
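
// Usage sketch (illustrative comment only): the matching kernels are exposed
// further down this API surface; one assumed route is the generic registry
// entry point CallFunction() from arrow/compute/api.h.
//
//   arrow::compute::MatchSubstringOptions opts("arrow", /*ignore_case=*/true);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum mask,
//       arrow::compute::CallFunction("match_substring", {strings}, &opts));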

class ARROW_EXPORT SplitOptions : public FunctionOptions {
 public:
  explicit SplitOptions(int64_t max_splits = -1, bool reverse = false);
  static constexpr char const kTypeName[] = "SplitOptions";

  /// Maximum number of splits allowed, or unlimited when -1
  int64_t max_splits;
  /// Start splitting from the end of the string (only relevant when max_splits != -1)
  bool reverse;
};

class ARROW_EXPORT SplitPatternOptions : public FunctionOptions {
 public:
  explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1,
                               bool reverse = false);
  SplitPatternOptions();
  static constexpr char const kTypeName[] = "SplitPatternOptions";

  /// The exact substring to split on.
  std::string pattern;
  /// Maximum number of splits allowed, or unlimited when -1
  int64_t max_splits;
  /// Start splitting from the end of the string (only relevant when max_splits != -1)
  bool reverse;
};

class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions {
 public:
  explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement);
  ReplaceSliceOptions();
  static constexpr char const kTypeName[] = "ReplaceSliceOptions";

  /// Index to start slicing at
  int64_t start;
  /// Index to stop slicing at
  int64_t stop;
  /// String to replace the slice with
  std::string replacement;
};

class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions {
 public:
  explicit ReplaceSubstringOptions(std::string pattern, std::string replacement,
                                   int64_t max_replacements = -1);
  ReplaceSubstringOptions();
  static constexpr char const kTypeName[] = "ReplaceSubstringOptions";

  /// Pattern to match, literal, or regular expression depending on which kernel is used
  std::string pattern;
  /// String to replace the pattern with
  std::string replacement;
  /// Max number of substrings to replace (-1 means unbounded)
  int64_t max_replacements;
};

class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions {
 public:
  explicit ExtractRegexOptions(std::string pattern);
  ExtractRegexOptions();
  static constexpr char const kTypeName[] = "ExtractRegexOptions";

  /// Regular expression with named capture fields
  std::string pattern;
};

/// Options for IsIn and IndexIn functions
class ARROW_EXPORT SetLookupOptions : public FunctionOptions {
 public:
  /// How to handle null values.
  enum NullMatchingBehavior {
    /// MATCH, any null in `value_set` is successfully matched in
    /// the input.
    MATCH,
    /// SKIP, any null in `value_set` is ignored and nulls in the input
    /// produce null (IndexIn) or false (IsIn) values in the output.
    SKIP,
    /// EMIT_NULL, any null in `value_set` is ignored and nulls in the
    /// input produce null (IndexIn and IsIn) values in the output.
    EMIT_NULL,
    /// INCONCLUSIVE, null values are regarded as unknown values, which is
    /// SQL-compatible. Nulls in the input produce null (IndexIn and IsIn)
    /// values in the output. In addition, if `value_set` contains a null,
    /// non-null unmatched values in the input also produce null values
    /// (IndexIn and IsIn) in the output.
    INCONCLUSIVE
  };

  explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH);
  SetLookupOptions();

  // DEPRECATED(will be removed after removing of skip_nulls)
  explicit SetLookupOptions(Datum value_set, bool skip_nulls);

  static constexpr char const kTypeName[] = "SetLookupOptions";

  /// The set of values to look up input values into.
  Datum value_set;

  NullMatchingBehavior null_matching_behavior;

  // DEPRECATED(will be removed after removing of skip_nulls)
  NullMatchingBehavior GetNullMatchingBehavior() const;

  // DEPRECATED(use null_matching_behavior instead)
  /// Whether nulls in `value_set` count for lookup.
  ///
  /// If true, any null in `value_set` is ignored and nulls in the input
  /// produce null (IndexIn) or false (IsIn) values in the output.
  /// If false, any null in `value_set` is successfully matched in
  /// the input.
  std::optional<bool> skip_nulls;
};
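
// Usage sketch (illustrative comment only): membership test where nulls in the
// input map to false rather than null. `values` and `value_set` are assumed
// Datums built elsewhere.
//
//   arrow::compute::SetLookupOptions opts(
//       value_set, arrow::compute::SetLookupOptions::SKIP);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum mask,
//                         arrow::compute::IsIn(values, opts));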

/// Options for struct_field function
class ARROW_EXPORT StructFieldOptions : public FunctionOptions {
 public:
  explicit StructFieldOptions(std::vector<int> indices);
  explicit StructFieldOptions(std::initializer_list<int>);
  explicit StructFieldOptions(FieldRef field_ref);
  StructFieldOptions();
  static constexpr char const kTypeName[] = "StructFieldOptions";

  /// The FieldRef specifying what to extract from struct or union.
  FieldRef field_ref;
};

class ARROW_EXPORT StrptimeOptions : public FunctionOptions {
 public:
  explicit StrptimeOptions(std::string format, TimeUnit::type unit,
                           bool error_is_null = false);
  StrptimeOptions();
  static constexpr char const kTypeName[] = "StrptimeOptions";

  /// The desired format string.
  std::string format;
  /// The desired time resolution
  TimeUnit::type unit;
  /// Return null on parsing errors if true or raise if false
  bool error_is_null;
};

class ARROW_EXPORT StrftimeOptions : public FunctionOptions {
 public:
  explicit StrftimeOptions(std::string format, std::string locale = "C");
  StrftimeOptions();

  static constexpr char const kTypeName[] = "StrftimeOptions";

  static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S";

  /// The desired format string.
  std::string format;
  /// The desired output locale string.
  std::string locale;
};
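
// Usage sketch (illustrative comment only): parse strings into second-resolution
// timestamps, yielding null for rows that fail to parse; invoked here through
// the assumed registry entry point CallFunction() (arrow/compute/api.h).
//
//   arrow::compute::StrptimeOptions opts("%Y-%m-%d %H:%M:%S",
//                                        arrow::TimeUnit::SECOND,
//                                        /*error_is_null=*/true);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum ts,
//       arrow::compute::CallFunction("strptime", {strings}, &opts));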

class ARROW_EXPORT PadOptions : public FunctionOptions {
 public:
  explicit PadOptions(int64_t width, std::string padding = " ");
  PadOptions();
  static constexpr char const kTypeName[] = "PadOptions";

  /// The desired string length.
  int64_t width;
  /// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII).
  std::string padding;
};

class ARROW_EXPORT TrimOptions : public FunctionOptions {
 public:
  explicit TrimOptions(std::string characters);
  TrimOptions();
  static constexpr char const kTypeName[] = "TrimOptions";

  /// The individual characters to be trimmed from the string.
  std::string characters;
};

class ARROW_EXPORT SliceOptions : public FunctionOptions {
 public:
  explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits<int64_t>::max(),
                        int64_t step = 1);
  SliceOptions();
  static constexpr char const kTypeName[] = "SliceOptions";
  int64_t start, stop, step;
};

class ARROW_EXPORT ListSliceOptions : public FunctionOptions {
 public:
  explicit ListSliceOptions(int64_t start, std::optional<int64_t> stop = std::nullopt,
                            int64_t step = 1,
                            std::optional<bool> return_fixed_size_list = std::nullopt);
  ListSliceOptions();
  static constexpr char const kTypeName[] = "ListSliceOptions";
  /// The start of list slicing.
  int64_t start;
  /// Optional stop of list slicing. If not set, then slice to end. (NotImplemented)
  std::optional<int64_t> stop;
  /// Slicing step
  int64_t step;
  // Whether to return a FixedSizeListArray. If true _and_ stop is after
  // a list element's length, nulls will be appended to create the requested slice size.
  // Default of `nullopt` will return whatever type it got in.
  std::optional<bool> return_fixed_size_list;
};
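
// Usage sketch (illustrative comment only): keep the first two elements of each
// list; assumes the registered kernel name "list_slice" and CallFunction()
// from arrow/compute/api.h.
//
//   arrow::compute::ListSliceOptions opts(/*start=*/0, /*stop=*/2);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum sliced,
//       arrow::compute::CallFunction("list_slice", {list_array}, &opts));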

class ARROW_EXPORT NullOptions : public FunctionOptions {
 public:
  explicit NullOptions(bool nan_is_null = false);
  static constexpr char const kTypeName[] = "NullOptions";
  static NullOptions Defaults() { return NullOptions{}; }

  bool nan_is_null;
};

enum CompareOperator : int8_t {
  EQUAL,
  NOT_EQUAL,
  GREATER,
  GREATER_EQUAL,
  LESS,
  LESS_EQUAL,
};

struct ARROW_EXPORT CompareOptions {
  explicit CompareOptions(CompareOperator op) : op(op) {}
  CompareOptions() : CompareOptions(CompareOperator::EQUAL) {}
  enum CompareOperator op;
};

class ARROW_EXPORT MakeStructOptions : public FunctionOptions {
 public:
  MakeStructOptions(std::vector<std::string> n, std::vector<bool> r,
                    std::vector<std::shared_ptr<const KeyValueMetadata>> m);
  explicit MakeStructOptions(std::vector<std::string> n);
  MakeStructOptions();
  static constexpr char const kTypeName[] = "MakeStructOptions";

  /// Names for wrapped columns
  std::vector<std::string> field_names;

  /// Nullability bits for wrapped columns
  std::vector<bool> field_nullability;

  /// Metadata attached to wrapped columns
  std::vector<std::shared_ptr<const KeyValueMetadata>> field_metadata;
};

struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions {
 public:
  explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1);
  static constexpr char const kTypeName[] = "DayOfWeekOptions";
  static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); }

  /// Number days from 0 if true and from 1 if false
  bool count_from_zero;
  /// What day does the week start with (Monday=1, Sunday=7).
  /// The numbering is unaffected by the count_from_zero parameter.
  uint32_t week_start;
};

/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent
/// times.
struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions {
 public:
  /// \brief How to interpret ambiguous local times that can be interpreted as
  /// multiple instants (normally two) due to DST shifts.
  ///
  /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations.
  /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations.
  enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST };

  /// \brief How to handle local times that do not exist due to DST shifts.
  ///
  /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant
  /// in the given timestamp precision (for example, for a nanoseconds precision
  /// timestamp, this is one nanosecond before the DST shift instant).
  /// NONEXISTENT_LATEST emits the DST shift instant.
  enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST };

  explicit AssumeTimezoneOptions(std::string timezone,
                                 Ambiguous ambiguous = AMBIGUOUS_RAISE,
                                 Nonexistent nonexistent = NONEXISTENT_RAISE);
  AssumeTimezoneOptions();
  static constexpr char const kTypeName[] = "AssumeTimezoneOptions";

  /// Timezone to convert timestamps from
  std::string timezone;

  /// How to interpret ambiguous local times (due to DST shifts)
  Ambiguous ambiguous;
  /// How to interpret nonexistent local times (due to DST shifts)
  Nonexistent nonexistent;
};
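
// Usage sketch (illustrative comment only): interpret timezone-naive timestamps
// as New York local time, resolving DST-ambiguous values to the earlier
// instant; assumes the registered kernel name "assume_timezone" and
// CallFunction() from arrow/compute/api.h.
//
//   arrow::compute::AssumeTimezoneOptions opts(
//       "America/New_York",
//       arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_EARLIEST);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum utc,
//       arrow::compute::CallFunction("assume_timezone", {naive}, &opts));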

struct ARROW_EXPORT WeekOptions : public FunctionOptions {
 public:
  explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false,
                       bool first_week_is_fully_in_year = false);
  static constexpr char const kTypeName[] = "WeekOptions";
  static WeekOptions Defaults() { return WeekOptions{}; }
  static WeekOptions ISODefaults() {
    return WeekOptions{/*week_starts_monday=*/true,
                       /*count_from_zero=*/false,
                       /*first_week_is_fully_in_year=*/false};
  }
  static WeekOptions USDefaults() {
    return WeekOptions{/*week_starts_monday=*/false,
                       /*count_from_zero=*/false,
                       /*first_week_is_fully_in_year=*/false};
  }

  /// What day does the week start with (Monday=true, Sunday=false)
  bool week_starts_monday;
  /// Dates from current year that fall into last ISO week of the previous year return
  /// 0 if true and 52 or 53 if false.
  bool count_from_zero;
  /// Must the first week be fully in January (true), or is a week that begins on
  /// December 29, 30, or 31 considered to be the first week of the new year (false)?
  bool first_week_is_fully_in_year;
};

struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions {
 public:
  enum Form { NFC, NFKC, NFD, NFKD };

  explicit Utf8NormalizeOptions(Form form = NFC);
  static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); }
  static constexpr char const kTypeName[] = "Utf8NormalizeOptions";

  /// The Unicode normalization form to apply
  Form form;
};

class ARROW_EXPORT RandomOptions : public FunctionOptions {
 public:
  enum Initializer { SystemRandom, Seed };

  static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; }
  static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; }

  RandomOptions(Initializer initializer, uint64_t seed);
  RandomOptions();
  static constexpr char const kTypeName[] = "RandomOptions";
  static RandomOptions Defaults() { return RandomOptions(); }

  /// The type of initialization for random number generation - system or provided seed.
  Initializer initializer;
  /// The seed value used to initialize the random number generation.
  uint64_t seed;
};
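
// Usage sketch (illustrative comment only): seed-based initialization yields a
// reproducible stream, while FromSystemRandom() draws a fresh seed per call.
//
//   auto reproducible = arrow::compute::RandomOptions::FromSeed(42);
//   auto nondeterministic = arrow::compute::RandomOptions::FromSystemRandom();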

/// Options for map_lookup function
class ARROW_EXPORT MapLookupOptions : public FunctionOptions {
 public:
  enum Occurrence {
    /// Return the first matching value
    FIRST,
    /// Return the last matching value
    LAST,
    /// Return all matching values
    ALL
  };

  explicit MapLookupOptions(std::shared_ptr<Scalar> query_key, Occurrence occurrence);
  MapLookupOptions();

  constexpr static char const kTypeName[] = "MapLookupOptions";

  /// The key to look up in the map
  std::shared_ptr<Scalar> query_key;

  /// Whether to return the first, last, or all matching values
  Occurrence occurrence;
};
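
// Usage sketch (illustrative comment only): fetch the first value stored under
// the key "color" in each map; assumes the registered kernel name "map_lookup"
// and CallFunction() from arrow/compute/api.h.
//
//   arrow::compute::MapLookupOptions opts(
//       arrow::MakeScalar(std::string("color")),
//       arrow::compute::MapLookupOptions::FIRST);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum vals,
//       arrow::compute::CallFunction("map_lookup", {map_array}, &opts));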

/// @}

/// \brief Get the absolute value of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value transformed
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise absolute value
ARROW_EXPORT
Result<Datum> AbsoluteValue(const Datum& arg,
                            ArithmeticOptions options = ArithmeticOptions(),
                            ExecContext* ctx = NULLPTR);

/// \brief Add two values together. Array values must be the same length. If
/// either addend is null the result will be null.
///
/// \param[in] left the first addend
/// \param[in] right the second addend
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise sum
ARROW_EXPORT
Result<Datum> Add(const Datum& left, const Datum& right,
                  ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);
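
// Usage sketch (illustrative comment only): scalars broadcast against arrays,
// so adding a scalar Datum shifts every element of the array Datum.
//
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum shifted,
//       arrow::compute::Add(int64_array, arrow::Datum(int64_t{1})));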

/// \brief Subtract two values. Array values must be the same length. If the
/// minuend or subtrahend is null the result will be null.
///
/// \param[in] left the value subtracted from (minuend)
/// \param[in] right the value by which the minuend is reduced (subtrahend)
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise difference
ARROW_EXPORT
Result<Datum> Subtract(const Datum& left, const Datum& right,
                       ArithmeticOptions options = ArithmeticOptions(),
                       ExecContext* ctx = NULLPTR);

/// \brief Multiply two values. Array values must be the same length. If either
/// factor is null the result will be null.
///
/// \param[in] left the first factor
/// \param[in] right the second factor
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise product
ARROW_EXPORT
Result<Datum> Multiply(const Datum& left, const Datum& right,
                       ArithmeticOptions options = ArithmeticOptions(),
                       ExecContext* ctx = NULLPTR);

/// \brief Divide two values. Array values must be the same length. If either
/// argument is null the result will be null. For integer types, if there is
/// a zero divisor, an error will be raised.
///
/// \param[in] left the dividend
/// \param[in] right the divisor
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise quotient
ARROW_EXPORT
Result<Datum> Divide(const Datum& left, const Datum& right,
                     ArithmeticOptions options = ArithmeticOptions(),
                     ExecContext* ctx = NULLPTR);

/// \brief Negate values.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value negated
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise negation
ARROW_EXPORT
Result<Datum> Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                     ExecContext* ctx = NULLPTR);

/// \brief Raise the values of base array to the power of the exponent array values.
/// Array values must be the same length. If either base or exponent is null the result
/// will be null.
///
/// \param[in] left the base
/// \param[in] right the exponent
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise base value raised to the power of exponent
ARROW_EXPORT
Result<Datum> Power(const Datum& left, const Datum& right,
                    ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Raise Euler's number to the power of specified exponent, element-wise.
/// If the exponent value is null the result will be null.
///
/// \param[in] arg the exponent
/// \param[in] ctx the function execution context, optional
/// \return the element-wise Euler's number raised to the power of exponent
ARROW_EXPORT
Result<Datum> Exp(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Left shift the left array by the right array. Array values must be the
/// same length. If either operand is null, the result will be null.
///
/// \param[in] left the value to shift
/// \param[in] right the value to shift by
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise left value shifted left by the right value
ARROW_EXPORT
Result<Datum> ShiftLeft(const Datum& left, const Datum& right,
                        ArithmeticOptions options = ArithmeticOptions(),
                        ExecContext* ctx = NULLPTR);

/// \brief Right shift the left array by the right array. Array values must be the
/// same length. If either operand is null, the result will be null. Performs a
/// logical shift for unsigned values, and an arithmetic shift for signed values.
///
/// \param[in] left the value to shift
/// \param[in] right the value to shift by
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise left value shifted right by the right value
ARROW_EXPORT
Result<Datum> ShiftRight(const Datum& left, const Datum& right,
                         ArithmeticOptions options = ArithmeticOptions(),
                         ExecContext* ctx = NULLPTR);

/// \brief Compute the sine of the array values.
/// \param[in] arg The values to compute the sine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise sine of the values
ARROW_EXPORT
Result<Datum> Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the cosine of the array values.
/// \param[in] arg The values to compute the cosine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise cosine of the values
ARROW_EXPORT
Result<Datum> Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse sine (arcsine) of the array values.
/// \param[in] arg The values to compute the inverse sine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse sine of the values
ARROW_EXPORT
Result<Datum> Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse cosine (arccosine) of the array values.
/// \param[in] arg The values to compute the inverse cosine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse cosine of the values
ARROW_EXPORT
Result<Datum> Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Compute the tangent of the array values.
/// \param[in] arg The values to compute the tangent for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise tangent of the values
ARROW_EXPORT
Result<Datum> Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse tangent (arctangent) of the array values.
/// \param[in] arg The values to compute the inverse tangent for.
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse tangent of the values
ARROW_EXPORT
Result<Datum> Atan(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse tangent (arctangent) of y/x, using the
/// argument signs to determine the correct quadrant.
/// \param[in] y The y-values to compute the inverse tangent for.
/// \param[in] x The x-values to compute the inverse tangent for.
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse tangent of the values
ARROW_EXPORT
Result<Datum> Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR);

/// \brief Get the natural log of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise natural log
ARROW_EXPORT
Result<Datum> Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                 ExecContext* ctx = NULLPTR);

/// \brief Get the log base 10 of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log base 10
ARROW_EXPORT
Result<Datum> Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Get the log base 2 of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log base 2
ARROW_EXPORT
Result<Datum> Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Get the natural log of (1 + value).
///
/// If argument is null the result will be null.
/// This function may be more accurate than Log(1 + value) for values close to zero.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise natural log
ARROW_EXPORT
Result<Datum> Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Get the log of a value to the given base.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] base The given base.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log to the given base
ARROW_EXPORT
Result<Datum> Logb(const Datum& arg, const Datum& base,
                   ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Get the square-root of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the square-root for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise square-root
ARROW_EXPORT
Result<Datum> Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);
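
// Usage sketch (illustrative comment only): Logb takes the base as a second
// Datum, so a scalar gives a fixed base (here 16) for the whole column.
//
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum lg,
//       arrow::compute::Logb(values, arrow::Datum(16.0)));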

/// \brief Round to the nearest integer less than or equal in magnitude to the
/// argument.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] ctx the function execution context, optional
/// \return the rounded value
ARROW_EXPORT
Result<Datum> Floor(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Round to the nearest integer greater than or equal in magnitude to the
/// argument.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] ctx the function execution context, optional
/// \return the rounded value
ARROW_EXPORT
Result<Datum> Ceil(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Get the integral part without fractional digits.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to truncate
/// \param[in] ctx the function execution context, optional
/// \return the truncated value
ARROW_EXPORT
Result<Datum> Trunc(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Find the element-wise maximum of any number of arrays or scalars.
/// Array values must be the same length.
///
/// \param[in] args arrays or scalars to operate on.
/// \param[in] options options for handling nulls, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise maximum
ARROW_EXPORT
Result<Datum> MaxElementWise(
    const std::vector<Datum>& args,
    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Find the element-wise minimum of any number of arrays or scalars.
/// Array values must be the same length.
///
/// \param[in] args arrays or scalars to operate on.
/// \param[in] options options for handling nulls, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise minimum
ARROW_EXPORT
Result<Datum> MinElementWise(
    const std::vector<Datum>& args,
    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Get the sign of a value. Array values can be of arbitrary length. If argument
/// is null the result will be null.
///
/// \param[in] arg the value to extract sign from
/// \param[in] ctx the function execution context, optional
/// \return the element-wise sign function
ARROW_EXPORT
Result<Datum> Sign(const Datum& arg, ExecContext* ctx = NULLPTR);
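
// Usage sketch (illustrative comment only): row-wise maximum across three
// columns; nulls are skipped by default per ElementWiseAggregateOptions.
//
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum row_max,
//       arrow::compute::MaxElementWise({col_a, col_b, col_c}));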

/// \brief Round a value to a given precision.
///
/// If arg is null the result will be null.
///
/// \param[in] arg the value to be rounded
/// \param[in] options rounding options (rounding mode and number of digits), optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(),
                    ExecContext* ctx = NULLPTR);

/// \brief Round a value to a given precision.
///
/// If arg1 or arg2 is null the result will be null. If arg2 is negative, the
/// rounding place shifts to the left (so -1 corresponds to rounding to the
/// nearest ten); if positive, it shifts to the right (+1 corresponds to
/// rounding to the nearest tenth).
///
/// \param[in] arg1 the value to be rounded
/// \param[in] arg2 the number of significant digits to round to
/// \param[in] options rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> RoundBinary(const Datum& arg1, const Datum& arg2,
                          RoundBinaryOptions options = RoundBinaryOptions::Defaults(),
                          ExecContext* ctx = NULLPTR);

/// \brief Round a value to a given multiple.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] options rounding options (rounding mode and multiple), optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> RoundToMultiple(
    const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Ceil a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to ceil
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> CeilTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Floor a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to floor
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> FloorTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Round a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to round
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RoundTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Invert the values of a boolean datum
/// \param[in] value datum to invert
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Invert(const Datum& value, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND of two boolean datums which always propagates nulls
/// (null and false is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND of two boolean datums with a Kleene truth table
/// (null and false is false).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneAnd(const Datum& left, const Datum& right,
                        ExecContext* ctx = NULLPTR);

/// \brief Element-wise OR of two boolean datums which always propagates nulls
/// (null or true is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise OR of two boolean datums with a Kleene truth table
/// (null or true is true).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneOr(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise XOR of two boolean datums
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND NOT of two boolean datums which always propagates nulls
/// (null and not true is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table
/// (false and not null is false, null and not true is false).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneAndNot(const Datum& left, const Datum& right,
                           ExecContext* ctx = NULLPTR);
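
// Usage sketch (illustrative comment only): Kleene logic treats null as
// "unknown", so KleeneAnd(null, false) is false while KleeneAnd(null, true)
// stays null; And() would return null in both cases.
//
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum conj,
//                         arrow::compute::KleeneAnd(mask_a, mask_b));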

/// \brief IsIn returns true for each element of `values` that is contained in
/// `value_set`
///
/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
///
/// \param[in] values array-like input to look up in value_set
/// \param[in] options SetLookupOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsIn(const Datum& values, const SetLookupOptions& options,
                   ExecContext* ctx = NULLPTR);
ARROW_EXPORT
Result<Datum> IsIn(const Datum& values, const Datum& value_set,
                   ExecContext* ctx = NULLPTR);

/// \brief IndexIn examines each slot in the values against a value_set array.
/// If the value is not found in value_set, null will be output.
/// If found, the index of occurrence within value_set (ignoring duplicates)
/// will be output.
///
/// For example given values = [99, 42, 3, null] and
/// value_set = [3, 3, 99], the output will be [2, null, 0, null]
///
/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
///
/// \param[in] values array-like input
/// \param[in] options SetLookupOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IndexIn(const Datum& values, const SetLookupOptions& options,
                      ExecContext* ctx = NULLPTR);
ARROW_EXPORT
Result<Datum> IndexIn(const Datum& values, const Datum& value_set,
                      ExecContext* ctx = NULLPTR);
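
// Usage sketch (illustrative comment only): IndexIn maps each value to its
// first position inside value_set, or null when absent.
//
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum idx,
//                         arrow::compute::IndexIn(values, value_set));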

/// \brief IsValid returns true for each element of `values` that is not null,
/// false otherwise
///
/// \param[in] values input to examine for validity
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsValid(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief IsNull returns true for each element of `values` that is null,
/// false otherwise
///
/// \param[in] values input to examine for nullity
/// \param[in] options NullOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);

/// \brief IsNan returns true for each element of `values` that is NaN,
/// false otherwise
///
/// \param[in] values input to look for NaN
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsNan(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief IfElse returns elements chosen from `left` or `right`
/// depending on `cond`. `null` values in `cond` will be promoted to the result.
///
/// \param[in] cond `Boolean` condition Scalar/Array
/// \param[in] left Scalar/Array
/// \param[in] right Scalar/Array
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IfElse(const Datum& cond, const Datum& left, const Datum& right,
                     ExecContext* ctx = NULLPTR);

/// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for
/// each row, select the first value for which the corresponding condition is
/// true, or (if given) select the 'else' value, else emit null. Note that a
/// null condition is the same as false.
///
/// \param[in] cond Conditions (Boolean)
/// \param[in] cases Values (any type), along with an optional 'else' value.
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> CaseWhen(const Datum& cond, const std::vector<Datum>& cases,
                       ExecContext* ctx = NULLPTR);
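
// Usage sketch (illustrative comment only): clamp negative readings to zero by
// building a boolean mask (via the assumed registry name "less") and selecting
// with IfElse.
//
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum is_neg,
//       arrow::compute::CallFunction("less", {readings, arrow::Datum(0.0)}));
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum clamped,
//       arrow::compute::IfElse(is_neg, arrow::Datum(0.0), readings));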
1217 |
+
|
1218 |
+
/// \brief Year returns year for each element of `values`
|
1219 |
+
///
|
1220 |
+
/// \param[in] values input to extract year from
|
1221 |
+
/// \param[in] ctx the function execution context, optional
|
1222 |
+
/// \return the resulting datum
|
1223 |
+
///
|
1224 |
+
/// \since 5.0.0
|
1225 |
+
/// \note API not yet finalized
|
1226 |
+
ARROW_EXPORT
|
1227 |
+
Result<Datum> Year(const Datum& values, ExecContext* ctx = NULLPTR);
|
1228 |
+
|
1229 |
+
/// \brief IsLeapYear returns if a year is a leap year for each element of `values`
|
1230 |
+
///
|
1231 |
+
/// \param[in] values input to extract leap year indicator from
|
1232 |
+
/// \param[in] ctx the function execution context, optional
|
1233 |
+
/// \return the resulting datum
|
1234 |
+
///
|
1235 |
+
/// \since 8.0.0
|
1236 |
+
/// \note API not yet finalized
|
1237 |
+
ARROW_EXPORT
|
1238 |
+
Result<Datum> IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR);
|
1239 |
+
|
1240 |
+
/// \brief Month returns month for each element of `values`.
|
1241 |
+
/// Month is encoded as January=1, December=12
|
1242 |
+
///
|
1243 |
+
/// \param[in] values input to extract month from
|
1244 |
+
/// \param[in] ctx the function execution context, optional
|
1245 |
+
/// \return the resulting datum
|
1246 |
+
///
|
1247 |
+
/// \since 5.0.0
|
1248 |
+
/// \note API not yet finalized
|
1249 |
+
ARROW_EXPORT
|
1250 |
+
Result<Datum> Month(const Datum& values, ExecContext* ctx = NULLPTR);
|
1251 |
+
|
1252 |
+
/// \brief Day returns day number for each element of `values`
|
1253 |
+
///
|
1254 |
+
/// \param[in] values input to extract day from
|
1255 |
+
/// \param[in] ctx the function execution context, optional
|
1256 |
+
/// \return the resulting datum
|
1257 |
+
///
|
1258 |
+
/// \since 5.0.0
|
1259 |
+
/// \note API not yet finalized
|
1260 |
+
ARROW_EXPORT
|
1261 |
+
Result<Datum> Day(const Datum& values, ExecContext* ctx = NULLPTR);
|
1262 |
+
|
1263 |
+
/// \brief YearMonthDay returns a struct containing the Year, Month and Day value for
|
1264 |
+
/// each element of `values`.
|
1265 |
+
///
|
1266 |
+
/// \param[in] values input to extract (year, month, day) struct from
|
1267 |
+
/// \param[in] ctx the function execution context, optional
|
1268 |
+
/// \return the resulting datum
|
1269 |
+
///
|
1270 |
+
/// \since 7.0.0
|
1271 |
+
/// \note API not yet finalized
|
1272 |
+
ARROW_EXPORT
|
1273 |
+
Result<Datum> YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR);
|
1274 |
+
|
1275 |
+
/// \brief DayOfWeek returns number of the day of the week value for each element of
|
1276 |
+
/// `values`.
|
1277 |
+
///
|
1278 |
+
/// By default week starts on Monday denoted by 0 and ends on Sunday denoted
|
1279 |
+
/// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be
|
1280 |
+
/// set using DayOfWeekOptions
|
1281 |
+
///
|
1282 |
+
/// \param[in] values input to extract number of the day of the week from
|
1283 |
+
/// \param[in] options for setting start of the week and day numbering
|
1284 |
+
/// \param[in] ctx the function execution context, optional
|
1285 |
+
/// \return the resulting datum
|
1286 |
+
///
|
1287 |
+
/// \since 5.0.0
|
1288 |
+
/// \note API not yet finalized
|
1289 |
+
ARROW_EXPORT Result<Datum> DayOfWeek(const Datum& values,
|
1290 |
+
DayOfWeekOptions options = DayOfWeekOptions(),
|
1291 |
+
ExecContext* ctx = NULLPTR);
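
A sketch of the options in practice, assuming DayOfWeekOptions takes `(count_from_zero, week_start)` in that order; verify against the options header before relying on it:

#include <arrow/api.h>
#include <arrow/compute/api.h>

// Number days 1..7 with the week starting on Sunday (week_start uses
// Monday=1 .. Sunday=7 per the documentation above).
arrow::Result<arrow::Datum> SundayBasedDayOfWeek(const arrow::Datum& timestamps) {
  arrow::compute::DayOfWeekOptions options(/*count_from_zero=*/false,
                                           /*week_start=*/7);
  return arrow::compute::DayOfWeek(timestamps, options);
}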

/// \brief DayOfYear returns number of day of the year for each element of `values`.
/// January 1st maps to day number 1, February 1st to 32, etc.
///
/// \param[in] values input to extract number of day of the year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief ISOYear returns ISO year number for each element of `values`.
/// First week of an ISO year has the majority (4 or more) of its days in January.
///
/// \param[in] values input to extract ISO year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> ISOYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief USYear returns US epidemiological year number for each element of `values`.
/// First week of US epidemiological year has the majority (4 or more) of its
/// days in January. Last week of US epidemiological year has the year's last
/// Wednesday in it. US epidemiological week starts on Sunday.
///
/// \param[in] values input to extract US epidemiological year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> USYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief ISOWeek returns ISO week of year number for each element of `values`.
/// First ISO week has the majority (4 or more) of its days in January.
/// ISO week starts on Monday. Year can have 52 or 53 weeks.
/// Week numbering starts with 1.
///
/// \param[in] values input to extract ISO week of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief USWeek returns US week of year number for each element of `values`.
/// First US week has the majority (4 or more) of its days in January.
/// US week starts on Sunday. Year can have 52 or 53 weeks.
/// Week numbering starts with 1.
///
/// \param[in] values input to extract US week of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> USWeek(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Week returns week of year number for each element of `values`.
/// First ISO week has the majority (4 or more) of its days in January.
/// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1
/// depending on WeekOptions.count_from_zero.
///
/// \param[in] values input to extract week of year from
/// \param[in] options for setting numbering start
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Week(const Datum& values, WeekOptions options = WeekOptions(),
                                ExecContext* ctx = NULLPTR);

/// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for
/// each element of `values`.
/// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7.
///
/// \param[in] values input to extract ISO calendar struct from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Quarter returns the quarter of year number for each element of `values`
/// First quarter maps to 1 and fourth quarter maps to 4.
///
/// \param[in] values input to extract quarter of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Quarter(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Hour returns hour value for each element of `values`
///
/// \param[in] values input to extract hour from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Hour(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Minute returns minutes value for each element of `values`
///
/// \param[in] values input to extract minutes from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Minute(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Second returns seconds value for each element of `values`
///
/// \param[in] values input to extract seconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Second(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Millisecond returns number of milliseconds since the last full second
/// for each element of `values`
///
/// \param[in] values input to extract milliseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Millisecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Microsecond returns number of microseconds since the last full millisecond
/// for each element of `values`
///
/// \param[in] values input to extract microseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Microsecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Nanosecond returns number of nanoseconds since the last full microsecond
/// for each element of `values`
///
/// \param[in] values input to extract nanoseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Subsecond returns the fraction of second elapsed since last full second
/// as a float for each element of `values`
///
/// \param[in] values input to extract subsecond from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Subsecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Format timestamps according to a format string
///
/// Return formatted time strings according to the format string
/// `StrftimeOptions::format` and to the locale specifier `StrftimeOptions::locale`.
///
/// \param[in] values input timestamps
/// \param[in] options for setting format string and locale
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Strftime(const Datum& values, StrftimeOptions options,
                                    ExecContext* ctx = NULLPTR);

/// \brief Parse timestamps according to a format string
///
/// Return parsed timestamps according to the format string
/// `StrptimeOptions::format` at time resolution `StrptimeOptions::unit`. Parse errors
/// are raised or converted to nulls depending on the
/// `StrptimeOptions::error_is_null` setting.
///
/// \param[in] values input strings
/// \param[in] options for setting format string, unit and error_is_null
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Strptime(const Datum& values, StrptimeOptions options,
                                    ExecContext* ctx = NULLPTR);
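
A round-trip sketch for the two functions above; the format string, unit, and the `StrptimeOptions(format, unit, error_is_null)` constructor order are assumptions to verify against the options header:

#include <arrow/api.h>
#include <arrow/compute/api.h>

// Parse strings to second-resolution timestamps, then format them back.
arrow::Status StrptimeStrftimeRoundTrip(const arrow::Datum& strings) {
  arrow::compute::StrptimeOptions parse_opts("%Y-%m-%d %H:%M:%S",
                                             arrow::TimeUnit::SECOND,
                                             /*error_is_null=*/true);
  ARROW_ASSIGN_OR_RAISE(arrow::Datum ts,
                        arrow::compute::Strptime(strings, parse_opts));

  // e.g. "2021-03-01 00:00:00" formats back as "2021-03-01".
  arrow::compute::StrftimeOptions format_opts("%Y-%m-%d");
  ARROW_ASSIGN_OR_RAISE(arrow::Datum formatted,
                        arrow::compute::Strftime(ts, format_opts));
  (void)formatted;
  return arrow::Status::OK();
}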

/// \brief Converts timestamps from local timestamp without a timezone to a timestamp with
/// timezone, interpreting the local timestamp as being in the specified timezone for each
/// element of `values`
///
/// \param[in] values input to convert
/// \param[in] options for setting source timezone, exception and ambiguous timestamp
/// handling.
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> AssumeTimezone(const Datum& values,
                                          AssumeTimezoneOptions options,
                                          ExecContext* ctx = NULLPTR);

/// \brief IsDaylightSavings extracts whether daylight savings is currently observed
/// for each element of `values`
///
/// \param[in] values input to extract daylight savings indicator from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> IsDaylightSavings(const Datum& values,
                                             ExecContext* ctx = NULLPTR);

/// \brief LocalTimestamp converts a timestamp to a timezone-naive local timestamp
///
/// \param[in] values input to convert to local time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> LocalTimestamp(const Datum& values,
                                          ExecContext* ctx = NULLPTR);

/// \brief YearsBetween finds the number of years between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> YearsBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief QuartersBetween finds the number of quarters between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> QuartersBetween(const Datum& left, const Datum& right,
                                           ExecContext* ctx = NULLPTR);

/// \brief MonthsBetween finds the number of months between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MonthsBetween(const Datum& left, const Datum& right,
                                         ExecContext* ctx = NULLPTR);

/// \brief WeeksBetween finds the number of weeks between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> WeeksBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief MonthDayNanoBetween finds the number of months, days, and nanoseconds
/// between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MonthDayNanoBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief DayTimeBetween finds the number of days and milliseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DayTimeBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief DaysBetween finds the number of days between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DaysBetween(const Datum& left, const Datum& right,
                                       ExecContext* ctx = NULLPTR);

/// \brief HoursBetween finds the number of hours between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> HoursBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief MinutesBetween finds the number of minutes between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MinutesBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief SecondsBetween finds the number of seconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> SecondsBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief MillisecondsBetween finds the number of milliseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MillisecondsBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief MicrosecondsBetween finds the number of microseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MicrosecondsBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief NanosecondsBetween finds the number of nanoseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> NanosecondsBetween(const Datum& left, const Datum& right,
                                              ExecContext* ctx = NULLPTR);
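
A minimal sketch of the *Between family, using DaysBetween as the representative; the wrapper is illustrative:

#include <arrow/api.h>
#include <arrow/compute/api.h>

// Elementwise day difference between two timestamp-like inputs:
// each output slot holds the number of whole days from start[i] to end[i].
arrow::Result<arrow::Datum> DaysBetweenExample(const arrow::Datum& start,
                                               const arrow::Datum& end) {
  return arrow::compute::DaysBetween(start, end);
}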

/// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given
/// query key in a map.
///
/// Returns an array of items for FIRST and LAST, and an array of lists of items for ALL.
///
/// \param[in] map to look in
/// \param[in] options to pass a query key and choose which matching keys to return
/// (FIRST, LAST or ALL)
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MapLookup(const Datum& map, MapLookupOptions options,
                                     ExecContext* ctx = NULLPTR);

} // namespace compute
} // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h
ADDED
@@ -0,0 +1,697 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <optional>
#include <utility>

#include "arrow/compute/function_options.h"
#include "arrow/compute/ordering.h"
#include "arrow/result.h"
#include "arrow/type_fwd.h"

namespace arrow {
namespace compute {

class ExecContext;

/// \addtogroup compute-concrete-options
/// @{

class ARROW_EXPORT FilterOptions : public FunctionOptions {
 public:
  /// Configure the action taken when a slot of the selection mask is null
  enum NullSelectionBehavior {
    /// The corresponding filtered value will be removed in the output.
    DROP,
    /// The corresponding filtered value will be null in the output.
    EMIT_NULL,
  };

  explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
  static constexpr char const kTypeName[] = "FilterOptions";
  static FilterOptions Defaults() { return FilterOptions(); }

  NullSelectionBehavior null_selection_behavior = DROP;
};

class ARROW_EXPORT TakeOptions : public FunctionOptions {
 public:
  explicit TakeOptions(bool boundscheck = true);
  static constexpr char const kTypeName[] = "TakeOptions";
  static TakeOptions BoundsCheck() { return TakeOptions(true); }
  static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
  static TakeOptions Defaults() { return BoundsCheck(); }

  bool boundscheck = true;
};

/// \brief Options for the dictionary encode function
class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
 public:
  /// Configure how null values will be encoded
  enum NullEncodingBehavior {
    /// The null value will be added to the dictionary with a proper index.
    ENCODE,
    /// The null value will be masked in the indices array.
    MASK
  };

  explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
  static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
  static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }

  NullEncodingBehavior null_encoding_behavior = MASK;
};

/// \brief Options for the run-end encode function
class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions {
 public:
  explicit RunEndEncodeOptions(std::shared_ptr<DataType> run_end_type = int32());
  static constexpr char const kTypeName[] = "RunEndEncodeOptions";
  static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); }

  std::shared_ptr<DataType> run_end_type;
};

class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
 public:
  explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
                            NullPlacement null_placement = NullPlacement::AtEnd);
  static constexpr char const kTypeName[] = "ArraySortOptions";
  static ArraySortOptions Defaults() { return ArraySortOptions(); }

  /// Sorting order
  SortOrder order;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};

class ARROW_EXPORT SortOptions : public FunctionOptions {
 public:
  explicit SortOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd);
  explicit SortOptions(const Ordering& ordering);
  static constexpr char const kTypeName[] = "SortOptions";
  static SortOptions Defaults() { return SortOptions(); }
  /// Convenience method to create an Ordering from SortOptions
  ///
  /// Note: Both classes contain the exact same information. However,
  /// SortOptions should only be used in a "function options" context while Ordering
  /// is used more generally.
  Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); }
  Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};

/// \brief SelectK options
class ARROW_EXPORT SelectKOptions : public FunctionOptions {
 public:
  explicit SelectKOptions(int64_t k = -1, std::vector<SortKey> sort_keys = {});
  static constexpr char const kTypeName[] = "SelectKOptions";
  static SelectKOptions Defaults() { return SelectKOptions(); }

  static SelectKOptions TopKDefault(int64_t k, std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    for (const auto& name : key_names) {
      keys.emplace_back(SortKey(name, SortOrder::Descending));
    }
    if (key_names.empty()) {
      keys.emplace_back(SortKey("not-used", SortOrder::Descending));
    }
    return SelectKOptions{k, keys};
  }
  static SelectKOptions BottomKDefault(int64_t k,
                                       std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    for (const auto& name : key_names) {
      keys.emplace_back(SortKey(name, SortOrder::Ascending));
    }
    if (key_names.empty()) {
      keys.emplace_back(SortKey("not-used", SortOrder::Ascending));
    }
    return SelectKOptions{k, keys};
  }

  /// The number of elements to keep (`k`).
  int64_t k;
  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
};

/// \brief Rank options
class ARROW_EXPORT RankOptions : public FunctionOptions {
 public:
  /// Configure how ties between equal values are handled
  enum Tiebreaker {
    /// Ties get the smallest possible rank in sorted order.
    Min,
    /// Ties get the largest possible rank in sorted order.
    Max,
    /// Ranks are assigned in order of when ties appear in the input.
    /// This ensures the ranks are a stable permutation of the input.
    First,
    /// The ranks span a dense [1, M] interval where M is the number
    /// of distinct values in the input.
    Dense
  };

  explicit RankOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First);
  /// Convenience constructor for array inputs
  explicit RankOptions(SortOrder order,
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First)
      : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {}

  static constexpr char const kTypeName[] = "RankOptions";
  static RankOptions Defaults() { return RankOptions(); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
  /// Tiebreaker for dealing with equal values in ranks
  Tiebreaker tiebreaker;
};

/// \brief Partitioning options for NthToIndices
class ARROW_EXPORT PartitionNthOptions : public FunctionOptions {
 public:
  explicit PartitionNthOptions(int64_t pivot,
                               NullPlacement null_placement = NullPlacement::AtEnd);
  PartitionNthOptions() : PartitionNthOptions(0) {}
  static constexpr char const kTypeName[] = "PartitionNthOptions";

  /// The index into the equivalent sorted array of the partition pivot element.
  int64_t pivot;
  /// Whether nulls and NaNs are partitioned at the start or at the end
  NullPlacement null_placement;
};

/// \brief Options for cumulative functions
/// \note Also aliased as CumulativeSumOptions for backward compatibility
class ARROW_EXPORT CumulativeOptions : public FunctionOptions {
 public:
  explicit CumulativeOptions(bool skip_nulls = false);
  explicit CumulativeOptions(double start, bool skip_nulls = false);
  explicit CumulativeOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false);
  static constexpr char const kTypeName[] = "CumulativeOptions";
  static CumulativeOptions Defaults() { return CumulativeOptions(); }

  /// Optional starting value for cumulative operation computation, default depends on the
  /// operation and input type.
  /// - sum: 0
  /// - prod: 1
  /// - min: maximum of the input type
  /// - max: minimum of the input type
  /// - mean: start is ignored because it has no meaning for mean
  std::optional<std::shared_ptr<Scalar>> start;

  /// If true, nulls in the input are ignored and produce a corresponding null output.
  /// When false, the first null encountered is propagated through the remaining output.
  bool skip_nulls = false;
};
using CumulativeSumOptions = CumulativeOptions;  // For backward compatibility

/// \brief Options for pairwise functions
class ARROW_EXPORT PairwiseOptions : public FunctionOptions {
 public:
  explicit PairwiseOptions(int64_t periods = 1);
  static constexpr char const kTypeName[] = "PairwiseOptions";
  static PairwiseOptions Defaults() { return PairwiseOptions(); }

  /// Periods to shift for applying the binary operation, accepts negative values.
  int64_t periods = 1;
};

/// @}

/// \brief Filter with a boolean selection filter
///
/// The output will be populated with values from the input at positions
/// where the selection filter is not 0. Nulls in the filter will be handled
/// based on options.null_selection_behavior.
///
/// For example given values = ["a", "b", "c", null, "e", "f"] and
/// filter = [0, 1, 1, 0, null, 1], the output will be
/// (null_selection_behavior == DROP)      = ["b", "c", "f"]
/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"]
///
/// \param[in] values array to filter
/// \param[in] filter indicates which values should be filtered out
/// \param[in] options configures null_selection_behavior
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> Filter(const Datum& values, const Datum& filter,
                     const FilterOptions& options = FilterOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);
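
A sketch of the two null-handling modes from caller code; the wrapper function is illustrative:

#include <arrow/api.h>
#include <arrow/compute/api.h>

arrow::Status FilterExample(const arrow::Datum& values, const arrow::Datum& mask) {
  using arrow::compute::Filter;
  using arrow::compute::FilterOptions;

  // Null mask slots drop the corresponding value entirely.
  ARROW_ASSIGN_OR_RAISE(arrow::Datum dropped,
                        Filter(values, mask, FilterOptions(FilterOptions::DROP)));
  // Null mask slots emit a null in the output instead.
  ARROW_ASSIGN_OR_RAISE(arrow::Datum with_nulls,
                        Filter(values, mask, FilterOptions(FilterOptions::EMIT_NULL)));
  (void)dropped;
  (void)with_nulls;
  return arrow::Status::OK();
}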

namespace internal {

// These internal functions are implemented in kernels/vector_selection.cc

/// \brief Return the number of selected indices in the boolean filter
///
/// \param filter a plain or run-end encoded boolean array with or without nulls
/// \param null_selection how to handle nulls in the filter
ARROW_EXPORT
int64_t GetFilterOutputSize(const ArraySpan& filter,
                            FilterOptions::NullSelectionBehavior null_selection);

/// \brief Compute uint64 selection indices for use with Take given a boolean
/// filter
///
/// \param filter a plain or run-end encoded boolean array with or without nulls
/// \param null_selection how to handle nulls in the filter
ARROW_EXPORT
Result<std::shared_ptr<ArrayData>> GetTakeIndices(
    const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection,
    MemoryPool* memory_pool = default_memory_pool());

} // namespace internal

/// \brief ReplaceWithMask replaces each value in the array corresponding
/// to a true value in the mask with the next element from `replacements`.
///
/// \param[in] values Array input to replace
/// \param[in] mask Array or Scalar of Boolean mask values
/// \param[in] replacements The replacement values to draw from. There must
/// be as many replacement values as true values in the mask.
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> ReplaceWithMask(const Datum& values, const Datum& mask,
                              const Datum& replacements, ExecContext* ctx = NULLPTR);

/// \brief FillNullForward fills null values in the forward direction
///
/// The output array will be of the same type as the input values
/// array, with null values replaced in the forward direction.
///
/// For example given values = ["a", "b", "c", null, null, "f"],
/// the output will be = ["a", "b", "c", "c", "c", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief FillNullBackward fills null values in the backward direction
///
/// The output array will be of the same type as the input values
/// array, with null values replaced in the backward direction.
///
/// For example given values = ["a", "b", "c", null, null, "f"],
/// the output will be = ["a", "b", "c", "f", "f", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
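
A small sketch combining the two fill directions; the wrapper is illustrative:

#include <arrow/api.h>
#include <arrow/compute/api.h>

// Forward-fill, then backward-fill: any nulls before the first valid value
// survive the forward pass, so the backward pass resolves them from the right.
arrow::Result<arrow::Datum> FillBothWays(const arrow::Datum& values) {
  ARROW_ASSIGN_OR_RAISE(arrow::Datum forward,
                        arrow::compute::FillNullForward(values));
  return arrow::compute::FillNullBackward(forward);
}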

/// \brief Take from an array of values at indices in another array
///
/// The output array will be of the same type as the input values
/// array, with elements taken from the values array at the given
/// indices. If an index is null then the taken element will be null.
///
/// For example given values = ["a", "b", "c", null, "e", "f"] and
/// indices = [2, 1, null, 3], the output will be
/// = [values[2], values[1], null, values[3]]
/// = ["c", "b", null, null]
///
/// \param[in] values datum from which to take
/// \param[in] indices which values to take
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> Take(const Datum& values, const Datum& indices,
                   const TakeOptions& options = TakeOptions::Defaults(),
                   ExecContext* ctx = NULLPTR);

/// \brief Take with Array inputs and output
ARROW_EXPORT
Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
                                    const TakeOptions& options = TakeOptions::Defaults(),
                                    ExecContext* ctx = NULLPTR);
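
A minimal sketch of the Array overload; the builders, values, and wrapper are illustrative:

#include <arrow/api.h>
#include <arrow/compute/api.h>

arrow::Status TakeExample() {
  arrow::StringBuilder values_builder;
  ARROW_RETURN_NOT_OK(values_builder.AppendValues({"a", "b", "c"}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> values,
                        values_builder.Finish());

  arrow::Int32Builder indices_builder;
  ARROW_RETURN_NOT_OK(indices_builder.AppendValues({2, 0, 2}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> indices,
                        indices_builder.Finish());

  // Result: ["c", "a", "c"]
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> taken,
                        arrow::compute::Take(*values, *indices));
  (void)taken;
  return arrow::Status::OK();
}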

/// \brief Drop Null from an array of values
///
/// The output array will be of the same type as the input values
/// array, with elements taken from the values array without nulls.
///
/// For example given values = ["a", "b", "c", null, "e", "f"],
/// the output will be = ["a", "b", "c", "e", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief DropNull with Array inputs and output
ARROW_EXPORT
Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);

/// \brief Return indices that partition an array around the n-th sorted element.
///
/// Find the index of the n-th (0-based) smallest value and perform an indirect
/// partition of the array around that element. Output indices[0 ~ n-1]
/// hold values no greater than the n-th element, and indices[n+1 ~ end]
/// hold values no less than the n-th element. Elements in each partition
/// are not sorted. Nulls will be partitioned to the end of the output.
/// Output is not guaranteed to be stable.
///
/// \param[in] values array to be partitioned
/// \param[in] n pivot array around sorted n-th element
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would partition an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
                                            ExecContext* ctx = NULLPTR);

/// \brief Return indices that partition an array around the n-th sorted element.
///
/// This overload takes a PartitionNthOptions specifying the pivot index
/// and the null handling.
///
/// \param[in] values array to be partitioned
/// \param[in] options options including pivot index and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would partition an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
                                            const PartitionNthOptions& options,
                                            ExecContext* ctx = NULLPTR);

/// \brief Return indices that would select the first `k` elements.
///
/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output
/// array will contain indices such that the item indicated by the k-th index will be in
/// the position it would be if the datum were sorted by `options.sort_keys`. However,
/// indices of null values will not be part of the output. The sort is not guaranteed to
/// be stable.
///
/// \param[in] datum datum to be partitioned
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return a datum with the same schema as the input
ARROW_EXPORT
Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
                                               const SelectKOptions& options,
                                               ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an array.
///
/// Perform an indirect sort of array. The output array will contain
/// indices that would sort an array, which would be the same length
/// as input. Nulls will be stably partitioned to the end of the output
/// regardless of order.
///
/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order
/// = SortOrder::Descending, the output will be [5, 2, 4, 1, 0, 3].
///
/// \param[in] array array to sort
/// \param[in] order ascending or descending
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
                                           SortOrder order = SortOrder::Ascending,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an array.
///
/// This overload takes an ArraySortOptions specifying the sort order
/// and the null handling.
///
/// \param[in] array array to sort
/// \param[in] options options including sort order and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
                                           const ArraySortOptions& options,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort a chunked array.
///
/// Perform an indirect sort of chunked array. The output array will
/// contain indices that would sort a chunked array, which would be
/// the same length as input. Nulls will be stably partitioned to the
/// end of the output regardless of order.
///
/// For example given chunked_array = [[null, 1], [3.3], [null, 2,
/// 5.3]] and order = SortOrder::Descending, the output will be [5, 2,
/// 4, 1, 0, 3].
///
/// \param[in] chunked_array chunked array to sort
/// \param[in] order ascending or descending
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
                                           SortOrder order = SortOrder::Ascending,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort a chunked array.
///
/// This overload takes an ArraySortOptions specifying the sort order
/// and the null handling.
///
/// \param[in] chunked_array chunked array to sort
/// \param[in] options options including sort order and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
                                           const ArraySortOptions& options,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an input in the
/// specified order. Input is one of array, chunked array, record batch,
/// or table.
///
/// Perform an indirect sort of input. The output array will contain
/// indices that would sort an input, which would be the same length
/// as input. Nulls will be stably partitioned to the start or to the end
/// of the output depending on SortOptions::null_placement.
///
/// For example given input (table) = {
/// "column1": [[null, 1], [3, null, 2, 1]],
/// "column2": [[5], [3, null, null, 5, 5]],
/// } and options = {
/// {"column1", SortOrder::Ascending},
/// {"column2", SortOrder::Descending},
/// }, the output will be [5, 1, 4, 2, 0, 3].
///
/// \param[in] datum array, chunked array, record batch or table to sort
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort a table
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Datum& datum, const SortOptions& options,
                                           ExecContext* ctx = NULLPTR);
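
A common pattern built from the declarations above: compute sort indices for a table, then gather rows with Take. The column names "a" and "b" and the wrapper are illustrative assumptions:

#include <arrow/api.h>
#include <arrow/compute/api.h>

arrow::Result<arrow::Datum> SortTable(const std::shared_ptr<arrow::Table>& table) {
  arrow::compute::SortOptions options(
      {arrow::compute::SortKey("a", arrow::compute::SortOrder::Ascending),
       arrow::compute::SortKey("b", arrow::compute::SortOrder::Descending)});
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> indices,
                        arrow::compute::SortIndices(arrow::Datum(table), options));
  // Materialize the sorted table by gathering rows at the sorted indices.
  return arrow::compute::Take(arrow::Datum(table), arrow::Datum(indices));
}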

/// \brief Compute unique elements from an array-like object
///
/// Note if a null occurs in the input it will NOT be included in the output.
///
/// \param[in] datum array-like input
/// \param[in] ctx the function execution context, optional
/// \return result as Array
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<std::shared_ptr<Array>> Unique(const Datum& datum, ExecContext* ctx = NULLPTR);

// Constants for accessing the output of ValueCounts
ARROW_EXPORT extern const char kValuesFieldName[];
ARROW_EXPORT extern const char kCountsFieldName[];
ARROW_EXPORT extern const int32_t kValuesFieldIndex;
ARROW_EXPORT extern const int32_t kCountsFieldIndex;

/// \brief Return counts of unique elements from an array-like object.
///
/// Note that the counts do not include counts for nulls in the array. These can be
/// obtained separately from metadata.
///
/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values
/// which can lead to unexpected results if the input Array has these values.
///
/// \param[in] value array-like input
/// \param[in] ctx the function execution context, optional
/// \return counts An array of <input type "Values", int64_t "Counts"> structs.
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<std::shared_ptr<StructArray>> ValueCounts(const Datum& value,
                                                 ExecContext* ctx = NULLPTR);

/// \brief Dictionary-encode values in an array-like object
///
/// Any nulls encountered in the dictionary will be handled according to the
/// specified null encoding behavior.
///
/// For example, given values ["a", "b", null, "a", null] the output will be
/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2]       / Dict: ["a", "b", null]
/// (null_encoding == MASK)   Indices: [0, 1, null, 0, null] / Dict: ["a", "b"]
///
/// If the input is already dictionary encoded this function is a no-op unless
/// it needs to modify the null_encoding (TODO)
///
/// \param[in] data array-like input
/// \param[in] options configures null encoding behavior
/// \param[in] ctx the function execution context, optional
/// \return result with same shape and type as input
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> DictionaryEncode(
    const Datum& data,
    const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);
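
A minimal sketch selecting the non-default null handling; the wrapper is illustrative:

#include <arrow/api.h>
#include <arrow/compute/api.h>

// Dictionary-encode with nulls kept in the dictionary (ENCODE) rather than
// masked in the indices array (MASK, the default).
arrow::Result<arrow::Datum> EncodeKeepingNulls(const arrow::Datum& data) {
  arrow::compute::DictionaryEncodeOptions options(
      arrow::compute::DictionaryEncodeOptions::ENCODE);
  return arrow::compute::DictionaryEncode(data, options);
}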

/// \brief Run-end-encode values in an array-like object
///
/// The returned run-end encoded type uses the same value type as the input and
/// the run-end type defined in the options.
///
/// \param[in] value array-like input
/// \param[in] options configures encoding behavior
/// \param[in] ctx the function execution context, optional
/// \return result with same shape but run-end encoded
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RunEndEncode(
    const Datum& value,
    const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Decode a run-end encoded array to a plain array
///
/// The output data type is the same as the values array type of the run-end encoded
/// input.
///
/// \param[in] value run-end-encoded input
/// \param[in] ctx the function execution context, optional
/// \return plain array resulting from decoding the run-end encoded input
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR);
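
A round-trip sketch for the pair of functions above; the int16 run-end type and the wrapper are illustrative choices:

#include <arrow/api.h>
#include <arrow/compute/api.h>

arrow::Status RunEndRoundTrip(const arrow::Datum& plain) {
  // Encode runs with int16 run ends, then decode back to a plain array.
  arrow::compute::RunEndEncodeOptions options(arrow::int16());
  ARROW_ASSIGN_OR_RAISE(arrow::Datum encoded,
                        arrow::compute::RunEndEncode(plain, options));
  ARROW_ASSIGN_OR_RAISE(arrow::Datum decoded,
                        arrow::compute::RunEndDecode(encoded));
  (void)decoded;  // Same logical values as `plain`.
  return arrow::Status::OK();
}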

/// \brief Compute the cumulative sum of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative sum behavior
/// \param[in] check_overflow whether to check for overflow; if true, return Invalid
/// status on overflow, otherwise wrap around on overflow
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeSum(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    bool check_overflow = false, ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative product of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative prod behavior
/// \param[in] check_overflow whether to check for overflow; if true, return Invalid
/// status on overflow, otherwise wrap around on overflow
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeProd(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    bool check_overflow = false, ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative max of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative max behavior
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMax(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative min of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative min behavior
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMin(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative mean of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative mean behavior, `start` is ignored
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMean(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);
|
673 |
+
|
674 |
+
/// \brief Return the first order difference of an array.
|
675 |
+
///
|
676 |
+
/// Computes the first order difference of an array, i.e.
|
677 |
+
/// output[i] = input[i] - input[i - p] if i >= p
|
678 |
+
/// output[i] = null otherwise
|
679 |
+
/// where p is the period. For example, with p = 1,
|
680 |
+
/// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5].
|
681 |
+
/// With p = 2,
|
682 |
+
/// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6]
|
683 |
+
/// p can also be negative, in which case the diff is computed in
|
684 |
+
/// the opposite direction.
|
685 |
+
/// \param[in] array array input
|
686 |
+
/// \param[in] options options, specifying overflow behavior and period
|
687 |
+
/// \param[in] check_overflow whether to return error on overflow
|
688 |
+
/// \param[in] ctx the function execution context, optional
|
689 |
+
/// \return result as array
|
690 |
+
ARROW_EXPORT
|
691 |
+
Result<std::shared_ptr<Array>> PairwiseDiff(const Array& array,
|
692 |
+
const PairwiseOptions& options,
|
693 |
+
bool check_overflow = false,
|
694 |
+
ExecContext* ctx = NULLPTR);
|
695 |
+
|
696 |
+
} // namespace compute
|
697 |
+
} // namespace arrow
|
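
Below is a minimal usage sketch (not part of the diff above) for the vector kernels declared in `api_vector.h`. It assumes the bundled Arrow C++ headers are on the include path and the program links against `libarrow`.

```cpp
#include <iostream>
#include <memory>

#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

arrow::Status Example() {
  // The DictionaryEncode docstring's example input: ["a", "b", null, "a", null]
  arrow::StringBuilder builder;
  ARROW_RETURN_NOT_OK(builder.Append("a"));
  ARROW_RETURN_NOT_OK(builder.Append("b"));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_RETURN_NOT_OK(builder.Append("a"));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  std::shared_ptr<arrow::Array> values;
  ARROW_RETURN_NOT_OK(builder.Finish(&values));

  // With default DictionaryEncodeOptions the nulls are masked in the indices
  // (the MASK case in the docstring): Indices [0, 1, null, 0, null].
  ARROW_ASSIGN_OR_RAISE(arrow::Datum encoded,
                        arrow::compute::DictionaryEncode(values));
  std::cout << encoded.make_array()->ToString() << std::endl;
  return arrow::Status::OK();
}

int main() { return Example().ok() ? 0 : 1; }
```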
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h
ADDED
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/compute/function.h"
+#include "arrow/compute/function_options.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+
+namespace compute {
+
+class ExecContext;
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+class ARROW_EXPORT CastOptions : public FunctionOptions {
+ public:
+  explicit CastOptions(bool safe = true);
+
+  static constexpr char const kTypeName[] = "CastOptions";
+  static CastOptions Safe(TypeHolder to_type = {}) {
+    CastOptions safe(true);
+    safe.to_type = std::move(to_type);
+    return safe;
+  }
+
+  static CastOptions Unsafe(TypeHolder to_type = {}) {
+    CastOptions unsafe(false);
+    unsafe.to_type = std::move(to_type);
+    return unsafe;
+  }
+
+  // Type being cast to. May be passed separately to the eager function
+  // compute::Cast
+  TypeHolder to_type;
+
+  bool allow_int_overflow;
+  bool allow_time_truncate;
+  bool allow_time_overflow;
+  bool allow_decimal_truncate;
+  bool allow_float_truncate;
+  // Indicates whether conversions from Binary/FixedSizeBinary to string must
+  // validate the utf8 payload.
+  bool allow_invalid_utf8;
+
+  /// true if the safety options all match CastOptions::Safe
+  ///
+  /// Note, if this returns false it does not mean is_unsafe will return true
+  bool is_safe() const;
+  /// true if the safety options all match CastOptions::Unsafe
+  ///
+  /// Note, if this returns false it does not mean is_safe will return true
+  bool is_unsafe() const;
+};
+
+/// @}
+
+/// \brief Return true if a cast function is defined
+ARROW_EXPORT
+bool CanCast(const DataType& from_type, const DataType& to_type);
+
+// ----------------------------------------------------------------------
+// Convenience invocation APIs for a number of kernels
+
+/// \brief Cast from one array type to another
+/// \param[in] value array to cast
+/// \param[in] to_type type to cast to
+/// \param[in] options casting options
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting array
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Cast(const Array& value, const TypeHolder& to_type,
+                                    const CastOptions& options = CastOptions::Safe(),
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Cast from one array type to another
+/// \param[in] value array to cast
+/// \param[in] options casting options. The "to_type" field must be populated
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting array
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Cast(const Datum& value, const CastOptions& options,
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Cast from one value to another
+/// \param[in] value datum to cast
+/// \param[in] to_type type to cast to
+/// \param[in] options casting options
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Cast(const Datum& value, const TypeHolder& to_type,
+                   const CastOptions& options = CastOptions::Safe(),
+                   ExecContext* ctx = NULLPTR);
+
+}  // namespace compute
+}  // namespace arrow
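
A minimal sketch (not part of the diff above) of the `Cast` convenience API declared in `cast.h`: the default safe options reject lossy conversions, while `CastOptions::Unsafe` permits them.

```cpp
#include <iostream>
#include <memory>

#include "arrow/api.h"
#include "arrow/compute/cast.h"

arrow::Status Example() {
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 300}));
  std::shared_ptr<arrow::Array> values;
  ARROW_RETURN_NOT_OK(builder.Finish(&values));

  // 300 does not fit in int8, so the default (safe) cast returns an error.
  arrow::Result<std::shared_ptr<arrow::Array>> safe =
      arrow::compute::Cast(*values, arrow::int8());
  std::cout << "safe cast ok? " << safe.ok() << std::endl;  // 0

  // Unsafe options allow the integer overflow (the value is truncated).
  ARROW_ASSIGN_OR_RAISE(
      std::shared_ptr<arrow::Array> unsafe,
      arrow::compute::Cast(*values, arrow::int8(),
                           arrow::compute::CastOptions::Unsafe()));
  std::cout << unsafe->ToString() << std::endl;
  return arrow::Status::OK();
}

int main() { return Example().ok() ? 0 : 1; }
```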
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h
ADDED
@@ -0,0 +1,489 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/data.h"
+#include "arrow/compute/expression.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+// It seems like 64K might be a good default chunksize to use for execution
+// based on the experience of other query processing systems. The current
+// default is not to chunk contiguous arrays, though, but this may change in
+// the future once parallel execution is implemented
+static constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
+
+/// \brief Context for expression-global variables and options used by
+/// function evaluation
+class ARROW_EXPORT ExecContext {
+ public:
+  // If no function registry passed, the default is used.
+  explicit ExecContext(MemoryPool* pool = default_memory_pool(),
+                       ::arrow::internal::Executor* executor = NULLPTR,
+                       FunctionRegistry* func_registry = NULLPTR);
+
+  /// \brief The MemoryPool used for allocations, default is
+  /// default_memory_pool().
+  MemoryPool* memory_pool() const { return pool_; }
+
+  const ::arrow::internal::CpuInfo* cpu_info() const;
+
+  /// \brief An Executor which may be used to parallelize execution.
+  ::arrow::internal::Executor* executor() const { return executor_; }
+
+  /// \brief The FunctionRegistry for looking up functions by name and
+  /// selecting kernels for execution. Defaults to the library-global function
+  /// registry provided by GetFunctionRegistry.
+  FunctionRegistry* func_registry() const { return func_registry_; }
+
+  // \brief Set maximum length unit of work for kernel execution. Larger
+  // contiguous array inputs will be split into smaller chunks, and, if
+  // possible and enabled, processed in parallel. The default chunksize is
+  // INT64_MAX, so contiguous arrays are not split.
+  void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
+
+  // \brief Maximum length for ExecBatch data chunks processed by
+  // kernels. Contiguous array inputs with longer length will be split into
+  // smaller chunks.
+  int64_t exec_chunksize() const { return exec_chunksize_; }
+
+  /// \brief Set whether to use multiple threads for function execution. This
+  /// is not yet used.
+  void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
+
+  /// \brief If true, then utilize multiple threads where relevant for function
+  /// execution. This is not yet used.
+  bool use_threads() const { return use_threads_; }
+
+  // Set the preallocation strategy for kernel execution as it relates to
+  // chunked execution. For chunked execution, whether via ChunkedArray inputs
+  // or splitting larger Array arguments into smaller pieces, contiguous
+  // allocation (if permitted by the kernel) will allocate one large array to
+  // write output into, yielding it to the caller at the end. If this option is
+  // set to off, then preallocations will be performed independently for each
+  // chunk of execution
+  //
+  // TODO: At some point we might want to limit the size of contiguous
+  // preallocations. For example, even if the exec_chunksize is 64K or less, we
+  // might limit contiguous allocations to 1M records, say.
+  void set_preallocate_contiguous(bool preallocate) {
+    preallocate_contiguous_ = preallocate;
+  }
+
+  /// \brief If contiguous preallocations should be used when doing chunked
+  /// execution as specified by exec_chunksize(). See
+  /// set_preallocate_contiguous() for more information.
+  bool preallocate_contiguous() const { return preallocate_contiguous_; }
+
+ private:
+  MemoryPool* pool_;
+  ::arrow::internal::Executor* executor_;
+  FunctionRegistry* func_registry_;
+  int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
+  bool preallocate_contiguous_ = true;
+  bool use_threads_ = true;
+};
+
+// TODO: Consider standardizing on uint16 selection vectors and only use them
+// when we can ensure that each value is 64K length or smaller
+
+/// \brief Container for an array of value selection indices that were
+/// materialized from a filter.
+///
+/// Columnar query engines (see e.g. [1]) have found that rather than
+/// materializing filtered data, the filter can instead be converted to an
+/// array of the "on" indices and then "fusing" these indices in operator
+/// implementations. This is especially relevant for aggregations but also
+/// applies to scalar operations.
+///
+/// We are not yet using this so this is mostly a placeholder for now.
+///
+/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf
+class ARROW_EXPORT SelectionVector {
+ public:
+  explicit SelectionVector(std::shared_ptr<ArrayData> data);
+
+  explicit SelectionVector(const Array& arr);
+
+  /// \brief Create SelectionVector from boolean mask
+  static Result<std::shared_ptr<SelectionVector>> FromMask(const BooleanArray& arr);
+
+  const int32_t* indices() const { return indices_; }
+  int32_t length() const;
+
+ private:
+  std::shared_ptr<ArrayData> data_;
+  const int32_t* indices_;
+};
+
+/// An index to represent that a batch does not belong to an ordered stream
+constexpr int64_t kUnsequencedIndex = -1;
+
+/// \brief A unit of work for kernel execution. It contains a collection of
+/// Array and Scalar values and an optional SelectionVector indicating that
+/// there is an unmaterialized filter that either must be materialized, or (if
+/// the kernel supports it) pushed down into the kernel implementation.
+///
+/// ExecBatch is semantically similar to RecordBatch in that in a SQL context
+/// it represents a collection of records, but constant "columns" are
+/// represented by Scalar values rather than having to be converted into arrays
+/// with repeated values.
+///
+/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight
+/// than is desirable for this class. Microbenchmarks would help determine for
+/// sure. See ARROW-8928.
+
+/// \addtogroup acero-internals
+/// @{
+
+struct ARROW_EXPORT ExecBatch {
+  ExecBatch() = default;
+  ExecBatch(std::vector<Datum> values, int64_t length)
+      : values(std::move(values)), length(length) {}
+
+  explicit ExecBatch(const RecordBatch& batch);
+
+  /// \brief Infer the ExecBatch length from values.
+  static Result<int64_t> InferLength(const std::vector<Datum>& values);
+
+  /// Creates an ExecBatch with length-validation.
+  ///
+  /// If any value is given, then all values must have a common length. If the given
+  /// length is negative, then the length of the ExecBatch is set to this common length,
+  /// or to 1 if no values are given. Otherwise, the given length must equal the common
+  /// length, if any value is given.
+  static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);
+
+  Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
+      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
+
+  /// The values representing positional arguments to be passed to a kernel's
+  /// exec function for processing.
+  std::vector<Datum> values;
+
+  /// A deferred filter represented as an array of indices into the values.
+  ///
+  /// For example, the filter [true, true, false, true] would be represented as
+  /// the selection vector [0, 1, 3]. When the selection vector is set,
+  /// ExecBatch::length is equal to the length of this array.
+  std::shared_ptr<SelectionVector> selection_vector;
+
+  /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
+  Expression guarantee = literal(true);
+
+  /// The semantic length of the ExecBatch. When the values are all scalars,
+  /// the length should be set to 1 for non-aggregate kernels, otherwise the
+  /// length is taken from the array values, except when there is a selection
+  /// vector. When there is a selection vector set, the length of the batch is
+  /// the length of the selection. Aggregate kernels can have an ExecBatch
+  /// formed by projecting just the partition columns from a batch, in which
+  /// case it would have scalar rows with length greater than 1.
+  ///
+  /// If the array values are of length 0 then the length is 0 regardless of
+  /// whether any values are Scalar.
+  int64_t length = 0;
+
+  /// \brief index of this batch in a sorted stream of batches
+  ///
+  /// This index must be strictly monotonic starting at 0 without gaps, or
+  /// it can be set to kUnsequencedIndex if there is no meaningful order
+  int64_t index = kUnsequencedIndex;
+
+  /// \brief The sum of bytes in each buffer referenced by the batch
+  ///
+  /// Note: Scalars are not counted
+  /// Note: Some values may reference only part of a buffer, for
+  ///       example, an array with an offset. The actual data
+  ///       visible to this batch will be smaller than the total
+  ///       buffer size in this case.
+  int64_t TotalBufferSize() const;
+
+  /// \brief Return the value at the i-th index
+  template <typename index_type>
+  inline const Datum& operator[](index_type i) const {
+    return values[i];
+  }
+
+  bool Equals(const ExecBatch& other) const;
+
+  /// \brief A convenience for the number of values / arguments.
+  int num_values() const { return static_cast<int>(values.size()); }
+
+  ExecBatch Slice(int64_t offset, int64_t length) const;
+
+  Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;
+
+  /// \brief A convenience for returning the types from the batch.
+  std::vector<TypeHolder> GetTypes() const {
+    std::vector<TypeHolder> result;
+    for (const auto& value : this->values) {
+      result.emplace_back(value.type());
+    }
+    return result;
+  }
+
+  std::string ToString() const;
+};
+
+inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); }
+inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); }
+
+ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*);
+
+/// @}
+
+/// \defgroup compute-internals Utilities for calling functions, useful for those
+/// extending the function registry
+///
+/// @{
+
+struct ExecValue {
+  ArraySpan array = {};
+  const Scalar* scalar = NULLPTR;
+
+  ExecValue(Scalar* scalar)  // NOLINT implicit conversion
+      : scalar(scalar) {}
+
+  ExecValue(ArraySpan array)  // NOLINT implicit conversion
+      : array(std::move(array)) {}
+
+  ExecValue(const ArrayData& array) {  // NOLINT implicit conversion
+    this->array.SetMembers(array);
+  }
+
+  ExecValue() = default;
+  ExecValue(const ExecValue& other) = default;
+  ExecValue& operator=(const ExecValue& other) = default;
+  ExecValue(ExecValue&& other) = default;
+  ExecValue& operator=(ExecValue&& other) = default;
+
+  int64_t length() const { return this->is_array() ? this->array.length : 1; }
+
+  bool is_array() const { return this->scalar == NULLPTR; }
+  bool is_scalar() const { return !this->is_array(); }
+
+  void SetArray(const ArrayData& array) {
+    this->array.SetMembers(array);
+    this->scalar = NULLPTR;
+  }
+
+  void SetScalar(const Scalar* scalar) { this->scalar = scalar; }
+
+  template <typename ExactType>
+  const ExactType& scalar_as() const {
+    return ::arrow::internal::checked_cast<const ExactType&>(*this->scalar);
+  }
+
+  /// XXX: here temporarily for compatibility with datum, see
+  /// e.g. MakeStructExec in scalar_nested.cc
+  int64_t null_count() const {
+    if (this->is_array()) {
+      return this->array.GetNullCount();
+    } else {
+      return this->scalar->is_valid ? 0 : 1;
+    }
+  }
+
+  const DataType* type() const {
+    if (this->is_array()) {
+      return array.type;
+    } else {
+      return scalar->type.get();
+    }
+  }
+};
+
+struct ARROW_EXPORT ExecResult {
+  // The default value of the variant is ArraySpan
+  std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;
+
+  int64_t length() const {
+    if (this->is_array_span()) {
+      return this->array_span()->length;
+    } else {
+      return this->array_data()->length;
+    }
+  }
+
+  const DataType* type() const {
+    if (this->is_array_span()) {
+      return this->array_span()->type;
+    } else {
+      return this->array_data()->type.get();
+    }
+  }
+
+  const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
+  ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }
+
+  bool is_array_span() const { return this->value.index() == 0; }
+
+  const std::shared_ptr<ArrayData>& array_data() const {
+    return std::get<std::shared_ptr<ArrayData>>(this->value);
+  }
+  ArrayData* array_data_mutable() {
+    return std::get<std::shared_ptr<ArrayData>>(this->value).get();
+  }
+
+  bool is_array_data() const { return this->value.index() == 1; }
+};
+
+/// \brief A "lightweight" column batch object which contains no
+/// std::shared_ptr objects and does not have any memory ownership
+/// semantics. Can represent a view onto an "owning" ExecBatch.
+struct ARROW_EXPORT ExecSpan {
+  ExecSpan() = default;
+  ExecSpan(const ExecSpan& other) = default;
+  ExecSpan& operator=(const ExecSpan& other) = default;
+  ExecSpan(ExecSpan&& other) = default;
+  ExecSpan& operator=(ExecSpan&& other) = default;
+
+  explicit ExecSpan(std::vector<ExecValue> values, int64_t length)
+      : length(length), values(std::move(values)) {}
+
+  explicit ExecSpan(const ExecBatch& batch) {
+    this->length = batch.length;
+    this->values.resize(batch.values.size());
+    for (size_t i = 0; i < batch.values.size(); ++i) {
+      const Datum& in_value = batch[i];
+      ExecValue* out_value = &this->values[i];
+      if (in_value.is_array()) {
+        out_value->SetArray(*in_value.array());
+      } else {
+        out_value->SetScalar(in_value.scalar().get());
+      }
+    }
+  }
+
+  /// \brief Return the value at the i-th index
+  template <typename index_type>
+  inline const ExecValue& operator[](index_type i) const {
+    return values[i];
+  }
+
+  /// \brief A convenience for the number of values / arguments.
+  int num_values() const { return static_cast<int>(values.size()); }
+
+  std::vector<TypeHolder> GetTypes() const {
+    std::vector<TypeHolder> result;
+    for (const auto& value : this->values) {
+      result.emplace_back(value.type());
+    }
+    return result;
+  }
+
+  ExecBatch ToExecBatch() const {
+    ExecBatch result;
+    result.length = this->length;
+    for (const ExecValue& value : this->values) {
+      if (value.is_array()) {
+        result.values.push_back(value.array.ToArrayData());
+      } else {
+        result.values.push_back(value.scalar->GetSharedPtr());
+      }
+    }
+    return result;
+  }
+
+  int64_t length = 0;
+  std::vector<ExecValue> values;
+};
+
+/// \defgroup compute-call-function One-shot calls to compute functions
+///
+/// @{
+
+/// \brief One-shot invoker for all types of functions.
+///
+/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
+/// and wrapping of outputs.
+ARROW_EXPORT
+Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
+                           const FunctionOptions* options, ExecContext* ctx = NULLPTR);
+
+/// \brief Variant of CallFunction which uses a function's default options.
+///
+/// NB: Some functions require FunctionOptions be provided.
+ARROW_EXPORT
+Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
+                           ExecContext* ctx = NULLPTR);
+
+/// \brief One-shot invoker for all types of functions.
+///
+/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
+/// and wrapping of outputs.
+ARROW_EXPORT
+Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
+                           const FunctionOptions* options, ExecContext* ctx = NULLPTR);
+
+/// \brief Variant of CallFunction which uses a function's default options.
+///
+/// NB: Some functions require FunctionOptions be provided.
+ARROW_EXPORT
+Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
+                           ExecContext* ctx = NULLPTR);
+
+/// @}
+
+/// \defgroup compute-function-executor One-shot calls to obtain function executors
+///
+/// @{
+
+/// \brief One-shot executor provider for all types of functions.
+///
+/// This function creates and initializes a `FunctionExecutor` appropriate
+/// for the given function name, input types and function options.
+ARROW_EXPORT
+Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
+    const std::string& func_name, std::vector<TypeHolder> in_types,
+    const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
+
+/// \brief One-shot executor provider for all types of functions.
+///
+/// This function creates and initializes a `FunctionExecutor` appropriate
+/// for the given function name, input types (taken from the Datum arguments)
+/// and function options.
+ARROW_EXPORT
+Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
+    const std::string& func_name, const std::vector<Datum>& args,
+    const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
+
+/// @}
+
+}  // namespace compute
+}  // namespace arrow
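
A minimal sketch (not part of the diff above) of the one-shot `CallFunction` API declared in `exec.h`. It assumes the `"add"` scalar function is registered in the default registry, as it is in a standard libarrow build; note how an Array and a Scalar "column" are mixed and broadcast together.

```cpp
#include <iostream>
#include <memory>

#include "arrow/api.h"
#include "arrow/compute/exec.h"

arrow::Status Example() {
  arrow::Int32Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  std::shared_ptr<arrow::Array> a;
  ARROW_RETURN_NOT_OK(builder.Finish(&a));

  // A constant "column" is passed as a Scalar rather than a repeated array.
  arrow::Datum ten(std::make_shared<arrow::Int32Scalar>(10));
  arrow::compute::ExecContext ctx;  // default memory pool and registry
  ARROW_ASSIGN_OR_RAISE(
      arrow::Datum sum,
      arrow::compute::CallFunction("add", {arrow::Datum(a), ten}, &ctx));
  std::cout << sum.make_array()->ToString() << std::endl;  // [11, 12, 13]
  return arrow::Status::OK();
}

int main() { return Example().ok() ? 0 : 1; }
```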
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h
ADDED
@@ -0,0 +1,409 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle.
+
+#pragma once
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/compute/kernel.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/compare.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \addtogroup compute-functions
+/// @{
+
+/// \brief Contains the number of required arguments for the function.
+///
+/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity.
+struct ARROW_EXPORT Arity {
+  /// \brief A function taking no arguments
+  static Arity Nullary() { return Arity(0, false); }
+
+  /// \brief A function taking 1 argument
+  static Arity Unary() { return Arity(1, false); }
+
+  /// \brief A function taking 2 arguments
+  static Arity Binary() { return Arity(2, false); }
+
+  /// \brief A function taking 3 arguments
+  static Arity Ternary() { return Arity(3, false); }
+
+  /// \brief A function taking a variable number of arguments
+  ///
+  /// \param[in] min_args the minimum number of arguments required when
+  /// invoking the function
+  static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); }
+
+  // NOTE: the 0-argument form (default constructor) is required for Cython
+  explicit Arity(int num_args = 0, bool is_varargs = false)
+      : num_args(num_args), is_varargs(is_varargs) {}
+
+  /// The number of required arguments (or the minimum number for varargs
+  /// functions).
+  int num_args;
+
+  /// If true, then the num_args is the minimum number of required arguments.
+  bool is_varargs = false;
+};
+
+struct ARROW_EXPORT FunctionDoc {
+  /// \brief A one-line summary of the function, using a verb.
+  ///
+  /// For example, "Add two numeric arrays or scalars".
+  std::string summary;
+
+  /// \brief A detailed description of the function, meant to follow the summary.
+  std::string description;
+
+  /// \brief Symbolic names (identifiers) for the function arguments.
+  ///
+  /// Some bindings may use this to generate nicer function signatures.
+  std::vector<std::string> arg_names;
+
+  // TODO add argument descriptions?
+
+  /// \brief Name of the options class, if any.
+  std::string options_class;
+
+  /// \brief Whether options are required for function execution
+  ///
+  /// If false, then either the function does not have an options class
+  /// or there is a usable default options value.
+  bool options_required;
+
+  FunctionDoc() = default;
+
+  FunctionDoc(std::string summary, std::string description,
+              std::vector<std::string> arg_names, std::string options_class = "",
+              bool options_required = false)
+      : summary(std::move(summary)),
+        description(std::move(description)),
+        arg_names(std::move(arg_names)),
+        options_class(std::move(options_class)),
+        options_required(options_required) {}
+
+  static const FunctionDoc& Empty();
+};
+
+/// \brief An executor of a function with a preconfigured kernel
+class ARROW_EXPORT FunctionExecutor {
+ public:
+  virtual ~FunctionExecutor() = default;
+  /// \brief Initialize or re-initialize the preconfigured kernel
+  ///
+  /// This method may be called zero or more times. Depending on how
+  /// the FunctionExecutor was obtained, it may already have been initialized.
+  virtual Status Init(const FunctionOptions* options = NULLPTR,
+                      ExecContext* exec_ctx = NULLPTR) = 0;
+  /// \brief Execute the preconfigured kernel with arguments that must fit it
+  ///
+  /// The method requires the arguments be castable to the preconfigured types.
+  ///
+  /// \param[in] args Arguments to execute the function on
+  /// \param[in] length Length of arguments batch or -1 to default it. If the
+  /// function has no parameters, this determines the batch length, defaulting
+  /// to 0. Otherwise, if the function is scalar, this must equal the argument
+  /// batch's inferred length or be -1 to default to it. This is ignored for
+  /// vector functions.
+  virtual Result<Datum> Execute(const std::vector<Datum>& args, int64_t length = -1) = 0;
+};
+
+/// \brief Base class for compute functions. Function implementations contain a
+/// collection of "kernels" which are implementations of the function for
+/// specific argument types. Selecting a viable kernel for executing a function
+/// is referred to as "dispatching".
+class ARROW_EXPORT Function {
+ public:
+  /// \brief The kind of function, which indicates in what contexts it is
+  /// valid for use.
+  enum Kind {
+    /// A function that performs scalar data operations on whole arrays of
+    /// data. Can generally process Array or Scalar values. The size of the
+    /// output will be the same as the size (or broadcasted size, in the case
+    /// of mixing Array and Scalar inputs) of the input.
+    SCALAR,
+
+    /// A function with array input and output whose behavior depends on the
+    /// values of the entire arrays passed, rather than the value of each scalar
+    /// value.
+    VECTOR,
+
+    /// A function that computes scalar summary statistics from array input.
+    SCALAR_AGGREGATE,
+
+    /// A function that computes grouped summary statistics from array input
+    /// and an array of group identifiers.
+    HASH_AGGREGATE,
+
+    /// A function that dispatches to other functions and does not contain its
+    /// own kernels.
+    META
+  };
+
+  virtual ~Function() = default;
+
+  /// \brief The name of the kernel. The registry enforces uniqueness of names.
+  const std::string& name() const { return name_; }
+
+  /// \brief The kind of kernel, which indicates in what contexts it is valid
+  /// for use.
+  Function::Kind kind() const { return kind_; }
+
+  /// \brief Contains the number of arguments the function requires, or if the
+  /// function accepts variable numbers of arguments.
+  const Arity& arity() const { return arity_; }
+
+  /// \brief Return the function documentation
+  const FunctionDoc& doc() const { return doc_; }
+
+  /// \brief Returns the number of registered kernels for this function.
+  virtual int num_kernels() const = 0;
+
+  /// \brief Return a kernel that can execute the function given the exact
+  /// argument types (without implicit type casts).
+  ///
+  /// NB: This function is overridden in CastFunction.
+  virtual Result<const Kernel*> DispatchExact(const std::vector<TypeHolder>& types) const;
+
+  /// \brief Return a best-match kernel that can execute the function given the argument
+  /// types, after implicit casts are applied.
+  ///
+  /// \param[in,out] values Argument types. An element may be modified to
+  /// indicate that the returned kernel only approximately matches the input
+  /// value descriptors; callers are responsible for casting inputs to the type
+  /// required by the kernel.
+  virtual Result<const Kernel*> DispatchBest(std::vector<TypeHolder>* values) const;
+
+  /// \brief Get a function executor with a best-matching kernel
+  ///
+  /// The returned executor will by default work with the default FunctionOptions
+  /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`.
+  virtual Result<std::shared_ptr<FunctionExecutor>> GetBestExecutor(
+      std::vector<TypeHolder> inputs) const;
+
+  /// \brief Execute the function eagerly with the passed input arguments with
+  /// kernel dispatch, batch iteration, and memory allocation details taken
+  /// care of.
+  ///
+  /// If the `options` pointer is null, then `default_options()` will be used.
+  ///
+  /// This function can be overridden in subclasses.
+  virtual Result<Datum> Execute(const std::vector<Datum>& args,
+                                const FunctionOptions* options, ExecContext* ctx) const;
+
+  virtual Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
+                                ExecContext* ctx) const;
+
+  /// \brief Returns the default options for this function.
+  ///
+  /// Whatever option semantics a Function has, implementations must guarantee
+  /// that default_options() is valid to pass to Execute as options.
+  const FunctionOptions* default_options() const { return default_options_; }
+
+  virtual Status Validate() const;
+
+  /// \brief Returns the pure property for this function.
+  ///
+  /// Impure functions are those that may return different results for the same
+  /// input arguments. For example, a function that returns a random number is
+  /// not pure. An expression containing only pure functions can be simplified by
+  /// pre-evaluating any sub-expressions that have constant arguments.
+  virtual bool is_pure() const { return true; }
+
+ protected:
+  Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
+           const FunctionOptions* default_options)
+      : name_(std::move(name)),
+        kind_(kind),
+        arity_(arity),
+        doc_(std::move(doc)),
+        default_options_(default_options) {}
+
+  Status CheckArity(size_t num_args) const;
+
+  std::string name_;
+  Function::Kind kind_;
+  Arity arity_;
+  const FunctionDoc doc_;
+  const FunctionOptions* default_options_ = NULLPTR;
+};
+
+namespace detail {
+
+template <typename KernelType>
+class FunctionImpl : public Function {
+ public:
+  /// \brief Return pointers to current-available kernels for inspection
+  std::vector<const KernelType*> kernels() const {
+    std::vector<const KernelType*> result;
+    for (const auto& kernel : kernels_) {
+      result.push_back(&kernel);
+    }
+    return result;
+  }
+
+  int num_kernels() const override { return static_cast<int>(kernels_.size()); }
+
+ protected:
+  FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
+               const FunctionOptions* default_options)
+      : Function(std::move(name), kind, arity, std::move(doc), default_options) {}
+
+  std::vector<KernelType> kernels_;
+};
+
+/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned.
+ARROW_EXPORT
+const Kernel* DispatchExactImpl(const Function* func, const std::vector<TypeHolder>&);
+
+/// \brief Return an error message if no Kernel is found.
+ARROW_EXPORT
+Status NoMatchingKernel(const Function* func, const std::vector<TypeHolder>&);
+
+}  // namespace detail
+
+/// \brief A function that executes elementwise operations on arrays or
+/// scalars, and therefore whose results generally do not depend on the order
+/// of the values in the arguments. Accepts and returns arrays that are all of
+/// the same size. These functions roughly correspond to the functions used in
+/// SQL expressions.
+class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl<ScalarKernel> {
+ public:
+  using KernelType = ScalarKernel;
+
+  ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc,
+                 const FunctionOptions* default_options = NULLPTR, bool is_pure = true)
+      : detail::FunctionImpl<ScalarKernel>(std::move(name), Function::SCALAR, arity,
+                                           std::move(doc), default_options),
+        is_pure_(is_pure) {}
+
+  /// \brief Add a kernel with given input/output types, no required state
+  /// initialization, preallocation for fixed-width types, and default null
+  /// handling (intersect validity bitmaps of inputs).
+  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
+                   ArrayKernelExec exec, KernelInit init = NULLPTR);
+
+  /// \brief Add a kernel (function implementation). Returns error if the
+  /// kernel's signature does not match the function's arity.
+  Status AddKernel(ScalarKernel kernel);
+
+  /// \brief Returns the pure property for this function.
+  bool is_pure() const override { return is_pure_; }
+
+ private:
+  const bool is_pure_;
+};
+
+/// \brief A function that executes general array operations that may yield
+/// outputs of different sizes or have results that depend on the whole array
+/// contents. These functions roughly correspond to the functions found in
+/// non-SQL array languages like APL and its derivatives.
+class ARROW_EXPORT VectorFunction : public detail::FunctionImpl<VectorKernel> {
+ public:
+  using KernelType = VectorKernel;
+
+  VectorFunction(std::string name, const Arity& arity, FunctionDoc doc,
+                 const FunctionOptions* default_options = NULLPTR)
+      : detail::FunctionImpl<VectorKernel>(std::move(name), Function::VECTOR, arity,
+                                           std::move(doc), default_options) {}
+
+  /// \brief Add a simple kernel with given input/output types, no required
+  /// state initialization, no data preallocation, and no preallocation of the
+  /// validity bitmap.
+  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
+                   ArrayKernelExec exec, KernelInit init = NULLPTR);
+
+  /// \brief Add a kernel (function implementation). Returns error if the
+  /// kernel's signature does not match the function's arity.
+  Status AddKernel(VectorKernel kernel);
+};
+
+class ARROW_EXPORT ScalarAggregateFunction
+    : public detail::FunctionImpl<ScalarAggregateKernel> {
+ public:
+  using KernelType = ScalarAggregateKernel;
+
+  ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
+                          const FunctionOptions* default_options = NULLPTR)
+      : detail::FunctionImpl<ScalarAggregateKernel>(std::move(name),
+                                                    Function::SCALAR_AGGREGATE, arity,
+                                                    std::move(doc), default_options) {}
+
+  /// \brief Add a kernel (function implementation). Returns error if the
+  /// kernel's signature does not match the function's arity.
+  Status AddKernel(ScalarAggregateKernel kernel);
+};
+
+class ARROW_EXPORT HashAggregateFunction
+    : public detail::FunctionImpl<HashAggregateKernel> {
+ public:
+  using KernelType = HashAggregateKernel;
+
+  HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
+                        const FunctionOptions* default_options = NULLPTR)
+      : detail::FunctionImpl<HashAggregateKernel>(std::move(name),
+                                                  Function::HASH_AGGREGATE, arity,
+                                                  std::move(doc), default_options) {}
+
+  /// \brief Add a kernel (function implementation). Returns error if the
+  /// kernel's signature does not match the function's arity.
+  Status AddKernel(HashAggregateKernel kernel);
+};
+
+/// \brief A function that dispatches to other functions. Must implement
+/// MetaFunction::ExecuteImpl.
+///
+/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution
+/// of concrete Function types, but must handle other Datum kinds on its own.
+class ARROW_EXPORT MetaFunction : public Function {
+ public:
+  int num_kernels() const override { return 0; }
+
+  Result<Datum> Execute(const std::vector<Datum>& args, const FunctionOptions* options,
+                        ExecContext* ctx) const override;
+
+  Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
+                        ExecContext* ctx) const override;
+
+ protected:
+  virtual Result<Datum> ExecuteImpl(const std::vector<Datum>& args,
+                                    const FunctionOptions* options,
+                                    ExecContext* ctx) const = 0;
+
+  MetaFunction(std::string name, const Arity& arity, FunctionDoc doc,
+               const FunctionOptions* default_options = NULLPTR)
+      : Function(std::move(name), Function::META, arity, std::move(doc),
+                 default_options) {}
+};
+
+/// @}
+
+}  // namespace compute
+}  // namespace arrow
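
A minimal sketch (not part of the diff above) of inspecting a registered `Function` through the default registry. `GetFunctionRegistry` is declared in `arrow/compute/registry.h` (also part of this upload), and `"add"` is assumed to be registered, as in a standard libarrow build.

```cpp
#include <iostream>
#include <memory>

#include "arrow/compute/function.h"
#include "arrow/compute/registry.h"

int main() {
  arrow::compute::FunctionRegistry* registry =
      arrow::compute::GetFunctionRegistry();
  auto maybe_func = registry->GetFunction("add");
  if (!maybe_func.ok()) return 1;
  std::shared_ptr<arrow::compute::Function> func = *maybe_func;
  // "add" is a SCALAR-kind binary function with one kernel per type combo.
  std::cout << func->name() << ": kind=" << static_cast<int>(func->kind())
            << " arity=" << func->arity().num_args
            << " kernels=" << func->num_kernels() << std::endl;
  return 0;
}
```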
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h
ADDED
@@ -0,0 +1,81 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
// NOTE: API is EXPERIMENTAL and will change without going through a
|
19 |
+
// deprecation cycle.
|
20 |
+
|
21 |
+
#pragma once
|
22 |
+
|
23 |
+
#include "arrow/compute/type_fwd.h"
|
24 |
+
#include "arrow/result.h"
|
25 |
+
#include "arrow/status.h"
|
26 |
+
#include "arrow/type_fwd.h"
|
27 |
+
#include "arrow/util/visibility.h"
|
28 |
+
|
29 |
+
namespace arrow {
|
30 |
+
namespace compute {
|
31 |
+
|
32 |
+
/// \addtogroup compute-functions
|
33 |
+
/// @{
|
34 |
+
|
35 |
+
/// \brief Extension point for defining options outside libarrow (but
|
36 |
+
/// still within this project).
|
37 |
+
class ARROW_EXPORT FunctionOptionsType {
|
38 |
+
public:
|
39 |
+
virtual ~FunctionOptionsType() = default;
|
40 |
+
|
41 |
+
virtual const char* type_name() const = 0;
|
42 |
+
virtual std::string Stringify(const FunctionOptions&) const = 0;
|
43 |
+
virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0;
|
44 |
+
virtual Result<std::shared_ptr<Buffer>> Serialize(const FunctionOptions&) const;
|
45 |
+
virtual Result<std::unique_ptr<FunctionOptions>> Deserialize(
|
46 |
+
const Buffer& buffer) const;
|
47 |
+
virtual std::unique_ptr<FunctionOptions> Copy(const FunctionOptions&) const = 0;
|
48 |
+
};
|
49 |
+
|
50 |
+
/// \brief Base class for specifying options configuring a function's behavior,
|
51 |
+
/// such as error handling.
|
52 |
+
class ARROW_EXPORT FunctionOptions : public util::EqualityComparable<FunctionOptions> {
|
53 |
+
public:
|
54 |
+
virtual ~FunctionOptions() = default;
|
55 |
+
|
56 |
+
const FunctionOptionsType* options_type() const { return options_type_; }
|
57 |
+
const char* type_name() const { return options_type()->type_name(); }
|
58 |
+
|
59 |
+
bool Equals(const FunctionOptions& other) const;
|
60 |
+
std::string ToString() const;
|
61 |
+
std::unique_ptr<FunctionOptions> Copy() const;
|
62 |
+
/// \brief Serialize an options struct to a buffer.
|
63 |
+
Result<std::shared_ptr<Buffer>> Serialize() const;
|
64 |
+
/// \brief Deserialize an options struct from a buffer.
|
65 |
+
/// Note: this will only look for `type_name` in the default FunctionRegistry;
|
66 |
+
/// to use a custom FunctionRegistry, look up the FunctionOptionsType, then
|
67 |
+
/// call FunctionOptionsType::Deserialize().
|
68 |
+
static Result<std::unique_ptr<FunctionOptions>> Deserialize(
|
69 |
+
const std::string& type_name, const Buffer& buffer);
|
70 |
+
|
71 |
+
protected:
|
72 |
+
explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {}
|
73 |
+
const FunctionOptionsType* options_type_;
|
74 |
+
};
|
75 |
+
|
76 |
+
ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*);
|
77 |
+
|
78 |
+
/// @}
|
79 |
+
|
80 |
+
} // namespace compute
|
81 |
+
} // namespace arrow
|
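
A minimal usage sketch (illustrative, not part of the vendored header): the Serialize()/Deserialize() pair above round-trips an options struct through a Buffer, keyed by its registered type_name. This assumes linking against libarrow's compute module and that the chosen options type supports serialization (not every options type does); the use of ArithmeticOptions and the registered name "ArithmeticOptions" are assumptions for illustration.

#include <arrow/compute/api.h>

// Sketch: serialize an options struct, then reconstruct it by type name
// via the default FunctionRegistry. "ArithmeticOptions" as the registered
// type name is an assumption; adjust for the options type you use.
arrow::Status RoundTripOptions() {
  arrow::compute::ArithmeticOptions options(/*check_overflow=*/true);
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf, options.Serialize());
  ARROW_ASSIGN_OR_RAISE(
      std::unique_ptr<arrow::compute::FunctionOptions> restored,
      arrow::compute::FunctionOptions::Deserialize("ArithmeticOptions", *buf));
  return restored->Equals(options)
             ? arrow::Status::OK()
             : arrow::Status::Invalid("options did not round-trip");
}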
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h
ADDED
@@ -0,0 +1,120 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <string>
#include <vector>

#include "arrow/type.h"
#include "arrow/util/compare.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

enum class SortOrder {
  /// Arrange values in increasing order
  Ascending,
  /// Arrange values in decreasing order
  Descending,
};

enum class NullPlacement {
  /// Place nulls and NaNs before any non-null values.
  /// NaNs will come after nulls.
  AtStart,
  /// Place nulls and NaNs after any non-null values.
  /// NaNs will come before nulls.
  AtEnd,
};

/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices
class ARROW_EXPORT SortKey : public util::EqualityComparable<SortKey> {
 public:
  explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending)
      : target(std::move(target)), order(order) {}

  bool Equals(const SortKey& other) const;
  std::string ToString() const;

  /// A FieldRef targeting the sort column.
  FieldRef target;
  /// How to order by this sort key.
  SortOrder order;
};

class ARROW_EXPORT Ordering : public util::EqualityComparable<Ordering> {
 public:
  Ordering(std::vector<SortKey> sort_keys,
           NullPlacement null_placement = NullPlacement::AtStart)
      : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {}
  /// true if data ordered by other is also ordered by this
  ///
  /// For example, if data is ordered by [a, b, c] then it is also ordered
  /// by [a, b] but not by [b, c] or [a, b, c, d].
  ///
  /// [a, b].IsSuborderOf([a, b, c]) - true
  /// [a, b, c].IsSuborderOf([a, b, c]) - true
  /// [b, c].IsSuborderOf([a, b, c]) - false
  /// [a, b, c, d].IsSuborderOf([a, b, c]) - false
  ///
  /// The implicit ordering is not a suborder of any other ordering and
  /// no other ordering is a suborder of it. The implicit ordering is not a
  /// suborder of itself.
  ///
  /// The unordered ordering is a suborder of all other orderings but no
  /// other ordering is a suborder of it. The unordered ordering is a suborder
  /// of itself.
  ///
  /// The unordered ordering is a suborder of the implicit ordering.
  bool IsSuborderOf(const Ordering& other) const;

  bool Equals(const Ordering& other) const;
  std::string ToString() const;

  bool is_implicit() const { return is_implicit_; }
  bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); }

  const std::vector<SortKey>& sort_keys() const { return sort_keys_; }
  NullPlacement null_placement() const { return null_placement_; }

  static const Ordering& Implicit() {
    static const Ordering kImplicit(true);
    return kImplicit;
  }

  static const Ordering& Unordered() {
    static const Ordering kUnordered(false);
    // It is also possible to get an unordered ordering by passing in an empty vector
    // using the normal constructor. This is ok and useful when ordering comes from user
    // input.
    return kUnordered;
  }

 private:
  explicit Ordering(bool is_implicit)
      : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {}
  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys_;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement_;
  bool is_implicit_ = false;
};

}  // namespace compute
}  // namespace arrow
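
A small sketch (illustrative, not part of the vendored header) of the IsSuborderOf relation documented above; the column names "a" and "b" are hypothetical.

#include <arrow/compute/ordering.h>
#include <cassert>

void OrderingExample() {
  using arrow::compute::Ordering;
  using arrow::compute::SortKey;

  // Hypothetical sort keys over columns "a" and "b".
  const Ordering by_ab({SortKey(arrow::FieldRef("a")), SortKey(arrow::FieldRef("b"))});
  const Ordering by_a({SortKey(arrow::FieldRef("a"))});

  assert(by_a.IsSuborderOf(by_ab));   // data sorted by [a, b] is sorted by [a]
  assert(!by_ab.IsSuborderOf(by_a));  // but not the other way around
  assert(Ordering::Unordered().IsSuborderOf(by_ab));  // unordered is a suborder of all
}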
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h
ADDED
@@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

class Function;
class FunctionOptionsType;

/// \brief A mutable central function registry for built-in functions as well
/// as user-defined functions. Functions are implementations of
/// arrow::compute::Function.
///
/// Generally, each function contains kernels which are implementations of a
/// function for a specific argument signature. After looking up a function in
/// the registry, one can either execute it eagerly with Function::Execute or
/// use one of the function's dispatch methods to pick a suitable kernel for
/// lower-level function execution.
class ARROW_EXPORT FunctionRegistry {
 public:
  ~FunctionRegistry();

  /// \brief Construct a new registry.
  ///
  /// Most users only need to use the global registry.
  static std::unique_ptr<FunctionRegistry> Make();

  /// \brief Construct a new nested registry with the given parent.
  ///
  /// Most users only need to use the global registry. The returned registry never changes
  /// its parent, even when an operation allows overwriting.
  static std::unique_ptr<FunctionRegistry> Make(FunctionRegistry* parent);

  /// \brief Check whether a new function can be added to the registry.
  ///
  /// \returns Status::KeyError if a function with the same name is already registered.
  Status CanAddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);

  /// \brief Add a new function to the registry.
  ///
  /// \returns Status::KeyError if a function with the same name is already registered.
  Status AddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);

  /// \brief Check whether an alias can be added for the given function name.
  ///
  /// \returns Status::KeyError if the function with the given name is not registered.
  Status CanAddAlias(const std::string& target_name, const std::string& source_name);

  /// \brief Add alias for the given function name.
  ///
  /// \returns Status::KeyError if the function with the given name is not registered.
  Status AddAlias(const std::string& target_name, const std::string& source_name);

  /// \brief Check whether a new function options type can be added to the registry.
  ///
  /// \return Status::KeyError if a function options type with the same name is already
  /// registered.
  Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type,
                                   bool allow_overwrite = false);

  /// \brief Add a new function options type to the registry.
  ///
  /// \returns Status::KeyError if a function options type with the same name is already
  /// registered.
  Status AddFunctionOptionsType(const FunctionOptionsType* options_type,
                                bool allow_overwrite = false);

  /// \brief Retrieve a function by name from the registry.
  Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const;

  /// \brief Return vector of all entry names in the registry.
  ///
  /// Helpful for displaying a manifest of available functions.
  std::vector<std::string> GetFunctionNames() const;

  /// \brief Retrieve a function options type by name from the registry.
  Result<const FunctionOptionsType*> GetFunctionOptionsType(
      const std::string& name) const;

  /// \brief The number of currently registered functions.
  int num_functions() const;

  /// \brief The cast function object registered in AddFunction.
  ///
  /// Helpful for getting the cast function as needed.
  const Function* cast_function() const;

 private:
  FunctionRegistry();

  // Use PIMPL pattern to not have std::unordered_map here
  class FunctionRegistryImpl;
  std::unique_ptr<FunctionRegistryImpl> impl_;

  explicit FunctionRegistry(FunctionRegistryImpl* impl);
};

}  // namespace compute
}  // namespace arrow
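
A usage sketch (illustrative, not part of the vendored header): a typical lookup against the global registry, plus constructing a nested registry; "add" is one of the built-in scalar function names.

#include <arrow/compute/api.h>
#include <iostream>

arrow::Status RegistryExample() {
  arrow::compute::FunctionRegistry* global = arrow::compute::GetFunctionRegistry();
  // Look up a built-in function by name...
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::compute::Function> add_fn,
                        global->GetFunction("add"));
  std::cout << "registry holds " << global->num_functions() << " functions\n";
  // ...and build a nested registry that can hold overrides without
  // mutating the global one.
  std::unique_ptr<arrow::compute::FunctionRegistry> nested =
      arrow::compute::FunctionRegistry::Make(global);
  return arrow::Status::OK();
}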
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h
ADDED
@@ -0,0 +1,58 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/util/visibility.h"

namespace arrow {

struct Datum;
struct TypeHolder;

namespace compute {

class Function;
class ScalarAggregateFunction;
class FunctionExecutor;
class FunctionOptions;
class FunctionRegistry;

/// \brief Return the process-global function registry.
// Defined in registry.cc
ARROW_EXPORT FunctionRegistry* GetFunctionRegistry();

class CastOptions;

struct ExecBatch;
class ExecContext;
class KernelContext;

struct Kernel;
struct ScalarKernel;
struct ScalarAggregateKernel;
struct VectorKernel;

struct KernelState;

class Expression;

ARROW_EXPORT ExecContext* default_exec_context();
ARROW_EXPORT ExecContext* threaded_exec_context();

}  // namespace compute
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/config.h
ADDED
@@ -0,0 +1,98 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <optional>
#include <string>

#include "arrow/status.h"
#include "arrow/util/config.h"  // IWYU pragma: export
#include "arrow/util/visibility.h"

namespace arrow {

struct BuildInfo {
  /// The packed version number, e.g. 1002003 (decimal) for Arrow 1.2.3
  int version;
  /// The "major" version number, e.g. 1 for Arrow 1.2.3
  int version_major;
  /// The "minor" version number, e.g. 2 for Arrow 1.2.3
  int version_minor;
  /// The "patch" version number, e.g. 3 for Arrow 1.2.3
  int version_patch;
  /// The version string, e.g. "1.2.3"
  std::string version_string;
  std::string so_version;
  std::string full_so_version;

  /// The CMake compiler identifier, e.g. "GNU"
  std::string compiler_id;
  std::string compiler_version;
  std::string compiler_flags;

  /// The git changeset id, if available
  std::string git_id;
  /// The git changeset description, if available
  std::string git_description;
  std::string package_kind;

  /// The uppercase build type, e.g. "DEBUG" or "RELEASE"
  std::string build_type;
};

struct RuntimeInfo {
  /// The enabled SIMD level
  ///
  /// This can be less than `detected_simd_level` if the ARROW_USER_SIMD_LEVEL
  /// environment variable is set to another value.
  std::string simd_level;

  /// The SIMD level available on the OS and CPU
  std::string detected_simd_level;

  /// Whether using the OS-based timezone database
  /// This is set at compile-time.
  bool using_os_timezone_db;

  /// The path to the timezone database; by default None.
  std::optional<std::string> timezone_db_path;
};

/// \brief Get runtime build info.
///
/// The returned values correspond to the exact loaded version of the Arrow
/// library, rather than the values frozen at application compile-time through
/// the `ARROW_*` preprocessor definitions.
ARROW_EXPORT
const BuildInfo& GetBuildInfo();

/// \brief Get runtime info.
///
ARROW_EXPORT
RuntimeInfo GetRuntimeInfo();

struct GlobalOptions {
  /// Path to text timezone database. This is only configurable on Windows,
  /// which does not have a compatible OS timezone database.
  std::optional<std::string> timezone_db_path;
};

ARROW_EXPORT
Status Initialize(const GlobalOptions& options) noexcept;

}  // namespace arrow
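
A short sketch (illustrative, not part of the vendored header) of querying the build and runtime info declared above from application code.

#include <arrow/config.h>
#include <iostream>

void PrintArrowInfo() {
  // Reflects the loaded library, not the headers the app compiled against.
  const arrow::BuildInfo& build = arrow::GetBuildInfo();
  std::cout << "Arrow " << build.version_string << " (" << build.compiler_id << ")\n";

  arrow::RuntimeInfo runtime = arrow::GetRuntimeInfo();
  std::cout << "SIMD: " << runtime.simd_level
            << ", detected: " << runtime.detected_simd_level << "\n";
}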
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h
ADDED
@@ -0,0 +1,311 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>

#include "arrow/array/data.h"
#include "arrow/scalar.h"
#include "arrow/type.h"
#include "arrow/type_traits.h"
#include "arrow/util/checked_cast.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class ChunkedArray;
class RecordBatch;
class Table;

/// \class Datum
/// \brief Variant type for various Arrow C++ data structures
struct ARROW_EXPORT Datum {
  /// \brief The kind of datum stored
  enum Kind { NONE, SCALAR, ARRAY, CHUNKED_ARRAY, RECORD_BATCH, TABLE };

  /// \brief A placeholder type to represent empty datum
  struct Empty {};

  /// \brief Datum variants may have a length. This special value indicates that the
  /// current variant does not have a length.
  static constexpr int64_t kUnknownLength = -1;

  /// \brief Storage of the actual datum.
  ///
  /// Note: For arrays, ArrayData is stored instead of Array for easier processing
  std::variant<Empty, std::shared_ptr<Scalar>, std::shared_ptr<ArrayData>,
               std::shared_ptr<ChunkedArray>, std::shared_ptr<RecordBatch>,
               std::shared_ptr<Table>>
      value;

  /// \brief Empty datum, to be populated elsewhere
  Datum() = default;

  Datum(const Datum& other) = default;
  Datum& operator=(const Datum& other) = default;
  Datum(Datum&& other) = default;
  Datum& operator=(Datum&& other) = default;

  /// \brief Construct from a Scalar
  Datum(std::shared_ptr<Scalar> value)  // NOLINT implicit conversion
      : value(std::move(value)) {}

  /// \brief Construct from an ArrayData
  Datum(std::shared_ptr<ArrayData> value)  // NOLINT implicit conversion
      : value(std::move(value)) {}

  /// \brief Construct from an ArrayData
  Datum(ArrayData arg)  // NOLINT implicit conversion
      : value(std::make_shared<ArrayData>(std::move(arg))) {}

  /// \brief Construct from an Array
  Datum(const Array& value);  // NOLINT implicit conversion

  /// \brief Construct from an Array
  Datum(const std::shared_ptr<Array>& value);  // NOLINT implicit conversion

  /// \brief Construct from a ChunkedArray
  Datum(std::shared_ptr<ChunkedArray> value);  // NOLINT implicit conversion

  /// \brief Construct from a RecordBatch
  Datum(std::shared_ptr<RecordBatch> value);  // NOLINT implicit conversion

  /// \brief Construct from a Table
  Datum(std::shared_ptr<Table> value);  // NOLINT implicit conversion

  /// \brief Construct from a ChunkedArray.
  ///
  /// This can be expensive, prefer the shared_ptr<ChunkedArray> constructor
  explicit Datum(const ChunkedArray& value);

  /// \brief Construct from a RecordBatch.
  ///
  /// This can be expensive, prefer the shared_ptr<RecordBatch> constructor
  explicit Datum(const RecordBatch& value);

  /// \brief Construct from a Table.
  ///
  /// This can be expensive, prefer the shared_ptr<Table> constructor
  explicit Datum(const Table& value);

  /// \brief Cast from concrete subtypes of Array or Scalar to Datum
  template <typename T, bool IsArray = std::is_base_of_v<Array, T>,
            bool IsScalar = std::is_base_of_v<Scalar, T>,
            typename = enable_if_t<IsArray || IsScalar>>
  Datum(std::shared_ptr<T> value)  // NOLINT implicit conversion
      : Datum(std::shared_ptr<typename std::conditional<IsArray, Array, Scalar>::type>(
            std::move(value))) {}

  /// \brief Cast from concrete subtypes of Array or Scalar to Datum
  template <typename T, typename TV = typename std::remove_reference_t<T>,
            bool IsArray = std::is_base_of_v<Array, T>,
            bool IsScalar = std::is_base_of_v<Scalar, T>,
            typename = enable_if_t<IsArray || IsScalar>>
  Datum(T&& value)  // NOLINT implicit conversion
      : Datum(std::make_shared<TV>(std::forward<T>(value))) {}

  /// \brief Copy from concrete subtypes of Scalar.
  ///
  /// The concrete scalar type must be copyable (not all of them are).
  template <typename T, typename = enable_if_t<std::is_base_of_v<Scalar, T>>>
  Datum(const T& value)  // NOLINT implicit conversion
      : Datum(std::make_shared<T>(value)) {}

  // Convenience constructors
  /// \brief Convenience constructor storing a bool scalar.
  explicit Datum(bool value);
  /// \brief Convenience constructor storing an int8 scalar.
  explicit Datum(int8_t value);
  /// \brief Convenience constructor storing a uint8 scalar.
  explicit Datum(uint8_t value);
  /// \brief Convenience constructor storing an int16 scalar.
  explicit Datum(int16_t value);
  /// \brief Convenience constructor storing a uint16 scalar.
  explicit Datum(uint16_t value);
  /// \brief Convenience constructor storing an int32 scalar.
  explicit Datum(int32_t value);
  /// \brief Convenience constructor storing a uint32 scalar.
  explicit Datum(uint32_t value);
  /// \brief Convenience constructor storing an int64 scalar.
  explicit Datum(int64_t value);
  /// \brief Convenience constructor storing a uint64 scalar.
  explicit Datum(uint64_t value);
  /// \brief Convenience constructor storing a float scalar.
  explicit Datum(float value);
  /// \brief Convenience constructor storing a double scalar.
  explicit Datum(double value);
  /// \brief Convenience constructor storing a string scalar.
  explicit Datum(std::string value);
  /// \brief Convenience constructor storing a string scalar.
  explicit Datum(const char* value);

  /// \brief Convenience constructor for a DurationScalar from std::chrono::duration
  template <template <typename, typename> class StdDuration, typename Rep,
            typename Period,
            typename = decltype(DurationScalar{StdDuration<Rep, Period>{}})>
  explicit Datum(StdDuration<Rep, Period> d) : Datum{DurationScalar(d)} {}

  /// \brief The kind of data stored in Datum
  Datum::Kind kind() const {
    switch (this->value.index()) {
      case 0:
        return Datum::NONE;
      case 1:
        return Datum::SCALAR;
      case 2:
        return Datum::ARRAY;
      case 3:
        return Datum::CHUNKED_ARRAY;
      case 4:
        return Datum::RECORD_BATCH;
      case 5:
        return Datum::TABLE;
      default:
        return Datum::NONE;
    }
  }

  /// \brief Retrieve the stored array as ArrayData
  ///
  /// Use make_array() if an Array is desired (which is more expensive).
  /// \throws std::bad_variant_access if the datum is not an array
  const std::shared_ptr<ArrayData>& array() const {
    return std::get<std::shared_ptr<ArrayData>>(this->value);
  }

  /// \brief The sum of bytes in each buffer referenced by the datum
  /// Note: Scalars report a size of 0
  /// \see arrow::util::TotalBufferSize for caveats
  int64_t TotalBufferSize() const;

  /// \brief Get the stored ArrayData in mutable form
  ///
  /// For internal use primarily. Keep in mind a shared_ptr<Datum> may have multiple
  /// owners.
  ArrayData* mutable_array() const { return this->array().get(); }

  /// \brief Retrieve the stored array as Array
  /// \throws std::bad_variant_access if the datum is not an array
  std::shared_ptr<Array> make_array() const;

  /// \brief Retrieve the chunked array stored
  /// \throws std::bad_variant_access if the datum is not a chunked array
  const std::shared_ptr<ChunkedArray>& chunked_array() const {
    return std::get<std::shared_ptr<ChunkedArray>>(this->value);
  }

  /// \brief Retrieve the record batch stored
  /// \throws std::bad_variant_access if the datum is not a record batch
  const std::shared_ptr<RecordBatch>& record_batch() const {
    return std::get<std::shared_ptr<RecordBatch>>(this->value);
  }

  /// \brief Retrieve the table stored
  /// \throws std::bad_variant_access if the datum is not a table
  const std::shared_ptr<Table>& table() const {
    return std::get<std::shared_ptr<Table>>(this->value);
  }

  /// \brief Retrieve the scalar stored
  /// \throws std::bad_variant_access if the datum is not a scalar
  const std::shared_ptr<Scalar>& scalar() const {
    return std::get<std::shared_ptr<Scalar>>(this->value);
  }

  /// \brief Retrieve the datum as its concrete array type
  /// \throws std::bad_variant_access if the datum is not an array
  /// \tparam ExactType the expected array type, may cause undefined behavior if it is not
  /// the type of the stored array
  template <typename ExactType>
  std::shared_ptr<ExactType> array_as() const {
    return internal::checked_pointer_cast<ExactType>(this->make_array());
  }

  /// \brief Retrieve the datum as its concrete scalar type
  /// \throws std::bad_variant_access if the datum is not a scalar
  /// \tparam ExactType the expected scalar type, may cause undefined behavior if it is
  /// not the type of the stored scalar
  template <typename ExactType>
  const ExactType& scalar_as() const {
    return internal::checked_cast<const ExactType&>(*this->scalar());
  }

  /// \brief True if Datum contains an array
  bool is_array() const { return this->kind() == Datum::ARRAY; }

  /// \brief True if Datum contains a chunked array
  bool is_chunked_array() const { return this->kind() == Datum::CHUNKED_ARRAY; }

  /// \brief True if Datum contains an array or a chunked array
  bool is_arraylike() const {
    return this->kind() == Datum::ARRAY || this->kind() == Datum::CHUNKED_ARRAY;
  }

  /// \brief True if Datum contains a scalar
  bool is_scalar() const { return this->kind() == Datum::SCALAR; }

  /// \brief True if Datum contains a scalar or array-like data
  bool is_value() const { return this->is_arraylike() || this->is_scalar(); }

  /// \brief Return the null count.
  ///
  /// Only valid for scalar and array-like data.
  int64_t null_count() const;

  /// \brief The value type of the variant, if any
  ///
  /// \return nullptr if no type
  const std::shared_ptr<DataType>& type() const;

  /// \brief The schema of the variant, if any
  ///
  /// \return nullptr if no schema
  const std::shared_ptr<Schema>& schema() const;

  /// \brief The value length of the variant, if any
  ///
  /// \return kUnknownLength if no type
  int64_t length() const;

  /// \brief The array chunks of the variant, if any
  ///
  /// \return empty if not arraylike
  ArrayVector chunks() const;

  /// \brief True if the two data are equal
  bool Equals(const Datum& other) const;

  bool operator==(const Datum& other) const { return Equals(other); }
  bool operator!=(const Datum& other) const { return !Equals(other); }

  std::string ToString() const;
};

ARROW_EXPORT void PrintTo(const Datum&, std::ostream*);

ARROW_EXPORT std::string ToString(Datum::Kind kind);

}  // namespace arrow
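
A sketch (illustrative, not part of the vendored header) of how the Datum variant behaves for a scalar versus an array, using the convenience constructors and accessors declared above.

#include <arrow/api.h>
#include <cassert>

arrow::Status DatumExample() {
  // A scalar-kind Datum via the int64_t convenience constructor.
  arrow::Datum scalar_datum(int64_t{42});
  assert(scalar_datum.is_scalar());
  assert(scalar_datum.type()->Equals(*arrow::int64()));

  // An array-kind Datum; the ArrayData is what is stored internally.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> array, builder.Finish());
  arrow::Datum array_datum(array);
  assert(array_datum.kind() == arrow::Datum::ARRAY);
  assert(array_datum.length() == 3);
  return arrow::Status::OK();
}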
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/device.h
ADDED
@@ -0,0 +1,394 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <functional>
#include <memory>
#include <string>

#include "arrow/io/type_fwd.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/compare.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

/// \brief EXPERIMENTAL: Device type enum which matches up with C Data Device types
enum class DeviceAllocationType : char {
  kCPU = 1,
  kCUDA = 2,
  kCUDA_HOST = 3,
  kOPENCL = 4,
  kVULKAN = 7,
  kMETAL = 8,
  kVPI = 9,
  kROCM = 10,
  kROCM_HOST = 11,
  kEXT_DEV = 12,
  kCUDA_MANAGED = 13,
  kONEAPI = 14,
  kWEBGPU = 15,
  kHEXAGON = 16,
};

class MemoryManager;

/// \brief EXPERIMENTAL: Abstract interface for hardware devices
///
/// This object represents a device with access to some memory spaces.
/// When handling a Buffer or raw memory address, it allows deciding in which
/// context the raw memory address should be interpreted
/// (e.g. CPU-accessible memory, or embedded memory on some particular GPU).
class ARROW_EXPORT Device : public std::enable_shared_from_this<Device>,
                            public util::EqualityComparable<Device> {
 public:
  virtual ~Device();

  /// \brief A shorthand for this device's type.
  ///
  /// The returned value is different for each device class, but is the
  /// same for all instances of a given class. It can be used as a replacement
  /// for RTTI.
  virtual const char* type_name() const = 0;

  /// \brief A human-readable description of the device.
  ///
  /// The returned value should be detailed enough to distinguish between
  /// different instances, where necessary.
  virtual std::string ToString() const = 0;

  /// \brief Whether this instance points to the same device as another one.
  virtual bool Equals(const Device&) const = 0;

  /// \brief A device ID to identify this device if there are multiple of this type.
  ///
  /// If there is no "device_id" equivalent (such as for the main CPU device on
  /// non-numa systems) returns -1.
  virtual int64_t device_id() const { return -1; }

  /// \brief Whether this device is the main CPU device.
  ///
  /// This shorthand method is very useful when deciding whether a memory address
  /// is CPU-accessible.
  bool is_cpu() const { return is_cpu_; }

  /// \brief Return a MemoryManager instance tied to this device
  ///
  /// The returned instance uses default parameters for this device type's
  /// MemoryManager implementation. Some devices also allow constructing
  /// MemoryManager instances with non-default parameters.
  virtual std::shared_ptr<MemoryManager> default_memory_manager() = 0;

  /// \brief Return the DeviceAllocationType of this device
  virtual DeviceAllocationType device_type() const = 0;

  class SyncEvent;

  /// \brief EXPERIMENTAL: An opaque wrapper for Device-specific streams
  ///
  /// In essence this is just a wrapper around a void* to represent the
  /// standard concept of a stream/queue on a device. Derived classes
  /// should be trivially constructible from its device-specific counterparts.
  class ARROW_EXPORT Stream {
   public:
    using release_fn_t = std::function<void(void*)>;

    virtual ~Stream() = default;

    virtual const void* get_raw() const { return stream_.get(); }

    /// \brief Make the stream wait on the provided event.
    ///
    /// Tells the stream that it should wait until the synchronization
    /// event is completed without blocking the CPU.
    virtual Status WaitEvent(const SyncEvent&) = 0;

    /// \brief Blocks the current thread until a stream's remaining tasks are completed
    virtual Status Synchronize() const = 0;

   protected:
    explicit Stream(void* stream, release_fn_t release_stream)
        : stream_{stream, release_stream} {}

    std::unique_ptr<void, release_fn_t> stream_;
  };

  virtual Result<std::shared_ptr<Stream>> MakeStream() { return NULLPTR; }

  /// \brief Create a new device stream
  ///
  /// This should create the appropriate stream type for the device,
  /// derived from Device::Stream to allow for stream ordered events
  /// and memory allocations.
  virtual Result<std::shared_ptr<Stream>> MakeStream(unsigned int flags) {
    return NULLPTR;
  }

  /// @brief Wrap an existing device stream alongside a release function
  ///
  /// @param device_stream a pointer to the stream to wrap
  /// @param release_fn a function to call during destruction, `nullptr` or
  /// a no-op function can be passed to indicate ownership is maintained
  /// externally
  virtual Result<std::shared_ptr<Stream>> WrapStream(void* device_stream,
                                                     Stream::release_fn_t release_fn) {
    return NULLPTR;
  }

  /// \brief EXPERIMENTAL: An object that provides event/stream sync primitives
  class ARROW_EXPORT SyncEvent {
   public:
    using release_fn_t = std::function<void(void*)>;

    virtual ~SyncEvent() = default;

    void* get_raw() { return sync_event_.get(); }

    /// @brief Block until sync event is completed.
    virtual Status Wait() = 0;

    /// @brief Record the wrapped event on the stream so it triggers
    /// the event when the stream gets to that point in its queue.
    virtual Status Record(const Stream&) = 0;

   protected:
    /// If creating this with a passed in event, the caller must ensure
    /// that the event lives until clear_event is called on this as it
    /// won't own it.
    explicit SyncEvent(void* sync_event, release_fn_t release_sync_event)
        : sync_event_{sync_event, release_sync_event} {}

    std::unique_ptr<void, release_fn_t> sync_event_;
  };

 protected:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Device);
  explicit Device(bool is_cpu = false) : is_cpu_(is_cpu) {}

  bool is_cpu_;
};

/// \brief EXPERIMENTAL: An object that provides memory management primitives
///
/// A MemoryManager is always tied to a particular Device instance.
/// It can also have additional parameters (such as a MemoryPool to
/// allocate CPU memory).
class ARROW_EXPORT MemoryManager : public std::enable_shared_from_this<MemoryManager> {
 public:
  virtual ~MemoryManager();

  /// \brief The device this MemoryManager is tied to
  const std::shared_ptr<Device>& device() const { return device_; }

  /// \brief Whether this MemoryManager is tied to the main CPU device.
  ///
  /// This shorthand method is very useful when deciding whether a memory address
  /// is CPU-accessible.
  bool is_cpu() const { return device_->is_cpu(); }

  /// \brief Create a RandomAccessFile to read a particular buffer.
  ///
  /// The given buffer must be tied to this MemoryManager.
  ///
  /// See also the Buffer::GetReader shorthand.
  virtual Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
      std::shared_ptr<Buffer> buf) = 0;

  /// \brief Create an OutputStream to write to a particular buffer.
  ///
  /// The given buffer must be mutable and tied to this MemoryManager.
  /// The returned stream object writes into the buffer's underlying memory
  /// (but it won't resize it).
  ///
  /// See also the Buffer::GetWriter shorthand.
  virtual Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
      std::shared_ptr<Buffer> buf) = 0;

  /// \brief Allocate a (mutable) Buffer
  ///
  /// The buffer will be allocated in the device's memory.
  virtual Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) = 0;

  /// \brief Copy a Buffer to a destination MemoryManager
  ///
  /// See also the Buffer::Copy shorthand.
  static Result<std::shared_ptr<Buffer>> CopyBuffer(
      const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);

  /// \brief Copy a non-owned Buffer to a destination MemoryManager
  ///
  /// This is useful for cases where the source memory area is externally managed
  /// (its lifetime not tied to the source Buffer), otherwise please use CopyBuffer().
  static Result<std::unique_ptr<Buffer>> CopyNonOwned(
      const Buffer& source, const std::shared_ptr<MemoryManager>& to);

  /// \brief Make a no-copy Buffer view in a destination MemoryManager
  ///
  /// See also the Buffer::View shorthand.
  static Result<std::shared_ptr<Buffer>> ViewBuffer(
      const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);

  /// \brief Create a new SyncEvent.
  ///
  /// This version should construct the appropriate event for the device and
  /// provide the unique_ptr with the correct deleter for the event type.
  /// If the device does not require or work with any synchronization, it is
  /// allowed for it to return a nullptr.
  virtual Result<std::shared_ptr<Device::SyncEvent>> MakeDeviceSyncEvent();

  /// \brief Wrap an event into a SyncEvent.
  ///
  /// @param sync_event passed in sync_event (should be a pointer to the appropriate type)
  /// @param release_sync_event destructor to free sync_event. `nullptr` may be
  /// passed to indicate that no destruction/freeing is necessary
  virtual Result<std::shared_ptr<Device::SyncEvent>> WrapDeviceSyncEvent(
      void* sync_event, Device::SyncEvent::release_fn_t release_sync_event);

 protected:
  ARROW_DISALLOW_COPY_AND_ASSIGN(MemoryManager);

  explicit MemoryManager(const std::shared_ptr<Device>& device) : device_(device) {}

  // Default implementations always return nullptr, should be overridden
  // by subclasses that support data transfer.
  // (returning nullptr means unsupported copy / view)
  // In CopyBufferFrom and ViewBufferFrom, the `from` parameter is guaranteed to
  // be equal to `buf->memory_manager()`.
  virtual Result<std::shared_ptr<Buffer>> CopyBufferFrom(
      const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
  virtual Result<std::shared_ptr<Buffer>> CopyBufferTo(
      const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);
  virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
      const Buffer& buf, const std::shared_ptr<MemoryManager>& from);
  virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
      const Buffer& buf, const std::shared_ptr<MemoryManager>& to);
  virtual Result<std::shared_ptr<Buffer>> ViewBufferFrom(
      const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
  virtual Result<std::shared_ptr<Buffer>> ViewBufferTo(
      const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);

  std::shared_ptr<Device> device_;
};

// ----------------------------------------------------------------------
// CPU backend implementation

class ARROW_EXPORT CPUDevice : public Device {
 public:
  const char* type_name() const override;
  std::string ToString() const override;
  bool Equals(const Device&) const override;
  DeviceAllocationType device_type() const override { return DeviceAllocationType::kCPU; }

  std::shared_ptr<MemoryManager> default_memory_manager() override;

  /// \brief Return the global CPUDevice instance
  static std::shared_ptr<Device> Instance();

  /// \brief Create a MemoryManager
  ///
  /// The returned MemoryManager will use the given MemoryPool for allocations.
  static std::shared_ptr<MemoryManager> memory_manager(MemoryPool* pool);

 protected:
  CPUDevice() : Device(true) {}
};

class ARROW_EXPORT CPUMemoryManager : public MemoryManager {
 public:
  Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
      std::shared_ptr<Buffer> buf) override;
  Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
      std::shared_ptr<Buffer> buf) override;

  Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) override;

  /// \brief Return the MemoryPool associated with this MemoryManager.
  MemoryPool* pool() const { return pool_; }

 protected:
  CPUMemoryManager(const std::shared_ptr<Device>& device, MemoryPool* pool)
      : MemoryManager(device), pool_(pool) {}

  static std::shared_ptr<MemoryManager> Make(const std::shared_ptr<Device>& device,
                                             MemoryPool* pool = default_memory_pool());

  Result<std::shared_ptr<Buffer>> CopyBufferFrom(
      const std::shared_ptr<Buffer>& buf,
      const std::shared_ptr<MemoryManager>& from) override;
  Result<std::shared_ptr<Buffer>> CopyBufferTo(
      const std::shared_ptr<Buffer>& buf,
      const std::shared_ptr<MemoryManager>& to) override;
  Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
      const Buffer& buf, const std::shared_ptr<MemoryManager>& from) override;
  Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
      const Buffer& buf, const std::shared_ptr<MemoryManager>& to) override;
  Result<std::shared_ptr<Buffer>> ViewBufferFrom(
      const std::shared_ptr<Buffer>& buf,
      const std::shared_ptr<MemoryManager>& from) override;
  Result<std::shared_ptr<Buffer>> ViewBufferTo(
      const std::shared_ptr<Buffer>& buf,
      const std::shared_ptr<MemoryManager>& to) override;

  MemoryPool* pool_;

  friend std::shared_ptr<MemoryManager> CPUDevice::memory_manager(MemoryPool* pool);
  ARROW_FRIEND_EXPORT friend std::shared_ptr<MemoryManager> default_cpu_memory_manager();
};

/// \brief Return the default CPU MemoryManager instance
///
/// The returned singleton instance uses the default MemoryPool.
/// This function is a faster spelling of
/// `CPUDevice::Instance()->default_memory_manager()`.
ARROW_EXPORT
std::shared_ptr<MemoryManager> default_cpu_memory_manager();

using DeviceMapper =
    std::function<Result<std::shared_ptr<MemoryManager>>(int64_t device_id)>;

/// \brief Register a function to retrieve a MemoryManager for a Device type
///
/// This registers the device type globally. A specific device type can only
/// be registered once. This method is thread-safe.
///
/// Currently, this registry is only used for importing data through the C Device
/// Data Interface (for the default Device to MemoryManager mapper in
/// arrow::ImportDeviceArray/ImportDeviceRecordBatch).
///
/// \param[in] device_type the device type for which to register a MemoryManager
/// \param[in] mapper function that takes a device id and returns the appropriate
/// MemoryManager for the registered device type and given device id
/// \return Status
ARROW_EXPORT
Status RegisterDeviceMapper(DeviceAllocationType device_type, DeviceMapper mapper);

/// \brief Get the registered function to retrieve a MemoryManager for the
/// given Device type
///
/// \param[in] device_type the device type
/// \return function that takes a device id and returns the appropriate
/// MemoryManager for the registered device type and given device id
ARROW_EXPORT
Result<DeviceMapper> GetDeviceMapper(DeviceAllocationType device_type);

}  // namespace arrow
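
A sketch (illustrative, not part of the vendored header) of the CPU path through the MemoryManager API above. Between two CPU managers, CopyBuffer is a trivial hop, but the same call is the portable way to move buffers across devices that support transfer.

#include <arrow/buffer.h>
#include <arrow/device.h>

arrow::Status DeviceExample() {
  std::shared_ptr<arrow::MemoryManager> mm = arrow::default_cpu_memory_manager();
  // Allocate 64 bytes in this manager's memory (the default MemoryPool here).
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::Buffer> buf, mm->AllocateBuffer(64));
  // Copy the buffer "to" a destination manager; trivial between CPU managers.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> copied,
                        arrow::MemoryManager::CopyBuffer(std::move(buf), mm));
  return copied->is_cpu() ? arrow::Status::OK()
                          : arrow::Status::Invalid("expected a CPU buffer");
}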
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+/// User-defined extension types.
+/// \since 0.13.0
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief The base class for custom / user-defined types.
+class ARROW_EXPORT ExtensionType : public DataType {
+ public:
+  static constexpr Type::type type_id = Type::EXTENSION;
+
+  static constexpr const char* type_name() { return "extension"; }
+
+  /// \brief The type of array used to represent this extension type's data
+  const std::shared_ptr<DataType>& storage_type() const { return storage_type_; }
+
+  /// \brief Return the type category of the storage type
+  Type::type storage_id() const override { return storage_type_->id(); }
+
+  DataTypeLayout layout() const override;
+
+  std::string ToString(bool show_metadata = false) const override;
+
+  std::string name() const override { return "extension"; }
+
+  /// \brief Unique name of extension type used to identify type for
+  /// serialization
+  /// \return the string name of the extension
+  virtual std::string extension_name() const = 0;
+
+  /// \brief Determine if two instances of the same extension types are
+  /// equal. Invoked from ExtensionType::Equals
+  /// \param[in] other the type to compare this type with
+  /// \return bool true if type instances are equal
+  virtual bool ExtensionEquals(const ExtensionType& other) const = 0;
+
+  /// \brief Wrap built-in Array type in a user-defined ExtensionArray instance
+  /// \param[in] data the physical storage for the extension type
+  virtual std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const = 0;
+
+  /// \brief Create an instance of the ExtensionType given the actual storage
+  /// type and the serialized representation
+  /// \param[in] storage_type the physical storage type of the extension
+  /// \param[in] serialized_data the serialized representation produced by
+  /// Serialize
+  virtual Result<std::shared_ptr<DataType>> Deserialize(
+      std::shared_ptr<DataType> storage_type,
+      const std::string& serialized_data) const = 0;
+
+  /// \brief Create a serialized representation of the extension type's
+  /// metadata. The storage type will be handled automatically in IPC code
+  /// paths
+  /// \return the serialized representation
+  virtual std::string Serialize() const = 0;
+
+  /// \brief Wrap the given storage array as an extension array
+  static std::shared_ptr<Array> WrapArray(const std::shared_ptr<DataType>& ext_type,
+                                          const std::shared_ptr<Array>& storage);
+
+  /// \brief Wrap the given chunked storage array as a chunked extension array
+  static std::shared_ptr<ChunkedArray> WrapArray(
+      const std::shared_ptr<DataType>& ext_type,
+      const std::shared_ptr<ChunkedArray>& storage);
+
+ protected:
+  explicit ExtensionType(std::shared_ptr<DataType> storage_type)
+      : DataType(Type::EXTENSION), storage_type_(storage_type) {}
+
+  std::shared_ptr<DataType> storage_type_;
+};
+
+/// \brief Base array class for user-defined extension types
+class ARROW_EXPORT ExtensionArray : public Array {
+ public:
+  using TypeClass = ExtensionType;
+  /// \brief Construct an ExtensionArray from an ArrayData.
+  ///
+  /// The ArrayData must have the right ExtensionType.
+  explicit ExtensionArray(const std::shared_ptr<ArrayData>& data);
+
+  /// \brief Construct an ExtensionArray from a type and the underlying storage.
+  ExtensionArray(const std::shared_ptr<DataType>& type,
+                 const std::shared_ptr<Array>& storage);
+
+  const ExtensionType* extension_type() const {
+    return internal::checked_cast<const ExtensionType*>(data_->type.get());
+  }
+
+  /// \brief The physical storage for the extension array
+  const std::shared_ptr<Array>& storage() const { return storage_; }
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+  std::shared_ptr<Array> storage_;
+};
+
+class ARROW_EXPORT ExtensionTypeRegistry {
+ public:
+  /// \brief Provide access to the global registry to allow code to control for
+  /// race conditions in registry teardown when some types need to be
+  /// unregistered and destroyed first
+  static std::shared_ptr<ExtensionTypeRegistry> GetGlobalRegistry();
+
+  virtual ~ExtensionTypeRegistry() = default;
+
+  virtual Status RegisterType(std::shared_ptr<ExtensionType> type) = 0;
+  virtual Status UnregisterType(const std::string& type_name) = 0;
+  virtual std::shared_ptr<ExtensionType> GetType(const std::string& type_name) = 0;
+};
+
+/// \brief Register an extension type globally. The name returned by the type's
+/// extension_name() method should be unique. This method is thread-safe
+/// \param[in] type an instance of the extension type
+/// \return Status
+ARROW_EXPORT
+Status RegisterExtensionType(std::shared_ptr<ExtensionType> type);
+
+/// \brief Delete an extension type from the global registry. This method is
+/// thread-safe
+/// \param[in] type_name the unique name of a registered extension type
+/// \return Status error if the type name is unknown
+ARROW_EXPORT
+Status UnregisterExtensionType(const std::string& type_name);
+
+/// \brief Retrieve an extension type from the global registry. Returns nullptr
+/// if not found. This method is thread-safe
+/// \return the globally-registered extension type
+ARROW_EXPORT
+std::shared_ptr<ExtensionType> GetExtensionType(const std::string& type_name);
+
+ARROW_EXPORT extern const char kExtensionTypeKeyName[];
+ARROW_EXPORT extern const char kExtensionMetadataKeyName[];
+
+}  // namespace arrow
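Note: the subclassing pattern this header expects, sketched with a hypothetical UuidType stored as fixed_size_binary(16). The class name and the "example.uuid" / "uuid-v1" strings are illustrative, not part of Arrow.

#include <memory>
#include <string>

#include "arrow/extension_type.h"
#include "arrow/status.h"
#include "arrow/type.h"

// Hypothetical extension type: a UUID stored as 16 fixed-size bytes.
class UuidType : public arrow::ExtensionType {
 public:
  UuidType() : arrow::ExtensionType(arrow::fixed_size_binary(16)) {}

  std::string extension_name() const override { return "example.uuid"; }

  bool ExtensionEquals(const arrow::ExtensionType& other) const override {
    // Parameter-free type: name equality is sufficient.
    return other.extension_name() == extension_name();
  }

  std::shared_ptr<arrow::Array> MakeArray(
      std::shared_ptr<arrow::ArrayData> data) const override {
    return std::make_shared<arrow::ExtensionArray>(data);
  }

  arrow::Result<std::shared_ptr<arrow::DataType>> Deserialize(
      std::shared_ptr<arrow::DataType> storage_type,
      const std::string& serialized_data) const override {
    if (serialized_data != "uuid-v1") {
      return arrow::Status::Invalid("Unexpected extension metadata");
    }
    return std::make_shared<UuidType>();
  }

  // The storage type is handled by IPC; only extra metadata goes here.
  std::string Serialize() const override { return "uuid-v1"; }
};

Once an instance is registered via RegisterExtensionType(std::make_shared<UuidType>()), arrays of this type can round-trip through IPC, with the storage type handled automatically as the Serialize() docstring above notes.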
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h
ADDED
@@ -0,0 +1,296 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace internal {
+
+///////////////////////////////////////////////////////////////////////
+// Helper tracking memory statistics
+
+/// \brief Memory pool statistics
+///
+/// 64-byte aligned so that all atomic values are on the same cache line.
+class alignas(64) MemoryPoolStats {
+ private:
+  // All atomics are updated according to Acquire-Release ordering.
+  // https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
+  //
+  // max_memory_, total_allocated_bytes_, and num_allocs_ only go up (they are
+  // monotonically increasing) which can allow some optimizations.
+  std::atomic<int64_t> max_memory_{0};
+  std::atomic<int64_t> bytes_allocated_{0};
+  std::atomic<int64_t> total_allocated_bytes_{0};
+  std::atomic<int64_t> num_allocs_{0};
+
+ public:
+  int64_t max_memory() const { return max_memory_.load(std::memory_order_acquire); }
+
+  int64_t bytes_allocated() const {
+    return bytes_allocated_.load(std::memory_order_acquire);
+  }
+
+  int64_t total_bytes_allocated() const {
+    return total_allocated_bytes_.load(std::memory_order_acquire);
+  }
+
+  int64_t num_allocations() const { return num_allocs_.load(std::memory_order_acquire); }
+
+  inline void DidAllocateBytes(int64_t size) {
+    // Issue the load before everything else. max_memory_ is monotonically increasing,
+    // so we can use a relaxed load before the read-modify-write.
+    auto max_memory = max_memory_.load(std::memory_order_relaxed);
+    const auto old_bytes_allocated =
+        bytes_allocated_.fetch_add(size, std::memory_order_acq_rel);
+    // Issue store operations on values that we don't depend on to proceed
+    // with execution. When done, max_memory and old_bytes_allocated have
+    // a higher chance of being available on CPU registers. This also has the
+    // nice side-effect of putting 3 atomic stores close to each other in the
+    // instruction stream.
+    total_allocated_bytes_.fetch_add(size, std::memory_order_acq_rel);
+    num_allocs_.fetch_add(1, std::memory_order_acq_rel);
+
+    // If other threads are updating max_memory_ concurrently we leave the loop
+    // without updating, knowing that it already reached a value even higher
+    // than ours.
+    const auto allocated = old_bytes_allocated + size;
+    while (max_memory < allocated && !max_memory_.compare_exchange_weak(
+                                         /*expected=*/max_memory, /*desired=*/allocated,
+                                         std::memory_order_acq_rel)) {
+    }
+  }
+
+  inline void DidReallocateBytes(int64_t old_size, int64_t new_size) {
+    if (new_size > old_size) {
+      DidAllocateBytes(new_size - old_size);
+    } else {
+      DidFreeBytes(old_size - new_size);
+    }
+  }
+
+  inline void DidFreeBytes(int64_t size) {
+    bytes_allocated_.fetch_sub(size, std::memory_order_acq_rel);
+  }
+};
+
+}  // namespace internal
+
+/// Base class for memory allocation on the CPU.
+///
+/// Besides tracking the number of allocated bytes, the allocator also should
+/// take care of the required 64-byte alignment.
+class ARROW_EXPORT MemoryPool {
+ public:
+  virtual ~MemoryPool() = default;
+
+  /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool
+  static std::unique_ptr<MemoryPool> CreateDefault();
+
+  /// Allocate a new memory region of at least size bytes.
+  ///
+  /// The allocated region shall be 64-byte aligned.
+  Status Allocate(int64_t size, uint8_t** out) {
+    return Allocate(size, kDefaultBufferAlignment, out);
+  }
+
+  /// Allocate a new memory region of at least size bytes aligned to alignment.
+  virtual Status Allocate(int64_t size, int64_t alignment, uint8_t** out) = 0;
+
+  /// Resize an already allocated memory section.
+  ///
+  /// As most default allocators on a platform don't support aligned
+  /// reallocation, this function can involve a copy of the underlying data.
+  virtual Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
+                            uint8_t** ptr) = 0;
+  Status Reallocate(int64_t old_size, int64_t new_size, uint8_t** ptr) {
+    return Reallocate(old_size, new_size, kDefaultBufferAlignment, ptr);
+  }
+
+  /// Free an allocated region.
+  ///
+  /// @param buffer Pointer to the start of the allocated memory region
+  /// @param size Allocated size located at buffer. An allocator implementation
+  ///   may use this for tracking the amount of allocated bytes as well as for
+  ///   faster deallocation if supported by its backend.
+  /// @param alignment The alignment of the allocation. Defaults to 64 bytes.
+  virtual void Free(uint8_t* buffer, int64_t size, int64_t alignment) = 0;
+  void Free(uint8_t* buffer, int64_t size) {
+    Free(buffer, size, kDefaultBufferAlignment);
+  }
+
+  /// Return unused memory to the OS
+  ///
+  /// Only applies to allocators that hold onto unused memory. This will be
+  /// best effort, a memory pool may not implement this feature or may be
+  /// unable to fulfill the request due to fragmentation.
+  virtual void ReleaseUnused() {}
+
+  /// The number of bytes that were allocated and not yet freed through
+  /// this allocator.
+  virtual int64_t bytes_allocated() const = 0;
+
+  /// Return peak memory allocation in this memory pool
+  ///
+  /// \return Maximum bytes allocated. If not known (or not implemented),
+  /// returns -1
+  virtual int64_t max_memory() const;
+
+  /// The number of bytes that were allocated.
+  virtual int64_t total_bytes_allocated() const = 0;
+
+  /// The number of allocations or reallocations that were requested.
+  virtual int64_t num_allocations() const = 0;
+
+  /// The name of the backend used by this MemoryPool (e.g. "system" or "jemalloc").
+  virtual std::string backend_name() const = 0;
+
+ protected:
+  MemoryPool() = default;
+};
+
+class ARROW_EXPORT LoggingMemoryPool : public MemoryPool {
+ public:
+  explicit LoggingMemoryPool(MemoryPool* pool);
+  ~LoggingMemoryPool() override = default;
+
+  using MemoryPool::Allocate;
+  using MemoryPool::Free;
+  using MemoryPool::Reallocate;
+
+  Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
+  Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
+                    uint8_t** ptr) override;
+  void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
+
+  int64_t bytes_allocated() const override;
+
+  int64_t max_memory() const override;
+
+  int64_t total_bytes_allocated() const override;
+
+  int64_t num_allocations() const override;
+
+  std::string backend_name() const override;
+
+ private:
+  MemoryPool* pool_;
+};
+
+/// Derived class for memory allocation.
+///
+/// Tracks the number of bytes and maximum memory allocated through its direct
+/// calls. Actual allocation is delegated to MemoryPool class.
+class ARROW_EXPORT ProxyMemoryPool : public MemoryPool {
+ public:
+  explicit ProxyMemoryPool(MemoryPool* pool);
+  ~ProxyMemoryPool() override;
+
+  using MemoryPool::Allocate;
+  using MemoryPool::Free;
+  using MemoryPool::Reallocate;
+
+  Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
+  Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
+                    uint8_t** ptr) override;
+  void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
+
+  int64_t bytes_allocated() const override;
+
+  int64_t max_memory() const override;
+
+  int64_t total_bytes_allocated() const override;
+
+  int64_t num_allocations() const override;
+
+  std::string backend_name() const override;
+
+ private:
+  class ProxyMemoryPoolImpl;
+  std::unique_ptr<ProxyMemoryPoolImpl> impl_;
+};
+
+/// \brief Return a process-wide memory pool based on the system allocator.
+ARROW_EXPORT MemoryPool* system_memory_pool();
+
+/// \brief Return a process-wide memory pool based on jemalloc.
+///
+/// May return NotImplemented if jemalloc is not available.
+ARROW_EXPORT Status jemalloc_memory_pool(MemoryPool** out);
+
+/// \brief Set jemalloc memory page purging behavior for future-created arenas
+/// to the indicated number of milliseconds. See dirty_decay_ms and
+/// muzzy_decay_ms options in jemalloc for a description of what these do. The
+/// default is configured to 1000 (1 second) which releases memory more
+/// aggressively to the operating system than the jemalloc default of 10
+/// seconds. If you set the value to 0, dirty / muzzy pages will be released
+/// immediately rather than with a time decay, but this may reduce application
+/// performance.
+ARROW_EXPORT
+Status jemalloc_set_decay_ms(int ms);
+
+/// \brief Get basic statistics from jemalloc's mallctl.
+/// See the MALLCTL NAMESPACE section in jemalloc project documentation for
+/// available stats.
+ARROW_EXPORT
+Result<int64_t> jemalloc_get_stat(const char* name);
+
+/// \brief Reset the counter for peak bytes allocated in the calling thread to zero.
+/// This affects subsequent calls to thread.peak.read, but not the values returned by
+/// thread.allocated or thread.deallocated.
+ARROW_EXPORT
+Status jemalloc_peak_reset();
+
+/// \brief Print summary statistics in human-readable form to stderr.
+/// See malloc_stats_print documentation in jemalloc project documentation for
+/// available opt flags.
+ARROW_EXPORT
+Status jemalloc_stats_print(const char* opts = "");
+
+/// \brief Print summary statistics in human-readable form using a callback
+/// See malloc_stats_print documentation in jemalloc project documentation for
+/// available opt flags.
+ARROW_EXPORT
+Status jemalloc_stats_print(std::function<void(const char*)> write_cb,
+                            const char* opts = "");
+
+/// \brief Get summary statistics in human-readable form.
+/// See malloc_stats_print documentation in jemalloc project documentation for
+/// available opt flags.
+ARROW_EXPORT
+Result<std::string> jemalloc_stats_string(const char* opts = "");
+
+/// \brief Return a process-wide memory pool based on mimalloc.
+///
+/// May return NotImplemented if mimalloc is not available.
+ARROW_EXPORT Status mimalloc_memory_pool(MemoryPool** out);
+
+/// \brief Return the names of the backends supported by this Arrow build.
+ARROW_EXPORT std::vector<std::string> SupportedMemoryBackendNames();
+
+}  // namespace arrow
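Note: a short sketch of raw pool usage against the interface above. Most application code allocates through Buffer and builder APIs rather than calling the pool directly.

#include <cstdint>
#include <iostream>

#include "arrow/memory_pool.h"
#include "arrow/status.h"

arrow::Status UsePoolDirectly() {
  arrow::MemoryPool* pool = arrow::default_memory_pool();

  uint8_t* data = nullptr;
  // 64-byte aligned allocation by default.
  ARROW_RETURN_NOT_OK(pool->Allocate(4096, &data));
  // Grow the region; the underlying bytes may be copied to a new address.
  ARROW_RETURN_NOT_OK(pool->Reallocate(4096, 8192, &data));

  std::cout << "live bytes: " << pool->bytes_allocated()
            << ", peak: " << pool->max_memory() << std::endl;

  // The size (and alignment, if non-default) must match the allocation.
  pool->Free(data, 8192);
  return arrow::Status::OK();
}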
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool_test.h
ADDED
@@ -0,0 +1,111 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#include <gtest/gtest.h>
+
+#include "arrow/memory_pool.h"
+#include "arrow/status.h"
+#include "arrow/testing/gtest_util.h"
+
+namespace arrow {
+
+class TestMemoryPoolBase : public ::testing::Test {
+ public:
+  virtual ::arrow::MemoryPool* memory_pool() = 0;
+
+  void TestMemoryTracking() {
+    auto pool = memory_pool();
+
+    uint8_t* data;
+    const auto old_bytes_allocated = pool->bytes_allocated();
+    ASSERT_OK(pool->Allocate(100, &data));
+    EXPECT_EQ(static_cast<uint64_t>(0), reinterpret_cast<uint64_t>(data) % 64);
+    ASSERT_EQ(old_bytes_allocated + 100, pool->bytes_allocated());
+
+    uint8_t* data2;
+    ASSERT_OK(pool->Allocate(27, &data2));
+    EXPECT_EQ(static_cast<uint64_t>(0), reinterpret_cast<uint64_t>(data2) % 64);
+    ASSERT_EQ(old_bytes_allocated + 127, pool->bytes_allocated());
+
+    pool->Free(data, 100);
+    ASSERT_EQ(old_bytes_allocated + 27, pool->bytes_allocated());
+    pool->Free(data2, 27);
+    ASSERT_EQ(old_bytes_allocated, pool->bytes_allocated());
+  }
+
+  void TestOOM() {
+    auto pool = memory_pool();
+
+    uint8_t* data;
+    int64_t max_alloc = std::min<uint64_t>(std::numeric_limits<int64_t>::max(),
+                                           std::numeric_limits<size_t>::max());
+    // subtract 63 to prevent overflow after the size is aligned
+    for (int64_t to_alloc : {max_alloc, max_alloc - 63, max_alloc - 127}) {
+      ASSERT_RAISES(OutOfMemory, pool->Allocate(to_alloc, &data));
+    }
+  }
+
+  void TestReallocate() {
+    auto pool = memory_pool();
+
+    uint8_t* data;
+    ASSERT_OK(pool->Allocate(10, &data));
+    ASSERT_EQ(10, pool->bytes_allocated());
+    data[0] = 35;
+    data[9] = 12;
+
+    // Expand
+    ASSERT_OK(pool->Reallocate(10, 20, &data));
+    ASSERT_EQ(data[9], 12);
+    ASSERT_EQ(20, pool->bytes_allocated());
+
+    // Shrink
+    ASSERT_OK(pool->Reallocate(20, 5, &data));
+    ASSERT_EQ(data[0], 35);
+    ASSERT_EQ(5, pool->bytes_allocated());
+
+    // Free
+    pool->Free(data, 5);
+    ASSERT_EQ(0, pool->bytes_allocated());
+  }
+
+  void TestAlignment() {
+    auto pool = memory_pool();
+    {
+      uint8_t* data64;
+      ASSERT_OK(pool->Allocate(10, &data64));
+      ASSERT_EQ(reinterpret_cast<uintptr_t>(data64) % kDefaultBufferAlignment, 0);
+      pool->Free(data64, 10);
+    }
+
+    {
+      uint8_t* data512;
+      ASSERT_OK(pool->Allocate(10, 512, &data512));
+      ASSERT_EQ(reinterpret_cast<uintptr_t>(data512) % 512, 0);
+      pool->Free(data512, 10, 512);
+    }
+  }
+};
+
+}  // namespace arrow
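Note: the fixture above is meant to be bound to a concrete pool per backend. A minimal sketch following the usual googletest pattern (the test suite name is illustrative):

#include <gtest/gtest.h>

#include "arrow/memory_pool.h"
#include "arrow/memory_pool_test.h"

namespace arrow {

// Bind the shared test bodies to a concrete pool implementation.
class TestSystemMemoryPool : public TestMemoryPoolBase {
 public:
  MemoryPool* memory_pool() override { return system_memory_pool(); }
};

TEST_F(TestSystemMemoryPool, MemoryTracking) { TestMemoryTracking(); }
TEST_F(TestSystemMemoryPool, OOM) { TestOOM(); }
TEST_F(TestSystemMemoryPool, Reallocate) { TestReallocate(); }
TEST_F(TestSystemMemoryPool, Alignment) { TestAlignment(); }

}  // namespace arrow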
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/pch.h
ADDED
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Often-used headers, for precompiling.
+// If updating this header, please make sure you check compilation speed
+// before checking in. Adding headers which are not used extremely often
+// may incur a slowdown, since it makes the precompiled header heavier to load.
+
+#include "arrow/array.h"
+#include "arrow/buffer.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/table.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/pretty_print.h
ADDED
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <iosfwd>
+#include <string>
+#include <utility>
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class ChunkedArray;
+class RecordBatch;
+class Schema;
+class Status;
+class Table;
+
+/// \class PrettyPrintDelimiters
+/// \brief Options for controlling which delimiters to use when printing
+/// an Array or ChunkedArray.
+struct ARROW_EXPORT PrettyPrintDelimiters {
+  /// Delimiter to use when opening an Array or ChunkedArray (e.g. "[")
+  std::string open = "[";
+
+  /// Delimiter to use when closing an Array or ChunkedArray (e.g. "]")
+  std::string close = "]";
+
+  /// Delimiter for separating individual elements of an Array (e.g. ","),
+  /// or individual chunks of a ChunkedArray
+  std::string element = ",";
+
+  /// Create a PrettyPrintDelimiters instance with default values
+  static PrettyPrintDelimiters Defaults() { return PrettyPrintDelimiters(); }
+};
+
+/// \class PrettyPrintOptions
+/// \brief Options for controlling how various Arrow types should be printed.
+struct ARROW_EXPORT PrettyPrintOptions {
+  PrettyPrintOptions() = default;
+
+  PrettyPrintOptions(int indent,  // NOLINT runtime/explicit
+                     int window = 10, int indent_size = 2, std::string null_rep = "null",
+                     bool skip_new_lines = false, bool truncate_metadata = true,
+                     int container_window = 2)
+      : indent(indent),
+        indent_size(indent_size),
+        window(window),
+        container_window(container_window),
+        null_rep(std::move(null_rep)),
+        skip_new_lines(skip_new_lines),
+        truncate_metadata(truncate_metadata) {}
+
+  /// Create a PrettyPrintOptions instance with default values
+  static PrettyPrintOptions Defaults() { return PrettyPrintOptions(); }
+
+  /// Number of spaces to shift entire formatted object to the right
+  int indent = 0;
+
+  /// Size of internal indents
+  int indent_size = 2;
+
+  /// Maximum number of elements to show at the beginning and at the end.
+  int window = 10;
+
+  /// Maximum number of elements to show at the beginning and at the end, for elements
+  /// that are containers (that is, lists in a ListArray and chunks in a ChunkedArray)
+  int container_window = 2;
+
+  /// String to use for representing a null value, defaults to "null"
+  std::string null_rep = "null";
+
+  /// Skip new lines between elements, defaults to false
+  bool skip_new_lines = false;
+
+  /// Limit display of each KeyValueMetadata key/value pair to a single line at
+  /// 80 character width
+  bool truncate_metadata = true;
+
+  /// If true, display field metadata when pretty-printing a Schema
+  bool show_field_metadata = true;
+
+  /// If true, display schema metadata when pretty-printing a Schema
+  bool show_schema_metadata = true;
+
+  /// Delimiters to use when printing an Array
+  PrettyPrintDelimiters array_delimiters = PrettyPrintDelimiters::Defaults();
+
+  /// Delimiters to use when printing a ChunkedArray
+  PrettyPrintDelimiters chunked_array_delimiters = PrettyPrintDelimiters::Defaults();
+};
+
+/// \brief Print human-readable representation of RecordBatch
+ARROW_EXPORT
+Status PrettyPrint(const RecordBatch& batch, int indent, std::ostream* sink);
+
+ARROW_EXPORT
+Status PrettyPrint(const RecordBatch& batch, const PrettyPrintOptions& options,
+                   std::ostream* sink);
+
+/// \brief Print human-readable representation of Table
+ARROW_EXPORT
+Status PrettyPrint(const Table& table, const PrettyPrintOptions& options,
+                   std::ostream* sink);
+
+/// \brief Print human-readable representation of Array
+ARROW_EXPORT
+Status PrettyPrint(const Array& arr, int indent, std::ostream* sink);
+
+/// \brief Print human-readable representation of Array
+ARROW_EXPORT
+Status PrettyPrint(const Array& arr, const PrettyPrintOptions& options,
+                   std::ostream* sink);
+
+/// \brief Print human-readable representation of Array
+ARROW_EXPORT
+Status PrettyPrint(const Array& arr, const PrettyPrintOptions& options,
+                   std::string* result);
+
+/// \brief Print human-readable representation of ChunkedArray
+ARROW_EXPORT
+Status PrettyPrint(const ChunkedArray& chunked_arr, const PrettyPrintOptions& options,
+                   std::ostream* sink);
+
+/// \brief Print human-readable representation of ChunkedArray
+ARROW_EXPORT
+Status PrettyPrint(const ChunkedArray& chunked_arr, const PrettyPrintOptions& options,
+                   std::string* result);
+
+ARROW_EXPORT
+Status PrettyPrint(const Schema& schema, const PrettyPrintOptions& options,
+                   std::ostream* sink);
+
+ARROW_EXPORT
+Status PrettyPrint(const Schema& schema, const PrettyPrintOptions& options,
+                   std::string* result);
+
+ARROW_EXPORT
+Status DebugPrint(const Array& arr, int indent);
+
+}  // namespace arrow
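Note: a minimal sketch of driving these options, building a small int64 array and printing it with a custom null representation and a narrower element window:

#include <iostream>
#include <memory>

#include "arrow/array/builder_primitive.h"
#include "arrow/pretty_print.h"
#include "arrow/status.h"

arrow::Status PrintSmallArray() {
  // Build [1, 2, null, 4] as an int64 array.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_RETURN_NOT_OK(builder.Append(4));
  std::shared_ptr<arrow::Array> array;
  ARROW_RETURN_NOT_OK(builder.Finish(&array));

  auto options = arrow::PrettyPrintOptions::Defaults();
  options.null_rep = "NA";  // render nulls as "NA"
  options.window = 2;       // elide the middle of long arrays
  return arrow::PrettyPrint(*array, options, &std::cout);
}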
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h
ADDED
@@ -0,0 +1,407 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/iterator.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \class RecordBatch
+/// \brief Collection of equal-length arrays matching a particular Schema
+///
+/// A record batch is a table-like data structure that is semantically a
+/// sequence of fields, each a contiguous Arrow array
+class ARROW_EXPORT RecordBatch {
+ public:
+  virtual ~RecordBatch() = default;
+
+  /// \param[in] schema The record batch schema
+  /// \param[in] num_rows length of fields in the record batch. Each array
+  /// should have the same length as num_rows
+  /// \param[in] columns the record batch fields as vector of arrays
+  static std::shared_ptr<RecordBatch> Make(std::shared_ptr<Schema> schema,
+                                           int64_t num_rows,
+                                           std::vector<std::shared_ptr<Array>> columns);
+
+  /// \brief Construct record batch from vector of internal data structures
+  /// \since 0.5.0
+  ///
+  /// This class is intended for internal use, or advanced users.
+  ///
+  /// \param schema the record batch schema
+  /// \param num_rows the number of semantic rows in the record batch. This
+  /// should be equal to the length of each field
+  /// \param columns the data for the batch's columns
+  static std::shared_ptr<RecordBatch> Make(
+      std::shared_ptr<Schema> schema, int64_t num_rows,
+      std::vector<std::shared_ptr<ArrayData>> columns);
+
+  /// \brief Create an empty RecordBatch of a given schema
+  ///
+  /// The output RecordBatch will be created with DataTypes from
+  /// the given schema.
+  ///
+  /// \param[in] schema the schema of the empty RecordBatch
+  /// \param[in] pool the memory pool to allocate memory from
+  /// \return the resulting RecordBatch
+  static Result<std::shared_ptr<RecordBatch>> MakeEmpty(
+      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Convert record batch to struct array
+  ///
+  /// Create a struct array whose child arrays are the record batch's columns.
+  /// Note that the record batch's top-level field metadata cannot be reflected
+  /// in the resulting struct array.
+  Result<std::shared_ptr<StructArray>> ToStructArray() const;
+
+  /// \brief Convert record batch with one data type to Tensor
+  ///
+  /// Create a Tensor object with shape (number of rows, number of columns) and
+  /// strides (type size in bytes, type size in bytes * number of rows).
+  /// Generated Tensor will have column-major layout.
+  ///
+  /// \param[in] null_to_nan if true, convert nulls to NaN
+  /// \param[in] row_major if true, create row-major Tensor else column-major Tensor
+  /// \param[in] pool the memory pool to allocate the tensor buffer
+  /// \return the resulting Tensor
+  Result<std::shared_ptr<Tensor>> ToTensor(
+      bool null_to_nan = false, bool row_major = true,
+      MemoryPool* pool = default_memory_pool()) const;
+
+  /// \brief Construct record batch from struct array
+  ///
+  /// This constructs a record batch using the child arrays of the given
+  /// array, which must be a struct array.
+  ///
+  /// \param[in] array the source array, must be a StructArray
+  /// \param[in] pool the memory pool to allocate new validity bitmaps
+  ///
+  /// This operation will usually be zero-copy. However, if the struct array has an
+  /// offset or a validity bitmap then these will need to be pushed into the child arrays.
+  /// Pushing the offset is zero-copy but pushing the validity bitmap is not.
+  static Result<std::shared_ptr<RecordBatch>> FromStructArray(
+      const std::shared_ptr<Array>& array, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Determine if two record batches are exactly equal
+  ///
+  /// \param[in] other the RecordBatch to compare with
+  /// \param[in] check_metadata if true, check that Schema metadata is the same
+  /// \param[in] opts the options for equality comparisons
+  /// \return true if batches are equal
+  bool Equals(const RecordBatch& other, bool check_metadata = false,
+              const EqualOptions& opts = EqualOptions::Defaults()) const;
+
+  /// \brief Determine if two record batches are approximately equal
+  ///
+  /// \param[in] other the RecordBatch to compare with
+  /// \param[in] opts the options for equality comparisons
+  /// \return true if batches are approximately equal
+  bool ApproxEquals(const RecordBatch& other,
+                    const EqualOptions& opts = EqualOptions::Defaults()) const;
+
+  /// \return the record batch's schema
+  const std::shared_ptr<Schema>& schema() const { return schema_; }
+
+  /// \brief Replace the schema with another schema with the same types, but potentially
+  /// different field names and/or metadata.
+  Result<std::shared_ptr<RecordBatch>> ReplaceSchema(
+      std::shared_ptr<Schema> schema) const;
+
+  /// \brief Retrieve all columns at once
+  virtual const std::vector<std::shared_ptr<Array>>& columns() const = 0;
+
+  /// \brief Retrieve an array from the record batch
+  /// \param[in] i field index, does not boundscheck
+  /// \return an Array object
+  virtual std::shared_ptr<Array> column(int i) const = 0;
+
+  /// \brief Retrieve an array from the record batch
+  /// \param[in] name field name
+  /// \return an Array or null if no field was found
+  std::shared_ptr<Array> GetColumnByName(const std::string& name) const;
+
+  /// \brief Retrieve an array's internal data from the record batch
+  /// \param[in] i field index, does not boundscheck
+  /// \return an internal ArrayData object
+  virtual std::shared_ptr<ArrayData> column_data(int i) const = 0;
+
+  /// \brief Retrieve all arrays' internal data from the record batch.
+  virtual const ArrayDataVector& column_data() const = 0;
+
+  /// \brief Add column to the record batch, producing a new RecordBatch
+  ///
+  /// \param[in] i field index, which will be boundschecked
+  /// \param[in] field field to be added
+  /// \param[in] column column to be added
+  virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
+      int i, const std::shared_ptr<Field>& field,
+      const std::shared_ptr<Array>& column) const = 0;
+
+  /// \brief Add new nullable column to the record batch, producing a new
+  /// RecordBatch.
+  ///
+  /// For non-nullable columns, use the Field-based version of this method.
+  ///
+  /// \param[in] i field index, which will be boundschecked
+  /// \param[in] field_name name of field to be added
+  /// \param[in] column column to be added
+  virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
+      int i, std::string field_name, const std::shared_ptr<Array>& column) const;
+
+  /// \brief Replace a column in the record batch, producing a new RecordBatch
+  ///
+  /// \param[in] i field index, does boundscheck
+  /// \param[in] field field to be replaced
+  /// \param[in] column column to be replaced
+  virtual Result<std::shared_ptr<RecordBatch>> SetColumn(
+      int i, const std::shared_ptr<Field>& field,
+      const std::shared_ptr<Array>& column) const = 0;
+
+  /// \brief Remove column from the record batch, producing a new RecordBatch
+  ///
+  /// \param[in] i field index, does boundscheck
+  virtual Result<std::shared_ptr<RecordBatch>> RemoveColumn(int i) const = 0;
+
+  virtual std::shared_ptr<RecordBatch> ReplaceSchemaMetadata(
+      const std::shared_ptr<const KeyValueMetadata>& metadata) const = 0;
+
+  /// \brief Name of the i-th column
+  const std::string& column_name(int i) const;
+
+  /// \return the number of columns in the record batch
+  int num_columns() const;
+
+  /// \return the number of rows (the corresponding length of each column)
+  int64_t num_rows() const { return num_rows_; }
+
+  /// \brief Copy the entire RecordBatch to destination MemoryManager
+  ///
+  /// This uses Array::CopyTo on each column of the record batch to create
+  /// a new record batch where all underlying buffers for the columns have
+  /// been copied to the destination MemoryManager. This uses
+  /// MemoryManager::CopyBuffer under the hood.
+  Result<std::shared_ptr<RecordBatch>> CopyTo(
+      const std::shared_ptr<MemoryManager>& to) const;
+
+  /// \brief View or Copy the entire RecordBatch to destination MemoryManager
+  ///
+  /// This uses Array::ViewOrCopyTo on each column of the record batch to create
+  /// a new record batch where all underlying buffers for the columns have
+  /// been zero-copy viewed on the destination MemoryManager, falling back
+  /// to performing a copy if it can't be viewed as a zero-copy buffer. This uses
+  /// Buffer::ViewOrCopy under the hood.
+  Result<std::shared_ptr<RecordBatch>> ViewOrCopyTo(
+      const std::shared_ptr<MemoryManager>& to) const;
+
+  /// \brief Slice each of the arrays in the record batch
+  /// \param[in] offset the starting offset to slice, through end of batch
+  /// \return new record batch
+  virtual std::shared_ptr<RecordBatch> Slice(int64_t offset) const;
+
+  /// \brief Slice each of the arrays in the record batch
+  /// \param[in] offset the starting offset to slice
+  /// \param[in] length the number of elements to slice from offset
+  /// \return new record batch
+  virtual std::shared_ptr<RecordBatch> Slice(int64_t offset, int64_t length) const = 0;
+
+  /// \return PrettyPrint representation suitable for debugging
+  std::string ToString() const;
+
+  /// \brief Return names of all columns
+  std::vector<std::string> ColumnNames() const;
+
+  /// \brief Rename columns with provided names
+  Result<std::shared_ptr<RecordBatch>> RenameColumns(
+      const std::vector<std::string>& names) const;
+
+  /// \brief Return new record batch with specified columns
+  Result<std::shared_ptr<RecordBatch>> SelectColumns(
+      const std::vector<int>& indices) const;
+
+  /// \brief Perform cheap validation checks to determine obvious inconsistencies
+  /// within the record batch's schema and internal data.
+  ///
+  /// This is O(k) where k is the total number of fields and array descendants.
+  ///
+  /// \return Status
+  virtual Status Validate() const;
+
+  /// \brief Perform extensive validation checks to determine inconsistencies
+  /// within the record batch's schema and internal data.
+  ///
+  /// This is potentially O(k*n) where n is the number of rows.
+  ///
+  /// \return Status
+  virtual Status ValidateFull() const;
+
+ protected:
+  RecordBatch(const std::shared_ptr<Schema>& schema, int64_t num_rows);
+
+  std::shared_ptr<Schema> schema_;
+  int64_t num_rows_;
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(RecordBatch);
+};
+
+struct ARROW_EXPORT RecordBatchWithMetadata {
+  std::shared_ptr<RecordBatch> batch;
+  std::shared_ptr<KeyValueMetadata> custom_metadata;
+};
+
+/// \brief Abstract interface for reading stream of record batches
+class ARROW_EXPORT RecordBatchReader {
+ public:
+  using ValueType = std::shared_ptr<RecordBatch>;
+
+  virtual ~RecordBatchReader();
+
+  /// \return the shared schema of the record batches in the stream
+  virtual std::shared_ptr<Schema> schema() const = 0;
+
+  /// \brief Read the next record batch in the stream. Return null for batch
+  /// when reaching end of stream
+  ///
+  /// \param[out] batch the next loaded batch, null at end of stream
+  /// \return Status
+  virtual Status ReadNext(std::shared_ptr<RecordBatch>* batch) = 0;
+
+  virtual Result<RecordBatchWithMetadata> ReadNext() {
+    return Status::NotImplemented("ReadNext with custom metadata");
+  }
+
+  /// \brief Iterator interface
+  Result<std::shared_ptr<RecordBatch>> Next() {
+    std::shared_ptr<RecordBatch> batch;
+    ARROW_RETURN_NOT_OK(ReadNext(&batch));
+    return batch;
+  }
+
+  /// \brief finalize reader
+  virtual Status Close() { return Status::OK(); }
+
+  class RecordBatchReaderIterator {
+   public:
+    using iterator_category = std::input_iterator_tag;
+    using difference_type = std::ptrdiff_t;
+    using value_type = std::shared_ptr<RecordBatch>;
+    using pointer = value_type const*;
+    using reference = value_type const&;
+
+    RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(NULLPTR) {}
+
+    explicit RecordBatchReaderIterator(RecordBatchReader* reader)
+        : batch_(RecordBatchEnd()), reader_(reader) {
+      Next();
+    }
+
+    bool operator==(const RecordBatchReaderIterator& other) const {
+      return batch_ == other.batch_;
+    }
+
+    bool operator!=(const RecordBatchReaderIterator& other) const {
+      return !(*this == other);
+    }
+
+    Result<std::shared_ptr<RecordBatch>> operator*() {
+      ARROW_RETURN_NOT_OK(batch_.status());
+
+      return batch_;
+    }
+
+    RecordBatchReaderIterator& operator++() {
+      Next();
+      return *this;
+    }
+
+    RecordBatchReaderIterator operator++(int) {
+      RecordBatchReaderIterator tmp(*this);
+      Next();
+      return tmp;
+    }
+
+   private:
+    std::shared_ptr<RecordBatch> RecordBatchEnd() {
+      return std::shared_ptr<RecordBatch>(NULLPTR);
+    }
+
+    void Next() {
+      if (reader_ == NULLPTR) {
+        batch_ = RecordBatchEnd();
+        return;
+      }
+      batch_ = reader_->Next();
+    }
+
+    Result<std::shared_ptr<RecordBatch>> batch_;
+    RecordBatchReader* reader_;
+  };
+  /// \brief Return an iterator to the first record batch in the stream
+  RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); }
+
+  /// \brief Return an iterator to the end of the stream
+  RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); }
+
+  /// \brief Consume entire stream as a vector of record batches
+  Result<RecordBatchVector> ToRecordBatches();
+
+  /// \brief Read all batches and concatenate as arrow::Table
+  Result<std::shared_ptr<Table>> ToTable();
+
+  /// \brief Create a RecordBatchReader from a vector of RecordBatch.
+  ///
+  /// \param[in] batches the vector of RecordBatch to read from
+  /// \param[in] schema schema to conform to. Will be inferred from the first
+  /// element if not provided.
+  static Result<std::shared_ptr<RecordBatchReader>> Make(
+      RecordBatchVector batches, std::shared_ptr<Schema> schema = NULLPTR);
+
+  /// \brief Create a RecordBatchReader from an Iterator of RecordBatch.
+  ///
+  /// \param[in] batches an iterator of RecordBatch to read from.
+  /// \param[in] schema schema that each record batch in iterator will conform to.
+  static Result<std::shared_ptr<RecordBatchReader>> MakeFromIterator(
+      Iterator<std::shared_ptr<RecordBatch>> batches, std::shared_ptr<Schema> schema);
+};
+
+/// \brief Concatenate record batches
+///
+/// The columns of the new batch are formed by concatenating the same columns
+/// of each input batch. Concatenating multiple batches into a new batch
+/// requires that the schemas be consistent. It supports merging batches
+/// without columns (only a length, for scenarios such as count(*)).
+///
+/// \param[in] batches a vector of record batches to be concatenated
+/// \param[in] pool memory to store the result will be allocated from this memory pool
+/// \return the concatenated record batch
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatch>> ConcatenateRecordBatches(
+    const RecordBatchVector& batches, MemoryPool* pool = default_memory_pool());
+
+}  // namespace arrow
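Note: a minimal sketch of assembling a RecordBatch with the first Make overload above, using two builder-produced columns that match the schema (the field names are illustrative):

#include <memory>
#include <vector>

#include "arrow/array/builder_primitive.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/type.h"

arrow::Result<std::shared_ptr<arrow::RecordBatch>> MakeExampleBatch() {
  // Two equal-length columns; their lengths must match num_rows below.
  arrow::Int32Builder ids;
  ARROW_RETURN_NOT_OK(ids.AppendValues({1, 2, 3}));
  ARROW_ASSIGN_OR_RAISE(auto id_array, ids.Finish());

  arrow::DoubleBuilder scores;
  ARROW_RETURN_NOT_OK(scores.AppendValues({0.5, 1.5, 2.5}));
  ARROW_ASSIGN_OR_RAISE(auto score_array, scores.Finish());

  auto schema = arrow::schema({arrow::field("id", arrow::int32()),
                               arrow::field("score", arrow::float64())});
  return arrow::RecordBatch::Make(schema, /*num_rows=*/3,
                                  {id_array, score_array});
}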