diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..8cc54b4c6bfdaa0e347b3927d7932934916a1ade --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.pxd @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from libcpp.memory cimport shared_ptr +from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType, + CField, CRecordBatch, CSchema, + CTable, CTensor, CSparseCOOTensor, + CSparseCSRMatrix, CSparseCSCMatrix, + CSparseCSFTensor) + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + cdef int import_pyarrow() except -1 + cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer) + cdef object wrap_data_type(const shared_ptr[CDataType]& type) + cdef object wrap_field(const shared_ptr[CField]& field) + cdef object wrap_schema(const shared_ptr[CSchema]& schema) + cdef object wrap_array(const shared_ptr[CArray]& sp_array) + cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor) + cdef object wrap_sparse_tensor_coo( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csr( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csc( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csf( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) + cdef object wrap_table(const shared_ptr[CTable]& ctable) + cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..936f4736977c85e977792d73c34af8ba97e6f7b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/__init__.py @@ -0,0 +1,429 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
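The `__init__.pxd` above exposes the `arrow::py` bridging helpers (`import_pyarrow()` and the `wrap_*` functions) to third-party Cython extensions that `cimport pyarrow`. A minimal sketch of such an extension follows — the module name `example_ext.pyx` is hypothetical and the build flags are assumed to come from the `get_include()` / `get_libraries()` / `get_library_dirs()` helpers defined later in this diff:

# distutils: language = c++
# Hypothetical example_ext.pyx; include and library paths would be taken
# from pyarrow.get_include(), pyarrow.get_libraries() and
# pyarrow.get_library_dirs() in the extension's setup script.

from libcpp.memory cimport shared_ptr
from pyarrow.includes.libarrow cimport CArray
cimport pyarrow

# Initialize the pyarrow C API once at import time, analogous to
# numpy's import_array() convention.
pyarrow.import_pyarrow()

cdef object array_to_python(const shared_ptr[CArray]& sp_array):
    # Wrap a C++ arrow::Array as a Python pyarrow.Array.
    return pyarrow.wrap_array(sp_array)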
+ +# flake8: noqa + +""" +PyArrow is the python implementation of Apache Arrow. + +Apache Arrow is a cross-language development platform for in-memory data. +It specifies a standardized language-independent columnar memory format for +flat and hierarchical data, organized for efficient analytic operations on +modern hardware. It also provides computational libraries and zero-copy +streaming messaging and interprocess communication. + +For more information see the official page at https://arrow.apache.org +""" + +import gc as _gc +import importlib as _importlib +import os as _os +import platform as _platform +import sys as _sys +import warnings as _warnings + +try: + from ._generated_version import version as __version__ +except ImportError: + # Package is not installed, parse git tag at runtime + try: + import setuptools_scm + # Code duplicated from setup.py to avoid a dependency on each other + + def parse_git(root, **kwargs): + """ + Parse function for setuptools_scm that ignores tags for non-C++ + subprojects, e.g. apache-arrow-js-XXX tags. + """ + from setuptools_scm.git import parse + kwargs['describe_command'] = \ + "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'" + return parse(root, **kwargs) + __version__ = setuptools_scm.get_version('../', + parse=parse_git) + except ImportError: + __version__ = None + +# ARROW-8684: Disable GC while initializing Cython extension module, +# to workaround Cython bug in https://github.com/cython/cython/issues/3603 +_gc_enabled = _gc.isenabled() +_gc.disable() +import pyarrow.lib as _lib +if _gc_enabled: + _gc.enable() + +from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path, + MonthDayNano, VersionInfo, cpp_build_info, + cpp_version, cpp_version_info, runtime_info, + cpu_count, set_cpu_count, enable_signal_handlers, + io_thread_count, set_io_thread_count) + + +def show_versions(): + """ + Print various version information, to help with error reporting. 
+ """ + def print_entry(label, value): + print(f"{label: <26}: {value: <8}") + + print("pyarrow version info\n--------------------") + print_entry("Package kind", cpp_build_info.package_kind + if len(cpp_build_info.package_kind) > 0 + else "not indicated") + print_entry("Arrow C++ library version", cpp_build_info.version) + print_entry("Arrow C++ compiler", + f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}") + print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags) + print_entry("Arrow C++ git revision", cpp_build_info.git_id) + print_entry("Arrow C++ git description", cpp_build_info.git_description) + print_entry("Arrow C++ build type", cpp_build_info.build_type) + + +def _module_is_available(module): + try: + _importlib.import_module(f'pyarrow.{module}') + except ImportError: + return False + else: + return True + + +def _filesystem_is_available(fs): + try: + import pyarrow.fs + except ImportError: + return False + + try: + getattr(pyarrow.fs, fs) + except (ImportError, AttributeError): + return False + else: + return True + + +def show_info(): + """ + Print detailed version and platform information, for error reporting + """ + show_versions() + + def print_entry(label, value): + print(f" {label: <20}: {value: <8}") + + print("\nPlatform:") + print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}") + print_entry("SIMD Level", runtime_info().simd_level) + print_entry("Detected SIMD Level", runtime_info().detected_simd_level) + + pool = default_memory_pool() + print("\nMemory:") + print_entry("Default backend", pool.backend_name) + print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes") + print_entry("Max memory", f"{pool.max_memory()} bytes") + print_entry("Supported Backends", ', '.join(supported_memory_backends())) + + print("\nOptional modules:") + modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json", + "orc", "parquet"] + for module in modules: + status = "Enabled" if _module_is_available(module) else "-" + print(f" {module: <20}: {status: <8}") + + print("\nFilesystems:") + filesystems = ["AzureFileSystem", "GcsFileSystem", + "HadoopFileSystem", "S3FileSystem"] + for fs in filesystems: + status = "Enabled" if _filesystem_is_available(fs) else "-" + print(f" {fs: <20}: {status: <8}") + + print("\nCompression Codecs:") + codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"] + for codec in codecs: + status = "Enabled" if Codec.is_available(codec) else "-" + print(f" {codec: <20}: {status: <8}") + + +from pyarrow.lib import (null, bool_, + int8, int16, int32, int64, + uint8, uint16, uint32, uint64, + time32, time64, timestamp, date32, date64, duration, + month_day_nano_interval, + float16, float32, float64, + binary, string, utf8, binary_view, string_view, + large_binary, large_string, large_utf8, + decimal128, decimal256, + list_, large_list, list_view, large_list_view, + map_, struct, + union, sparse_union, dense_union, + dictionary, + run_end_encoded, + fixed_shape_tensor, + field, + type_for_alias, + DataType, DictionaryType, StructType, + ListType, LargeListType, FixedSizeListType, + ListViewType, LargeListViewType, + MapType, UnionType, SparseUnionType, DenseUnionType, + TimestampType, Time32Type, Time64Type, DurationType, + FixedSizeBinaryType, Decimal128Type, Decimal256Type, + BaseExtensionType, ExtensionType, + RunEndEncodedType, FixedShapeTensorType, + PyExtensionType, UnknownExtensionType, + register_extension_type, unregister_extension_type, + DictionaryMemo, + 
KeyValueMetadata, + Field, + Schema, + schema, + unify_schemas, + Array, Tensor, + array, chunked_array, record_batch, nulls, repeat, + SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix, + SparseCSFTensor, + infer_type, from_numpy_dtype, + NullArray, + NumericArray, IntegerArray, FloatingPointArray, + BooleanArray, + Int8Array, UInt8Array, + Int16Array, UInt16Array, + Int32Array, UInt32Array, + Int64Array, UInt64Array, + HalfFloatArray, FloatArray, DoubleArray, + ListArray, LargeListArray, FixedSizeListArray, + ListViewArray, LargeListViewArray, + MapArray, UnionArray, + BinaryArray, StringArray, + LargeBinaryArray, LargeStringArray, + BinaryViewArray, StringViewArray, + FixedSizeBinaryArray, + DictionaryArray, + Date32Array, Date64Array, TimestampArray, + Time32Array, Time64Array, DurationArray, + MonthDayNanoIntervalArray, + Decimal128Array, Decimal256Array, StructArray, ExtensionArray, + RunEndEncodedArray, FixedShapeTensorArray, + scalar, NA, _NULL as NULL, Scalar, + NullScalar, BooleanScalar, + Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar, + UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar, + HalfFloatScalar, FloatScalar, DoubleScalar, + Decimal128Scalar, Decimal256Scalar, + ListScalar, LargeListScalar, FixedSizeListScalar, + ListViewScalar, LargeListViewScalar, + Date32Scalar, Date64Scalar, + Time32Scalar, Time64Scalar, + TimestampScalar, DurationScalar, + MonthDayNanoIntervalScalar, + BinaryScalar, LargeBinaryScalar, BinaryViewScalar, + StringScalar, LargeStringScalar, StringViewScalar, + FixedSizeBinaryScalar, DictionaryScalar, + MapScalar, StructScalar, UnionScalar, + RunEndEncodedScalar, ExtensionScalar) + +# Buffers, allocation +from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer, + Codec, compress, decompress, allocate_buffer) + +from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool, + total_allocated_bytes, set_memory_pool, + default_memory_pool, system_memory_pool, + jemalloc_memory_pool, mimalloc_memory_pool, + logging_memory_pool, proxy_memory_pool, + log_memory_allocations, jemalloc_set_decay_ms, + supported_memory_backends) + +# I/O +from pyarrow.lib import (NativeFile, PythonFile, + BufferedInputStream, BufferedOutputStream, CacheOptions, + CompressedInputStream, CompressedOutputStream, + TransformInputStream, transcoding_input_stream, + FixedSizeBufferWriter, + BufferReader, BufferOutputStream, + OSFile, MemoryMappedFile, memory_map, + create_memory_map, MockOutputStream, + input_stream, output_stream, + have_libhdfs) + +from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table, + concat_arrays, concat_tables, TableGroupBy, + RecordBatchReader) + +# Exceptions +from pyarrow.lib import (ArrowCancelled, + ArrowCapacityError, + ArrowException, + ArrowKeyError, + ArrowIndexError, + ArrowInvalid, + ArrowIOError, + ArrowMemoryError, + ArrowNotImplementedError, + ArrowTypeError, + ArrowSerializationError) + +from pyarrow.ipc import serialize_pandas, deserialize_pandas +import pyarrow.ipc as ipc + +import pyarrow.types as types + + +# ---------------------------------------------------------------------- +# Deprecations + +from pyarrow.util import _deprecate_api, _deprecate_class + + +# TODO: Deprecate these somehow in the pyarrow namespace +from pyarrow.ipc import (Message, MessageReader, MetadataVersion, + RecordBatchFileReader, RecordBatchFileWriter, + RecordBatchStreamReader, RecordBatchStreamWriter) + +# ---------------------------------------------------------------------- +# Returning absolute path to the pyarrow 
include directory (if bundled, e.g. in +# wheels) + + +def get_include(): + """ + Return absolute path to directory containing Arrow C++ include + headers. Similar to numpy.get_include + """ + return _os.path.join(_os.path.dirname(__file__), 'include') + + +def _get_pkg_config_executable(): + return _os.environ.get('PKG_CONFIG', 'pkg-config') + + +def _has_pkg_config(pkgname): + import subprocess + try: + return subprocess.call([_get_pkg_config_executable(), + '--exists', pkgname]) == 0 + except FileNotFoundError: + return False + + +def _read_pkg_config_variable(pkgname, cli_args): + import subprocess + cmd = [_get_pkg_config_executable(), pkgname] + cli_args + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode != 0: + raise RuntimeError("pkg-config failed: " + err.decode('utf8')) + return out.rstrip().decode('utf8') + + +def get_libraries(): + """ + Return list of library names to include in the `libraries` argument for C + or Cython extensions using pyarrow + """ + return ['arrow_python', 'arrow'] + + +def create_library_symlinks(): + """ + With Linux and macOS wheels, the bundled shared libraries have an embedded + ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them + with -larrow won't work unless we create symlinks at locations like + site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses + prior problems we had with shipping two copies of the shared libraries to + permit third party projects like turbodbc to build their C++ extensions + against the pyarrow wheels. + + This function must only be invoked once and only when the shared libraries + are bundled with the Python package, which should only apply to wheel-based + installs. It requires write access to the site-packages/pyarrow directory + and so depending on your system may need to be run with root. + """ + import glob + if _sys.platform == 'win32': + return + package_cwd = _os.path.dirname(__file__) + + if _sys.platform == 'linux': + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*')) + + def get_symlink_path(hard_path): + return hard_path.rsplit('.', 1)[0] + else: + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib')) + + def get_symlink_path(hard_path): + return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib')) + + for lib_hard_path in bundled_libs: + symlink_path = get_symlink_path(lib_hard_path) + if _os.path.exists(symlink_path): + continue + try: + _os.symlink(lib_hard_path, symlink_path) + except PermissionError: + print("Tried creating symlink {}. If you need to link to " + "bundled shared libraries, run " + "pyarrow.create_library_symlinks() as root") + + +def get_library_dirs(): + """ + Return lists of directories likely to contain Arrow C++ libraries for + linking C or Cython extensions using pyarrow + """ + package_cwd = _os.path.dirname(__file__) + library_dirs = [package_cwd] + + def append_library_dir(library_dir): + if library_dir not in library_dirs: + library_dirs.append(library_dir) + + # Search library paths via pkg-config. This is necessary if the user + # installed libarrow and the other shared libraries manually and they + # are not shipped inside the pyarrow package (see also ARROW-2976). 
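+    # For example, "pkg-config arrow --libs-only-L" typically prints a single
+    # flag such as "-L/usr/local/lib"; the leading "-L" is stripped below
+    # before the directory is appended to library_dirs.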
+ pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config' + for pkgname in ["arrow", "arrow_python"]: + if _has_pkg_config(pkgname): + library_dir = _read_pkg_config_variable(pkgname, + ["--libs-only-L"]) + # pkg-config output could be empty if Arrow is installed + # as a system package. + if library_dir: + if not library_dir.startswith("-L"): + raise ValueError( + "pkg-config --libs-only-L returned unexpected " + "value {!r}".format(library_dir)) + append_library_dir(library_dir[2:]) + + if _sys.platform == 'win32': + # TODO(wesm): Is this necessary, or does setuptools within a conda + # installation add Library\lib to the linker path for MSVC? + python_base_install = _os.path.dirname(_sys.executable) + library_dir = _os.path.join(python_base_install, 'Library', 'lib') + + if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')): + append_library_dir(library_dir) + + # ARROW-4074: Allow for ARROW_HOME to be set to some other directory + if _os.environ.get('ARROW_HOME'): + append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib')) + else: + # Python wheels bundle the Arrow libraries in the pyarrow directory. + append_library_dir(_os.path.dirname(_os.path.abspath(__file__))) + + return library_dirs diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4553aee9d6f16c391340aa45489471bdcfe0cb76 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pxd @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + + +cdef class ExecNodeOptions(_Weakrefable): + cdef: + shared_ptr[CExecNodeOptions] wrapped + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp) + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil + + +cdef class Declaration(_Weakrefable): + + cdef: + CDeclaration decl + + cdef void init(self, const CDeclaration& c_decl) + + @staticmethod + cdef wrap(const CDeclaration& c_decl) + + cdef inline CDeclaration unwrap(self) nogil diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9e8cbd65be224bb255448b580b44f0575942fc1e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_acero.pyx @@ -0,0 +1,608 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Low-level Acero bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.lib cimport (Table, pyarrow_unwrap_table, pyarrow_wrap_table, + RecordBatchReader) +from pyarrow.lib import frombytes, tobytes +from pyarrow._compute cimport ( + Expression, FunctionOptions, _ensure_field_ref, _true, + unwrap_null_placement, unwrap_sort_order +) + + +cdef class ExecNodeOptions(_Weakrefable): + """ + Base class for the node options. + + Use one of the subclasses to construct an options object. + """ + __slots__ = () # avoid mistakingly creating attributes + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp): + self.wrapped = sp + + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil: + return self.wrapped + + +cdef class _TableSourceNodeOptions(ExecNodeOptions): + + def _set_options(self, Table table): + cdef: + shared_ptr[CTable] c_table + + c_table = pyarrow_unwrap_table(table) + self.wrapped.reset( + new CTableSourceNodeOptions(c_table) + ) + + +class TableSourceNodeOptions(_TableSourceNodeOptions): + """ + A Source node which accepts a table. + + This is the option class for the "table_source" node factory. + + Parameters + ---------- + table : pyarrow.Table + The table which acts as the data source. + """ + + def __init__(self, Table table): + self._set_options(table) + + +cdef class _FilterNodeOptions(ExecNodeOptions): + + def _set_options(self, Expression filter_expression not None): + self.wrapped.reset( + new CFilterNodeOptions(filter_expression.unwrap()) + ) + + +class FilterNodeOptions(_FilterNodeOptions): + """ + Make a node which excludes some rows from batches passed through it. + + This is the option class for the "filter" node factory. + + The "filter" operation provides an option to define data filtering + criteria. It selects rows where the given expression evaluates to true. + Filters can be written using pyarrow.compute.Expression, and the + expression must have a return type of boolean. 
+ + Parameters + ---------- + filter_expression : pyarrow.compute.Expression + """ + + def __init__(self, Expression filter_expression): + self._set_options(filter_expression) + + +cdef class _ProjectNodeOptions(ExecNodeOptions): + + def _set_options(self, expressions, names=None): + cdef: + Expression expr + vector[CExpression] c_expressions + vector[c_string] c_names + + for expr in expressions: + c_expressions.push_back(expr.unwrap()) + + if names is not None: + if len(names) != len(expressions): + raise ValueError( + "The number of names should be equal to the number of expressions" + ) + + for name in names: + c_names.push_back(tobytes(name)) + + self.wrapped.reset( + new CProjectNodeOptions(c_expressions, c_names) + ) + else: + self.wrapped.reset( + new CProjectNodeOptions(c_expressions) + ) + + +class ProjectNodeOptions(_ProjectNodeOptions): + """ + Make a node which executes expressions on input batches, + producing batches of the same length with new columns. + + This is the option class for the "project" node factory. + + The "project" operation rearranges, deletes, transforms, and + creates columns. Each output column is computed by evaluating + an expression against the source record batch. These must be + scalar expressions (expressions consisting of scalar literals, + field references and scalar functions, i.e. elementwise functions + that return one value for each input row independent of the value + of all other rows). + + Parameters + ---------- + expressions : list of pyarrow.compute.Expression + List of expressions to evaluate against the source batch. This must + be scalar expressions. + names : list of str, optional + List of names for each of the output columns (same length as + `expressions`). If `names` is not provided, the string + representations of exprs will be used. + """ + + def __init__(self, expressions, names=None): + self._set_options(expressions, names) + + +cdef class _AggregateNodeOptions(ExecNodeOptions): + + def _set_options(self, aggregates, keys=None): + cdef: + CAggregate c_aggr + vector[CAggregate] c_aggregations + vector[CFieldRef] c_keys + + for arg_names, func_name, opts, name in aggregates: + c_aggr.function = tobytes(func_name) + if opts is not None: + c_aggr.options = (opts).wrapped + else: + c_aggr.options = nullptr + if not isinstance(arg_names, (list, tuple)): + arg_names = [arg_names] + for arg in arg_names: + c_aggr.target.push_back(_ensure_field_ref(arg)) + c_aggr.name = tobytes(name) + + c_aggregations.push_back(move(c_aggr)) + + if keys is None: + keys = [] + for name in keys: + c_keys.push_back(_ensure_field_ref(name)) + + self.wrapped.reset( + new CAggregateNodeOptions(c_aggregations, c_keys) + ) + + +class AggregateNodeOptions(_AggregateNodeOptions): + """ + Make a node which aggregates input batches, optionally grouped by keys. + + This is the option class for the "aggregate" node factory. + + Acero supports two types of aggregates: "scalar" aggregates, + and "hash" aggregates. Scalar aggregates reduce an array or scalar + input to a single scalar output (e.g. computing the mean of a column). + Hash aggregates act like GROUP BY in SQL and first partition data + based on one or more key columns, then reduce the data in each partition. + The aggregate node supports both types of computation, and can compute + any number of aggregations at once. + + Parameters + ---------- + aggregates : list of tuples + Aggregations which will be applied to the targeted fields. 
+ Specified as a list of tuples, where each tuple is one aggregation + specification and consists of: aggregation target column(s) followed + by function name, aggregation function options object and the + output field name. + The target column(s) specification can be a single field reference, + an empty list or a list of fields unary, nullary and n-ary aggregation + functions respectively. Each field reference can be a string + column name or expression. + keys : list of field references, optional + Keys by which aggregations will be grouped. Each key can reference + a field using a string name or expression. + """ + + def __init__(self, aggregates, keys=None): + self._set_options(aggregates, keys) + + +cdef class _OrderByNodeOptions(ExecNodeOptions): + + def _set_options(self, sort_keys, null_placement): + cdef: + vector[CSortKey] c_sort_keys + + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + + self.wrapped.reset( + new COrderByNodeOptions( + COrdering(c_sort_keys, unwrap_null_placement(null_placement)) + ) + ) + + +class OrderByNodeOptions(_OrderByNodeOptions): + """ + Make a node which applies a new ordering to the data. + + Currently this node works by accumulating all data, sorting, and then + emitting the new data with an updated batch index. + Larger-than-memory sort is not currently supported. + + This is the option class for the "order_by" node factory. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + Each field reference can be a string column name or expression. + null_placement : str, default "at_end" + Where nulls in input should be sorted, only applying to + columns/fields mentioned in `sort_keys`. + Accepted values are "at_start", "at_end". 
+ """ + + def __init__(self, sort_keys=(), *, null_placement="at_end"): + self._set_options(sort_keys, null_placement) + + +cdef class _HashJoinNodeOptions(ExecNodeOptions): + + def _set_options( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="", + ): + cdef: + CJoinType c_join_type + vector[CFieldRef] c_left_keys + vector[CFieldRef] c_right_keys + vector[CFieldRef] c_left_output + vector[CFieldRef] c_right_output + + # join type + if join_type == "left semi": + c_join_type = CJoinType_LEFT_SEMI + elif join_type == "right semi": + c_join_type = CJoinType_RIGHT_SEMI + elif join_type == "left anti": + c_join_type = CJoinType_LEFT_ANTI + elif join_type == "right anti": + c_join_type = CJoinType_RIGHT_ANTI + elif join_type == "inner": + c_join_type = CJoinType_INNER + elif join_type == "left outer": + c_join_type = CJoinType_LEFT_OUTER + elif join_type == "right outer": + c_join_type = CJoinType_RIGHT_OUTER + elif join_type == "full outer": + c_join_type = CJoinType_FULL_OUTER + else: + raise ValueError("Unsupported join type") + + # left/right keys + if not isinstance(left_keys, (list, tuple)): + left_keys = [left_keys] + for key in left_keys: + c_left_keys.push_back(_ensure_field_ref(key)) + if not isinstance(right_keys, (list, tuple)): + right_keys = [right_keys] + for key in right_keys: + c_right_keys.push_back(_ensure_field_ref(key)) + + # left/right output fields + if left_output is not None and right_output is not None: + for colname in left_output: + c_left_output.push_back(_ensure_field_ref(colname)) + for colname in right_output: + c_right_output.push_back(_ensure_field_ref(colname)) + + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + c_left_output, c_right_output, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + else: + self.wrapped.reset( + new CHashJoinNodeOptions( + c_join_type, c_left_keys, c_right_keys, + _true, + tobytes(output_suffix_for_left), + tobytes(output_suffix_for_right) + ) + ) + + +class HashJoinNodeOptions(_HashJoinNodeOptions): + """ + Make a node which implements join operation using hash join strategy. + + This is the option class for the "hashjoin" node factory. + + Parameters + ---------- + join_type : str + Type of join. One of "left semi", "right semi", "left anti", + "right anti", "inner", "left outer", "right outer", "full outer". + left_keys : str, Expression or list + Key fields from left input. Each key can be a string column name + or a field expression, or a list of such field references. + right_keys : str, Expression or list + Key fields from right input. See `left_keys` for details. + left_output : list, optional + List of output fields passed from left input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. + right_output : list, optional + List of output fields passed from right input. If left and right + output fields are not specified, all valid fields from both left and + right input will be output. Each field can be a string column name + or a field expression. + output_suffix_for_left : str + Suffix added to names of output fields coming from left input + (used to distinguish, if necessary, between fields of the same + name in left and right input and can be left empty if there are + no name collisions). 
+ output_suffix_for_right : str + Suffix added to names of output fields coming from right input, + see `output_suffix_for_left` for details. + """ + + def __init__( + self, join_type, left_keys, right_keys, left_output=None, right_output=None, + output_suffix_for_left="", output_suffix_for_right="" + ): + self._set_options( + join_type, left_keys, right_keys, left_output, right_output, + output_suffix_for_left, output_suffix_for_right + ) + + +cdef class _AsofJoinNodeOptions(ExecNodeOptions): + + def _set_options(self, left_on, left_by, right_on, right_by, tolerance): + cdef: + vector[CFieldRef] c_left_by + vector[CFieldRef] c_right_by + CAsofJoinKeys c_left_keys + CAsofJoinKeys c_right_keys + vector[CAsofJoinKeys] c_input_keys + + # Prepare left AsofJoinNodeOption::Keys + if not isinstance(left_by, (list, tuple)): + left_by = [left_by] + for key in left_by: + c_left_by.push_back(_ensure_field_ref(key)) + + c_left_keys.on_key = _ensure_field_ref(left_on) + c_left_keys.by_key = c_left_by + + c_input_keys.push_back(c_left_keys) + + # Prepare right AsofJoinNodeOption::Keys + if not isinstance(right_by, (list, tuple)): + right_by = [right_by] + for key in right_by: + c_right_by.push_back(_ensure_field_ref(key)) + + c_right_keys.on_key = _ensure_field_ref(right_on) + c_right_keys.by_key = c_right_by + + c_input_keys.push_back(c_right_keys) + + self.wrapped.reset( + new CAsofJoinNodeOptions( + c_input_keys, + tolerance, + ) + ) + + +class AsofJoinNodeOptions(_AsofJoinNodeOptions): + """ + Make a node which implements 'as of join' operation. + + This is the option class for the "asofjoin" node factory. + + Parameters + ---------- + left_on : str, Expression + The left key on which the join operation should be performed. + Can be a string column name or a field expression. + + An inexact match is used on the "on" key, i.e. a row is considered a + match if and only if left_on - tolerance <= right_on <= left_on. + + The input dataset must be sorted by the "on" key. Must be a single + field of a common type. + + Currently, the "on" key must be an integer, date, or timestamp type. + left_by: str, Expression or list + The left keys on which the join operation should be performed. + Exact equality is used for each field of the "by" keys. + Each key can be a string column name or a field expression, + or a list of such field references. + right_on : str, Expression + The right key on which the join operation should be performed. + See `left_on` for details. + right_by: str, Expression or list + The right keys on which the join operation should be performed. + See `left_by` for details. + tolerance : int + The tolerance to use for the asof join. The tolerance is interpreted in + the same units as the "on" key. + """ + + def __init__(self, left_on, left_by, right_on, right_by, tolerance): + self._set_options(left_on, left_by, right_on, right_by, tolerance) + + +cdef class Declaration(_Weakrefable): + """ + Helper class for declaring the nodes of an ExecPlan. + + A Declaration represents an unconstructed ExecNode, and potentially + more since its inputs may also be Declarations or when constructed + with ``from_sequence``. + + The possible ExecNodes to use are registered with a name, + the "factory name", and need to be specified using this name, together + with its corresponding ExecNodeOptions subclass. + + Parameters + ---------- + factory_name : str + The ExecNode factory name, such as "table_source", "filter", + "project" etc. See the ExecNodeOptions subclasses for the exact + factory names to use. 
+ options : ExecNodeOptions + Corresponding ExecNodeOptions subclass (matching the factory name). + inputs : list of Declaration, optional + Input nodes for this declaration. Optional if the node is a source + node, or when the declaration gets combined later with + ``from_sequence``. + + Returns + ------- + Declaration + """ + cdef void init(self, const CDeclaration& c_decl): + self.decl = c_decl + + @staticmethod + cdef wrap(const CDeclaration& c_decl): + cdef Declaration self = Declaration.__new__(Declaration) + self.init(c_decl) + return self + + cdef inline CDeclaration unwrap(self) nogil: + return self.decl + + def __init__(self, factory_name, ExecNodeOptions options, inputs=None): + cdef: + c_string c_factory_name + CDeclaration c_decl + vector[CDeclaration.Input] c_inputs + + c_factory_name = tobytes(factory_name) + + if inputs is not None: + for ipt in inputs: + c_inputs.push_back( + CDeclaration.Input((ipt).unwrap()) + ) + + c_decl = CDeclaration(c_factory_name, c_inputs, options.unwrap()) + self.init(c_decl) + + @staticmethod + def from_sequence(decls): + """ + Convenience factory for the common case of a simple sequence of nodes. + + Each of the declarations will be appended to the inputs of the + subsequent declaration, and the final modified declaration will + be returned. + + Parameters + ---------- + decls : list of Declaration + + Returns + ------- + Declaration + """ + cdef: + vector[CDeclaration] c_decls + CDeclaration c_decl + + for decl in decls: + c_decls.push_back(( decl).unwrap()) + + c_decl = CDeclaration.Sequence(c_decls) + return Declaration.wrap(c_decl) + + def __str__(self): + return frombytes(GetResultValue(DeclarationToString(self.decl))) + + def __repr__(self): + return "\n{0}".format(str(self)) + + def to_table(self, bint use_threads=True): + """ + Run the declaration and collect the results into a table. + + This method will implicitly add a sink node to the declaration + to collect results into a table. It will then create an ExecPlan + from the declaration, start the exec plan, block until the plan + has finished, and return the created table. + + Parameters + ---------- + use_threads : bool, default True + If set to False, then all CPU work will be done on the calling + thread. I/O tasks will still happen on the I/O executor + and may be multi-threaded (but should not use significant CPU + resources). + + Returns + ------- + pyarrow.Table + """ + cdef: + shared_ptr[CTable] c_table + + with nogil: + c_table = GetResultValue(DeclarationToTable(self.unwrap(), use_threads)) + return pyarrow_wrap_table(c_table) + + def to_reader(self, bint use_threads=True): + """Run the declaration and return results as a RecordBatchReader. + + For details about the parameters, see `to_table`. + + Returns + ------- + pyarrow.RecordBatchReader + """ + cdef: + RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader.reset( + GetResultValue(DeclarationToReader(self.unwrap(), use_threads)).release() + ) + return reader diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dcc562a41c795896d12fc7cdd3baebf0122bedc9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pxd @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable + + +cdef class ConvertOptions(_Weakrefable): + cdef: + unique_ptr[CCSVConvertOptions] options + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options) + + +cdef class ParseOptions(_Weakrefable): + cdef: + unique_ptr[CCSVParseOptions] options + object _invalid_row_handler + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options) + + +cdef class ReadOptions(_Weakrefable): + cdef: + unique_ptr[CCSVReadOptions] options + public object encoding + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options) + + +cdef class WriteOptions(_Weakrefable): + cdef: + unique_ptr[CCSVWriteOptions] options + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pyx new file mode 100644 index 0000000000000000000000000000000000000000..508488c0c3b3c3bcd2d2157f57f625b1e5b92c2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_csv.pyx @@ -0,0 +1,1542 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
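The `_csv.pxd` declarations above back the option classes exposed in `pyarrow.csv`. The read-side classes (`ReadOptions`, `ParseOptions`, `ConvertOptions`) are documented at length below; for `WriteOptions`, a minimal usage sketch (the table contents and the `animals.csv` output path are placeholders, not taken from the files in this diff):

import pyarrow as pa
from pyarrow import csv

table = pa.table({"animals": ["Flamingo", "Horse"], "n_legs": [2, 4]})

# Write the table back out as CSV; include_header controls whether the
# column names are emitted as the first row.
write_options = csv.WriteOptions(include_header=True)
csv.write_csv(table, "animals.csv", write_options=write_options)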
+ +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from cython.operator cimport dereference as deref + +from collections import namedtuple +from collections.abc import Mapping + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema, + RecordBatchReader, ensure_type, + maybe_unbox_memory_pool, get_input_stream, + get_writer, native_transcoding_input_stream, + pyarrow_unwrap_batch, pyarrow_unwrap_schema, + pyarrow_unwrap_table, pyarrow_wrap_schema, + pyarrow_wrap_table, pyarrow_wrap_data_type, + pyarrow_unwrap_data_type, Table, RecordBatch, + StopToken, _CRecordBatchWriter) +from pyarrow.lib import frombytes, tobytes, SignalStopHandler + + +cdef unsigned char _single_char(s) except 0: + val = ord(s) + if val == 0 or val > 127: + raise ValueError("Expecting an ASCII character") + return val + + +_InvalidRow = namedtuple( + "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"), + module=__name__) + + +class InvalidRow(_InvalidRow): + """ + Description of an invalid row in a CSV file. + + Parameters + ---------- + expected_columns : int + The expected number of columns in the row. + actual_columns : int + The actual number of columns in the row. + number : int or None + The physical row number if known, otherwise None. + text : str + The contents of the row. + """ + __slots__ = () + + +cdef CInvalidRowResult _handle_invalid_row( + handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error: + # A negative row number means undetermined (because of parallel reading) + row_number = c_row.number if c_row.number >= 0 else None + row = InvalidRow(c_row.expected_columns, c_row.actual_columns, + row_number, frombytes( c_row.text)) + result = handler(row) + if result == 'error': + return CInvalidRowResult_Error + elif result == 'skip': + return CInvalidRowResult_Skip + else: + raise ValueError("Invalid return value for invalid row handler: " + f"expected 'error' or 'skip', got {result!r}") + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading CSV files. + + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + Minimum valid value for block size is 1 + skip_rows : int, optional (default 0) + The number of rows to skip before the column names (if any) + and the CSV data. + skip_rows_after_names : int, optional (default 0) + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + column_names : list, optional + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + autogenerate_column_names : bool, optional (default False) + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. 
+ encoding : str, optional (default 'utf8') + The character encoding of the CSV data. Columns that cannot + decode using this encoding can still be read as Binary. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04" + >>> print(s) + 1,2,3 + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + + Ignore the first numbered row and substitute it with defined + or autogenerated column names: + + >>> from pyarrow import csv + >>> read_options = csv.ReadOptions( + ... column_names=["animals", "n_legs", "entry"], + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + >>> read_options = csv.ReadOptions(autogenerate_column_names=True, + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + f0: string + f1: int64 + f2: date32[day] + ---- + f0: [["Flamingo","Horse","Brittle stars","Centipede"]] + f1: [[2,4,5,100]] + f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + Remove the first 2 rows of the data: + + >>> read_options = csv.ReadOptions(skip_rows_after_names=2) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + 1: string + 2: int64 + 3: date32[day] + ---- + 1: [["Brittle stars","Centipede"]] + 2: [[5,100]] + 3: [[2022-03-03,2022-03-04]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + # __init__() is not called when unpickling, initialize storage here + def __cinit__(self, *argw, **kwargs): + self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults())) + + def __init__(self, *, use_threads=None, block_size=None, skip_rows=None, + skip_rows_after_names=None, column_names=None, + autogenerate_column_names=None, encoding='utf8'): + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + if skip_rows is not None: + self.skip_rows = skip_rows + if skip_rows_after_names is not None: + self.skip_rows_after_names = skip_rows_after_names + if column_names is not None: + self.column_names = column_names + if autogenerate_column_names is not None: + self.autogenerate_column_names= autogenerate_column_names + # Python-specific option + self.encoding = encoding + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return deref(self.options).use_threads + + @use_threads.setter + def use_threads(self, value): + deref(self.options).use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + """ + return deref(self.options).block_size + + @block_size.setter + def block_size(self, value): + deref(self.options).block_size = value + + @property + def skip_rows(self): + """ + The number of rows to skip before the column names (if any) + and the CSV data. 
+ See `skip_rows_after_names` for interaction description + """ + return deref(self.options).skip_rows + + @skip_rows.setter + def skip_rows(self, value): + deref(self.options).skip_rows = value + + @property + def skip_rows_after_names(self): + """ + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + """ + return deref(self.options).skip_rows_after_names + + @skip_rows_after_names.setter + def skip_rows_after_names(self, value): + deref(self.options).skip_rows_after_names = value + + @property + def column_names(self): + """ + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + """ + return [frombytes(s) for s in deref(self.options).column_names] + + @column_names.setter + def column_names(self, value): + deref(self.options).column_names.clear() + for item in value: + deref(self.options).column_names.push_back(tobytes(item)) + + @property + def autogenerate_column_names(self): + """ + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + """ + return deref(self.options).autogenerate_column_names + + @autogenerate_column_names.setter + def autogenerate_column_names(self, value): + deref(self.options).autogenerate_column_names = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size and + self.skip_rows == other.skip_rows and + self.skip_rows_after_names == other.skip_rows_after_names and + self.column_names == other.column_names and + self.autogenerate_column_names == + other.autogenerate_column_names and + self.encoding == other.encoding + ) + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options): + out = ReadOptions() + out.options.reset(new CCSVReadOptions(move(options))) + out.encoding = 'utf8' # No way to know this + return out + + def __getstate__(self): + return (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) + + def __setstate__(self, state): + (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing CSV files. + + Parameters + ---------- + delimiter : 1-character string, optional (default ',') + The character delimiting individual cells in the CSV data. + quote_char : 1-character string or False, optional (default '"') + The character used optionally for quoting CSV values + (False if quoting is not allowed). + double_quote : bool, optional (default True) + Whether two quotes in a quoted CSV value denote a single quote + in the data. 
+ escape_char : 1-character string or False, optional (default False) + The character used optionally for escaping special characters + (False if escaping is not allowed). + newlines_in_values : bool, optional (default False) + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + ignore_empty_lines : bool, optional (default True) + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + invalid_row_handler : callable, optional (default None) + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals;n_legs;entry\\n" + ... "Flamingo;2;2022-03-01\\n" + ... "# Comment here:\\n" + ... "Horse;4;2022-03-02\\n" + ... "Brittle stars;5;2022-03-03\\n" + ... "Centipede;100;2022-03-04" + ... ) + >>> print(s) + animals;n_legs;entry + Flamingo;2;2022-03-01 + # Comment here: + Horse;4;2022-03-02 + Brittle stars;5;2022-03-03 + Centipede;100;2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Read the data from a file skipping rows with comments + and defining the delimiter: + + >>> from pyarrow import csv + >>> def skip_comment(row): + ... if row.text.startswith("# "): + ... return 'skip' + ... else: + ... return 'error' + ... + >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment) + >>> csv.read_csv(source, parse_options=parse_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self._invalid_row_handler = None + self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults())) + + def __init__(self, *, delimiter=None, quote_char=None, double_quote=None, + escape_char=None, newlines_in_values=None, + ignore_empty_lines=None, invalid_row_handler=None): + if delimiter is not None: + self.delimiter = delimiter + if quote_char is not None: + self.quote_char = quote_char + if double_quote is not None: + self.double_quote = double_quote + if escape_char is not None: + self.escape_char = escape_char + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if ignore_empty_lines is not None: + self.ignore_empty_lines = ignore_empty_lines + if invalid_row_handler is not None: + self.invalid_row_handler = invalid_row_handler + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quote_char(self): + """ + The character used optionally for quoting CSV values + (False if quoting is not allowed). 
+ """ + if deref(self.options).quoting: + return chr(deref(self.options).quote_char) + else: + return False + + @quote_char.setter + def quote_char(self, value): + if value is False: + deref(self.options).quoting = False + else: + deref(self.options).quote_char = _single_char(value) + deref(self.options).quoting = True + + @property + def double_quote(self): + """ + Whether two quotes in a quoted CSV value denote a single quote + in the data. + """ + return deref(self.options).double_quote + + @double_quote.setter + def double_quote(self, value): + deref(self.options).double_quote = value + + @property + def escape_char(self): + """ + The character used optionally for escaping special characters + (False if escaping is not allowed). + """ + if deref(self.options).escaping: + return chr(deref(self.options).escape_char) + else: + return False + + @escape_char.setter + def escape_char(self, value): + if value is False: + deref(self.options).escaping = False + else: + deref(self.options).escape_char = _single_char(value) + deref(self.options).escaping = True + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + """ + return deref(self.options).newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + deref(self.options).newlines_in_values = value + + @property + def ignore_empty_lines(self): + """ + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + """ + return deref(self.options).ignore_empty_lines + + @property + def invalid_row_handler(self): + """ + Optional handler for invalid rows. + + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. 
+ """ + return self._invalid_row_handler + + @invalid_row_handler.setter + def invalid_row_handler(self, value): + if value is not None and not callable(value): + raise TypeError("Expected callable or None, " + f"got instance of {type(value)!r}") + self._invalid_row_handler = value + deref(self.options).invalid_row_handler = MakeInvalidRowHandler( + &_handle_invalid_row, value) + + @ignore_empty_lines.setter + def ignore_empty_lines(self, value): + deref(self.options).ignore_empty_lines = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ParseOptions + + Returns + ------- + bool + """ + return ( + self.delimiter == other.delimiter and + self.quote_char == other.quote_char and + self.double_quote == other.double_quote and + self.escape_char == other.escape_char and + self.newlines_in_values == other.newlines_in_values and + self.ignore_empty_lines == other.ignore_empty_lines and + self._invalid_row_handler == other._invalid_row_handler + ) + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options): + out = ParseOptions() + out.options.reset(new CCSVParseOptions(move(options))) + return out + + def __getstate__(self): + return (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) + + def __setstate__(self, state): + (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class _ISO8601(_Weakrefable): + """ + A special object indicating ISO-8601 parsing. + """ + __slots__ = () + + def __str__(self): + return 'ISO8601' + + def __eq__(self, other): + return isinstance(other, _ISO8601) + + +ISO8601 = _ISO8601() + + +cdef class ConvertOptions(_Weakrefable): + """ + Options for converting CSV data. + + Parameters + ---------- + check_utf8 : bool, optional (default True) + Whether to check UTF8 validity of string columns. + column_types : pyarrow.Schema or dict, optional + Explicitly map column names to column types. Passing this argument + disables type inference on the defined columns. + null_values : list, optional + A sequence of strings that denote nulls in the data + (defaults are appropriate in most cases). Note that by default, + string columns are not checked for null values. To enable + null checking for those, specify ``strings_can_be_null=True``. + true_values : list, optional + A sequence of strings that denote true booleans in the data + (defaults are appropriate in most cases). + false_values : list, optional + A sequence of strings that denote false booleans in the data + (defaults are appropriate in most cases). + decimal_point : 1-character string, optional (default '.') + The character used as decimal point in floating-point and decimal + data. + strings_can_be_null : bool, optional (default False) + Whether string / binary columns can have null values. + If true, then strings in null_values are considered null for + string columns. + If false, then all strings are valid string values. + quoted_strings_can_be_null : bool, optional (default True) + Whether quoted values can be null. + If true, then strings in "null_values" are also considered null + when they appear quoted in the CSV file. Otherwise, quoted values + are never considered null. 
+ include_columns : list, optional + The names of columns to include in the Table. + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + include_missing_columns : bool, optional (default False) + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a column of nulls (whose type is selected using + `column_types`, or null by default). + This option is ignored if `include_columns` is empty. + auto_dict_encode : bool, optional (default False) + Whether to try to automatically dict-encode string / binary data. + If true, then when type inference detects a string or binary column, + it it dict-encoded up to `auto_dict_max_cardinality` distinct values + (per chunk), after which it switches to regular encoding. + This setting is ignored for non-inferred columns (those in + `column_types`). + auto_dict_max_cardinality : int, optional + The maximum dictionary cardinality for `auto_dict_encode`. + This value is per chunk. + timestamp_parsers : list, optional + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry,fast\\n" + ... "Flamingo,2,01/03/2022,Yes\\n" + ... "Horse,4,02/03/2022,Yes\\n" + ... "Brittle stars,5,03/03/2022,No\\n" + ... "Centipede,100,04/03/2022,No\\n" + ... ",6,05/03/2022," + ... ) + >>> print(s) + animals,n_legs,entry,fast + Flamingo,2,01/03/2022,Yes + Horse,4,02/03/2022,Yes + Brittle stars,5,03/03/2022,No + Centipede,100,04/03/2022,No + ,6,05/03/2022, + + Change the type of a column: + + >>> import pyarrow as pa + >>> from pyarrow import csv + >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()}) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: double + entry: string + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]] + fast: [["Yes","Yes","No","No",""]] + + Define a date parsing format to get a timestamp type column + (in case dates are not in ISO format and not converted by default): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: timestamp[s] + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [["Yes","Yes","No","No",""]] + + Specify a subset of columns to be read: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + + List additional column to be included as a null typed column: + + >>> convert_options = csv.ConvertOptions( + ... 
include_columns=["animals", "n_legs", "location"], + ... include_missing_columns=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + location: null + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + location: [5 nulls] + + Define columns as dictionary type (by default only the + string/binary columns are dictionary encoded): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"], + ... auto_dict_encode=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: dictionary + n_legs: int64 + entry: timestamp[s] + fast: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices: + [0,1,2,3,4]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [ -- dictionary: + ["Yes","No",""] -- indices: + [0,0,1,1,2]] + + Set upper limit for the number of categories. If the categories + is more than the limit, the conversion to dictionary will not + happen: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals"], + ... auto_dict_encode=True, + ... auto_dict_max_cardinality=2) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + + Set empty strings to missing values: + + >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"], + ... strings_can_be_null=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",null]] + n_legs: [[2,4,5,100,6]] + + Define values to be True and False when converting a column + into a bool type: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["fast"], + ... false_values=["No"], + ... 
true_values=["Yes"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + fast: bool + ---- + fast: [[true,true,false,false,null]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self.options.reset( + new CCSVConvertOptions(CCSVConvertOptions.Defaults())) + + def __init__(self, *, check_utf8=None, column_types=None, null_values=None, + true_values=None, false_values=None, decimal_point=None, + strings_can_be_null=None, quoted_strings_can_be_null=None, + include_columns=None, include_missing_columns=None, + auto_dict_encode=None, auto_dict_max_cardinality=None, + timestamp_parsers=None): + if check_utf8 is not None: + self.check_utf8 = check_utf8 + if column_types is not None: + self.column_types = column_types + if null_values is not None: + self.null_values = null_values + if true_values is not None: + self.true_values = true_values + if false_values is not None: + self.false_values = false_values + if decimal_point is not None: + self.decimal_point = decimal_point + if strings_can_be_null is not None: + self.strings_can_be_null = strings_can_be_null + if quoted_strings_can_be_null is not None: + self.quoted_strings_can_be_null = quoted_strings_can_be_null + if include_columns is not None: + self.include_columns = include_columns + if include_missing_columns is not None: + self.include_missing_columns = include_missing_columns + if auto_dict_encode is not None: + self.auto_dict_encode = auto_dict_encode + if auto_dict_max_cardinality is not None: + self.auto_dict_max_cardinality = auto_dict_max_cardinality + if timestamp_parsers is not None: + self.timestamp_parsers = timestamp_parsers + + @property + def check_utf8(self): + """ + Whether to check UTF8 validity of string columns. + """ + return deref(self.options).check_utf8 + + @check_utf8.setter + def check_utf8(self, value): + deref(self.options).check_utf8 = value + + @property + def strings_can_be_null(self): + """ + Whether string / binary columns can have null values. + """ + return deref(self.options).strings_can_be_null + + @strings_can_be_null.setter + def strings_can_be_null(self, value): + deref(self.options).strings_can_be_null = value + + @property + def quoted_strings_can_be_null(self): + """ + Whether quoted values can be null. + """ + return deref(self.options).quoted_strings_can_be_null + + @quoted_strings_can_be_null.setter + def quoted_strings_can_be_null(self, value): + deref(self.options).quoted_strings_can_be_null = value + + @property + def column_types(self): + """ + Explicitly map column names to column types. + """ + d = {frombytes(item.first): pyarrow_wrap_data_type(item.second) + for item in deref(self.options).column_types} + return d + + @column_types.setter + def column_types(self, value): + cdef: + shared_ptr[CDataType] typ + + if isinstance(value, Mapping): + value = value.items() + + deref(self.options).column_types.clear() + for item in value: + if isinstance(item, Field): + k = item.name + v = item.type + else: + k, v = item + typ = pyarrow_unwrap_data_type(ensure_type(v)) + assert typ != NULL + deref(self.options).column_types[tobytes(k)] = typ + + @property + def null_values(self): + """ + A sequence of strings that denote nulls in the data. 
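As a short sketch of how these conversion knobs combine (illustrative, not taken from the patch), explicit column types, custom null markers and `strings_can_be_null` can be set together:

import io
import pyarrow as pa
import pyarrow.csv as csv

opts = csv.ConvertOptions(
    column_types={"n_legs": pa.int8()},  # disable inference for this column
    null_values=["", "N/A"],             # strings treated as nulls
    strings_can_be_null=True,            # apply null_values to string columns too
)
table = csv.read_csv(io.BytesIO(b"animal,n_legs\nFlamingo,2\nN/A,4\n"),
                     convert_options=opts)  # "N/A" becomes a null animal value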
+ """ + return [frombytes(x) for x in deref(self.options).null_values] + + @null_values.setter + def null_values(self, value): + deref(self.options).null_values = [tobytes(x) for x in value] + + @property + def true_values(self): + """ + A sequence of strings that denote true booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).true_values] + + @true_values.setter + def true_values(self, value): + deref(self.options).true_values = [tobytes(x) for x in value] + + @property + def false_values(self): + """ + A sequence of strings that denote false booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).false_values] + + @false_values.setter + def false_values(self, value): + deref(self.options).false_values = [tobytes(x) for x in value] + + @property + def decimal_point(self): + """ + The character used as decimal point in floating-point and decimal + data. + """ + return chr(deref(self.options).decimal_point) + + @decimal_point.setter + def decimal_point(self, value): + deref(self.options).decimal_point = _single_char(value) + + @property + def auto_dict_encode(self): + """ + Whether to try to automatically dict-encode string / binary data. + """ + return deref(self.options).auto_dict_encode + + @auto_dict_encode.setter + def auto_dict_encode(self, value): + deref(self.options).auto_dict_encode = value + + @property + def auto_dict_max_cardinality(self): + """ + The maximum dictionary cardinality for `auto_dict_encode`. + + This value is per chunk. + """ + return deref(self.options).auto_dict_max_cardinality + + @auto_dict_max_cardinality.setter + def auto_dict_max_cardinality(self, value): + deref(self.options).auto_dict_max_cardinality = value + + @property + def include_columns(self): + """ + The names of columns to include in the Table. + + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + """ + return [frombytes(s) for s in deref(self.options).include_columns] + + @include_columns.setter + def include_columns(self, value): + deref(self.options).include_columns.clear() + for item in value: + deref(self.options).include_columns.push_back(tobytes(item)) + + @property + def include_missing_columns(self): + """ + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a null column (whose type is selected using `column_types`, + or null by default). + This option is ignored if `include_columns` is empty. + """ + return deref(self.options).include_missing_columns + + @include_missing_columns.setter + def include_missing_columns(self, value): + deref(self.options).include_missing_columns = value + + @property + def timestamp_parsers(self): + """ + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. 
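For illustration (a sketch, not part of the patch), a strptime() format string can be supplied alongside the ISO8601 sentinel; each value is tried against the parsers in the order given:

import io
import pyarrow.csv as csv

opts = csv.ConvertOptions(timestamp_parsers=[csv.ISO8601, "%d/%m/%Y"])
data = io.BytesIO(b"when\n02/03/2022\n03/03/2022\n")
table = csv.read_csv(data, convert_options=opts)
# "when" is converted to a timestamp column via the second (strptime) parser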
+ """ + cdef: + shared_ptr[CTimestampParser] c_parser + c_string kind + + parsers = [] + for c_parser in deref(self.options).timestamp_parsers: + kind = deref(c_parser).kind() + if kind == b'strptime': + parsers.append(frombytes(deref(c_parser).format())) + else: + assert kind == b'iso8601' + parsers.append(ISO8601) + + return parsers + + @timestamp_parsers.setter + def timestamp_parsers(self, value): + cdef: + vector[shared_ptr[CTimestampParser]] c_parsers + + for v in value: + if isinstance(v, str): + c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v))) + elif v == ISO8601: + c_parsers.push_back(CTimestampParser.MakeISO8601()) + else: + raise TypeError("Expected list of str or ISO8601 objects") + + deref(self.options).timestamp_parsers = move(c_parsers) + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options): + out = ConvertOptions() + out.options.reset(new CCSVConvertOptions(move(options))) + return out + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ConvertOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ConvertOptions + + Returns + ------- + bool + """ + return ( + self.check_utf8 == other.check_utf8 and + self.column_types == other.column_types and + self.null_values == other.null_values and + self.true_values == other.true_values and + self.false_values == other.false_values and + self.decimal_point == other.decimal_point and + self.timestamp_parsers == other.timestamp_parsers and + self.strings_can_be_null == other.strings_can_be_null and + self.quoted_strings_can_be_null == + other.quoted_strings_can_be_null and + self.auto_dict_encode == other.auto_dict_encode and + self.auto_dict_max_cardinality == + other.auto_dict_max_cardinality and + self.include_columns == other.include_columns and + self.include_missing_columns == other.include_missing_columns + ) + + def __getstate__(self): + return (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) + + def __setstate__(self, state): + (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef _get_reader(input_file, ReadOptions read_options, + shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + if read_options is not None: + out[0] = native_transcoding_input_stream(out[0], + read_options.encoding, + 'utf8') + + +cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out): + if read_options is None: + out[0] = CCSVReadOptions.Defaults() + else: + out[0] = deref(read_options.options) + + +cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out): + if parse_options is None: + out[0] = CCSVParseOptions.Defaults() + else: + out[0] = deref(parse_options.options) + + +cdef _get_convert_options(ConvertOptions convert_options, + CCSVConvertOptions* out): + if convert_options is None: + out[0] = CCSVConvertOptions.Defaults() + else: + out[0] = 
deref(convert_options.options) + + +cdef class CSVStreamingReader(RecordBatchReader): + """An object that reads record batches incrementally from a CSV file. + + Should not be instantiated directly by user code. + """ + cdef readonly: + Schema schema + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.csv.open_csv() instead." + .format(self.__class__.__name__)) + + # Note about cancellation: we cannot create a SignalStopHandler + # by default here, as several CSVStreamingReader instances may be + # created (including by the same thread). Handling cancellation + # would require having the user pass the SignalStopHandler. + # (in addition to solving ARROW-11853) + + cdef _open(self, shared_ptr[CInputStream] stream, + CCSVReadOptions c_read_options, + CCSVParseOptions c_parse_options, + CCSVConvertOptions c_convert_options, + MemoryPool memory_pool): + cdef: + shared_ptr[CSchema] c_schema + CIOContext io_context + + io_context = CIOContext(maybe_unbox_memory_pool(memory_pool)) + + with nogil: + self.reader = GetResultValue( + CCSVStreamingReader.Make( + io_context, stream, + move(c_read_options), move(c_parse_options), + move(c_convert_options))) + c_schema = self.reader.get().schema() + + self.schema = pyarrow_wrap_schema(c_schema) + + +def read_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Read a Table from a stream of CSV data. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. + read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.Table` + Contents of the CSV file as a in-memory table. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry\\n" + ... "Flamingo,2,2022-03-01\\n" + ... "Horse,4,2022-03-02\\n" + ... "Brittle stars,5,2022-03-03\\n" + ... "Centipede,100,2022-03-04" + ... 
)
+    >>> print(s)
+    animals,n_legs,entry
+    Flamingo,2,2022-03-01
+    Horse,4,2022-03-02
+    Brittle stars,5,2022-03-03
+    Centipede,100,2022-03-04
+    >>> source = io.BytesIO(s.encode())
+
+    Reading from the file:
+
+    >>> from pyarrow import csv
+    >>> csv.read_csv(source)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    entry: date32[day]
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
+    n_legs: [[2,4,5,100]]
+    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
+    """
+    cdef:
+        shared_ptr[CInputStream] stream
+        CCSVReadOptions c_read_options
+        CCSVParseOptions c_parse_options
+        CCSVConvertOptions c_convert_options
+        CIOContext io_context
+        SharedPtrNoGIL[CCSVReader] reader
+        shared_ptr[CTable] table
+
+    _get_reader(input_file, read_options, &stream)
+    _get_read_options(read_options, &c_read_options)
+    _get_parse_options(parse_options, &c_parse_options)
+    _get_convert_options(convert_options, &c_convert_options)
+
+    with SignalStopHandler() as stop_handler:
+        io_context = CIOContext(
+            maybe_unbox_memory_pool(memory_pool),
+            (<StopToken> stop_handler.stop_token).stop_token)
+        reader = GetResultValue(CCSVReader.Make(
+            io_context, stream,
+            c_read_options, c_parse_options, c_convert_options))
+
+        with nogil:
+            table = GetResultValue(reader.get().Read())
+
+    return pyarrow_wrap_table(table)
+
+
+def open_csv(input_file, read_options=None, parse_options=None,
+             convert_options=None, MemoryPool memory_pool=None):
+    """
+    Open a streaming reader of CSV data.
+
+    Reading using this function is always single-threaded.
+
+    Parameters
+    ----------
+    input_file : string, path or file-like object
+        The location of CSV data. If a string or path, and if it ends
+        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
+        the data is automatically decompressed when reading.
+ read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.csv.CSVStreamingReader` + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CSVStreamingReader reader + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + reader = CSVStreamingReader.__new__(CSVStreamingReader) + reader._open(stream, move(c_read_options), move(c_parse_options), + move(c_convert_options), memory_pool) + return reader + + +def _raise_invalid_function_option(value, description, *, + exception_class=ValueError): + raise exception_class(f"\"{value}\" is not a valid {description}") + + +cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *: + if quoting_style == "needed": + return CQuotingStyle_Needed + elif quoting_style == "all_valid": + return CQuotingStyle_AllValid + elif quoting_style == "none": + return CQuotingStyle_None + _raise_invalid_function_option(quoting_style, "quoting style") + + +cdef wrap_quoting_style(quoting_style): + if quoting_style == CQuotingStyle_Needed: + return 'needed' + elif quoting_style == CQuotingStyle_AllValid: + return 'all_valid' + elif quoting_style == CQuotingStyle_None: + return 'none' + + +cdef class WriteOptions(_Weakrefable): + """ + Options for writing CSV files. + + Parameters + ---------- + include_header : bool, optional (default True) + Whether to write an initial header line with column names + batch_size : int, optional (default 1024) + How many rows to process together when converting and writing + CSV data + delimiter : 1-character string, optional (default ",") + The character delimiting individual cells in the CSV data. + quoting_style : str, optional (default "needed") + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, include_header=None, batch_size=None, + delimiter=None, quoting_style=None): + self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults())) + if include_header is not None: + self.include_header = include_header + if batch_size is not None: + self.batch_size = batch_size + if delimiter is not None: + self.delimiter = delimiter + if quoting_style is not None: + self.quoting_style = quoting_style + + @property + def include_header(self): + """ + Whether to write an initial header line with column names. 
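A brief usage sketch (illustrative only; the output file name "animals.csv" is hypothetical) combining `include_header` with a non-default quoting style:

import pyarrow as pa
import pyarrow.csv as csv

table = pa.table({"animal": ["Flamingo", "Horse"], "n_legs": [2, 4]})
opts = csv.WriteOptions(include_header=False, quoting_style="all_valid")
csv.write_csv(table, "animals.csv", write_options=opts)
# every non-null value is written quoted and no header line is emitted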
+ """ + return deref(self.options).include_header + + @include_header.setter + def include_header(self, value): + deref(self.options).include_header = value + + @property + def batch_size(self): + """ + How many rows to process together when converting and writing + CSV data. + """ + return deref(self.options).batch_size + + @batch_size.setter + def batch_size(self, value): + deref(self.options).batch_size = value + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quoting_style(self): + """ + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + return wrap_quoting_style(deref(self.options).quoting_style) + + @quoting_style.setter + def quoting_style(self, value): + deref(self.options).quoting_style = unwrap_quoting_style(value) + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options): + out = WriteOptions() + out.options.reset(new CCSVWriteOptions(move(options))) + return out + + def validate(self): + check_status(self.options.get().Validate()) + + +cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out): + if write_options is None: + out[0] = CCSVWriteOptions.Defaults() + else: + out[0] = deref(write_options.options) + + +def write_csv(data, output_file, write_options=None, + MemoryPool memory_pool=None): + """ + Write record batch or table to a CSV file. + + Parameters + ---------- + data : pyarrow.RecordBatch or pyarrow.Table + The data to write. + output_file : string, path, pyarrow.NativeFile, or file-like object + The location where to write the CSV data. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + + Examples + -------- + + >>> import pyarrow as pa + >>> from pyarrow import csv + + >>> legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> entry_date = pa.array(["01/03/2022", "02/03/2022", + ... "03/03/2022", "04/03/2022"]) + >>> table = pa.table([animals, legs, entry_date], + ... 
names=["animals", "n_legs", "entry"]) + + >>> csv.write_csv(table, "animals.csv") + + >>> write_options = csv.WriteOptions(include_header=False) + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + + >>> write_options = csv.WriteOptions(delimiter=";") + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + """ + cdef: + shared_ptr[COutputStream] stream + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool + CRecordBatch* batch + CTable* table + _get_write_options(write_options, &c_write_options) + + get_writer(output_file, &stream) + c_memory_pool = maybe_unbox_memory_pool(memory_pool) + c_write_options.io_context = CIOContext(c_memory_pool) + if isinstance(data, RecordBatch): + batch = pyarrow_unwrap_batch(data).get() + with nogil: + check_status(WriteCSV(deref(batch), c_write_options, stream.get())) + elif isinstance(data, Table): + table = pyarrow_unwrap_table(data).get() + with nogil: + check_status(WriteCSV(deref(table), c_write_options, stream.get())) + else: + raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'") + + +cdef class CSVWriter(_CRecordBatchWriter): + """ + Writer to create a CSV file. + + Parameters + ---------- + sink : str, path, pyarrow.OutputStream or file-like object + The location where to write the CSV data. + schema : pyarrow.Schema + The schema of the data to be written. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + """ + + def __init__(self, sink, Schema schema, *, + WriteOptions write_options=None, MemoryPool memory_pool=None): + cdef: + shared_ptr[COutputStream] c_stream + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_write_options(write_options, &c_write_options) + c_write_options.io_context = CIOContext(c_memory_pool) + get_writer(sink, &c_stream) + with nogil: + self.writer = GetResultValue(MakeCSVWriter( + c_stream, c_schema, c_write_options)) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..220ab6b19affe6b520db3a3501fad2772919f5e4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pxd @@ -0,0 +1,183 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. 
APIs subject to change without notice.""" + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.lib cimport * +from pyarrow._fs cimport FileSystem, FileInfo + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*) + +cdef class DatasetFactory(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDatasetFactory] wrapped + CDatasetFactory* factory + + cdef init(self, const shared_ptr[CDatasetFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp) + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil + + +cdef class Dataset(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDataset] wrapped + CDataset* dataset + public dict _scan_options + + cdef void init(self, const shared_ptr[CDataset]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp) + + cdef shared_ptr[CDataset] unwrap(self) nogil + + +cdef class Scanner(_Weakrefable): + cdef: + SharedPtrNoGIL[CScanner] wrapped + CScanner* scanner + + cdef void init(self, const shared_ptr[CScanner]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp) + + cdef shared_ptr[CScanner] unwrap(self) + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except * + + +cdef class FragmentScanOptions(_Weakrefable): + + cdef: + shared_ptr[CFragmentScanOptions] wrapped + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp) + + +cdef class FileFormat(_Weakrefable): + + cdef: + shared_ptr[CFileFormat] wrapped + CFileFormat* format + + cdef void init(self, const shared_ptr[CFileFormat]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp) + + cdef inline shared_ptr[CFileFormat] unwrap(self) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options) + + # Return a WrittenFile after a file was written. + # May be overridden by subclasses, e.g. to add metadata. 
+ cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer) + + +cdef class FileWriteOptions(_Weakrefable): + + cdef: + shared_ptr[CFileWriteOptions] wrapped + CFileWriteOptions* c_options + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self) + + +cdef class Fragment(_Weakrefable): + + cdef: + SharedPtrNoGIL[CFragment] wrapped + CFragment* fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp) + + cdef inline shared_ptr[CFragment] unwrap(self) + + +cdef class FileFragment(Fragment): + + cdef: + CFileFragment* file_fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + +cdef class Partitioning(_Weakrefable): + + cdef: + shared_ptr[CPartitioning] wrapped + CPartitioning* partitioning + + cdef init(self, const shared_ptr[CPartitioning]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp) + + cdef inline shared_ptr[CPartitioning] unwrap(self) + + +cdef class PartitioningFactory(_Weakrefable): + + cdef: + shared_ptr[CPartitioningFactory] wrapped + CPartitioningFactory* factory + object constructor + object options + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options) + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self) + + +cdef class WrittenFile(_Weakrefable): + + # The full path to the created file + cdef public str path + # Optional Parquet metadata + # This metadata will have the file path attribute set to the path of + # the written file. + cdef public object metadata + # The size of the file in bytes + cdef public int64_t size diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pyx new file mode 100644 index 0000000000000000000000000000000000000000..3583a3213ccbc4c46a5a05056ccb3c0c3882a444 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.pyx @@ -0,0 +1,4052 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. 
APIs subject to change without notice.""" + +from cython.operator cimport dereference as deref + +import codecs +import collections +from libcpp cimport bool + +import pyarrow as pa +from pyarrow.lib cimport * +from pyarrow.lib import ArrowTypeError, frombytes, tobytes, _pac +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow._acero cimport ExecNodeOptions +from pyarrow._compute cimport Expression, _bind +from pyarrow._compute import _forbid_instantiation +from pyarrow._fs cimport FileSystem, FileSelector, FileInfo +from pyarrow._csv cimport ( + ConvertOptions, ParseOptions, ReadOptions, WriteOptions) +from pyarrow.util import _is_iterable, _is_path_like, _stringify_path +from pyarrow._json cimport ParseOptions as JsonParseOptions +from pyarrow._json cimport ReadOptions as JsonReadOptions + + +_DEFAULT_BATCH_SIZE = 2**17 +_DEFAULT_BATCH_READAHEAD = 16 +_DEFAULT_FRAGMENT_READAHEAD = 4 + + +# Initialise support for Datasets in ExecPlan +Initialize() + + +_orc_fileformat = None +_orc_imported = False + + +def _get_orc_fileformat(): + """ + Import OrcFileFormat on first usage (to avoid circular import issue + when `pyarrow._dataset_orc` would be imported first) + """ + global _orc_fileformat + global _orc_imported + if not _orc_imported: + try: + from pyarrow._dataset_orc import OrcFileFormat + _orc_fileformat = OrcFileFormat + except ImportError as e: + _orc_fileformat = None + finally: + _orc_imported = True + return _orc_fileformat + + +_dataset_pq = False + + +def _get_parquet_classes(): + """ + Import Parquet class files on first usage (to avoid circular import issue + when `pyarrow._dataset_parquet` would be imported first) + """ + global _dataset_pq + if _dataset_pq is False: + try: + import pyarrow._dataset_parquet as _dataset_pq + except ImportError: + _dataset_pq = None + + +def _get_parquet_symbol(name): + """ + Get a symbol from pyarrow.parquet if the latter is importable, otherwise + return None. 
+ """ + _get_parquet_classes() + return _dataset_pq and getattr(_dataset_pq, name) + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=None, object file_size=None): + + cdef: + CFileSource c_source + shared_ptr[CFileSystem] c_filesystem + CFileInfo c_info + c_string c_path + shared_ptr[CRandomAccessFile] c_file + shared_ptr[CBuffer] c_buffer + int64_t c_size + + if isinstance(file, Buffer): + c_buffer = pyarrow_unwrap_buffer(file) + c_source = CFileSource(move(c_buffer)) + elif _is_path_like(file): + if filesystem is None: + raise ValueError("cannot construct a FileSource from " + "a path without a FileSystem") + c_filesystem = filesystem.unwrap() + c_path = tobytes(_stringify_path(file)) + + if file_size is not None: + c_size = file_size + c_info = FileInfo(c_path, size=c_size).unwrap() + c_source = CFileSource(move(c_info), move(c_filesystem)) + else: + c_source = CFileSource(move(c_path), move(c_filesystem)) + elif hasattr(file, 'read'): + # Optimistically hope this is file-like + c_file = get_native_file(file, False).get_random_access_file() + c_source = CFileSource(move(c_file)) + + else: + raise TypeError("cannot construct a FileSource " + "from " + str(file)) + + return c_source + + +cdef CSegmentEncoding _get_segment_encoding(str segment_encoding): + if segment_encoding == "none": + return CSegmentEncoding_None + elif segment_encoding == "uri": + return CSegmentEncoding_Uri + raise ValueError(f"Unknown segment encoding: {segment_encoding}") + + +cdef str _wrap_segment_encoding(CSegmentEncoding segment_encoding): + if segment_encoding == CSegmentEncoding_None: + return "none" + elif segment_encoding == CSegmentEncoding_Uri: + return "uri" + raise ValueError("Unknown segment encoding") + + +cdef Expression _true = Expression._scalar(True) + + +cdef class Dataset(_Weakrefable): + """ + Collection of data fragments and potentially child datasets. + + Arrow Datasets allow you to query against data that has been split across + multiple files. This sharding of data may indicate partitioning, which + can accelerate queries that only touch some partitions (files). + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CDataset]& sp): + self.wrapped = sp + self.dataset = sp.get() + self._scan_options = dict() + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'union': UnionDataset, + 'filesystem': FileSystemDataset, + 'in-memory': InMemoryDataset, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Dataset self = class_.__new__(class_) + self.init(sp) + return self + + cdef shared_ptr[CDataset] unwrap(self) nogil: + return self.wrapped + + @property + def partition_expression(self): + """ + An Expression which evaluates to true for all data viewed by this + Dataset. + """ + return Expression.wrap(self.dataset.partition_expression()) + + def replace_schema(self, Schema schema not None): + """ + Return a copy of this Dataset with a different schema. + + The copy will view the same Fragments. If the new schema is not + compatible with the original dataset's schema then an error will + be raised. + + Parameters + ---------- + schema : Schema + The new dataset schema. + """ + cdef shared_ptr[CDataset] copy = GetResultValue( + self.dataset.ReplaceSchema(pyarrow_unwrap_schema(schema)) + ) + + d = Dataset.wrap(move(copy)) + if self._scan_options: + # Preserve scan options if set. 
+ d._scan_options = self._scan_options.copy() + return d + + def get_fragments(self, Expression filter=None): + """Returns an iterator over the fragments in this dataset. + + Parameters + ---------- + filter : Expression, default None + Return fragments matching the optional filter, either using the + partition_expression or internal information like Parquet's + statistics. + + Returns + ------- + fragments : iterator of Fragment + """ + if self._scan_options.get("filter") is not None: + # Accessing fragments of a filtered dataset is not supported. + # It would be unclear if you wanted to filter the fragments + # or the rows in those fragments. + raise ValueError( + "Retrieving fragments of a filtered or projected " + "dataset is not allowed. Remove the filtering." + ) + + return self._get_fragments(filter) + + def _get_fragments(self, Expression filter): + cdef: + CExpression c_filter + + if filter is None: + c_fragments = move(GetResultValue(self.dataset.GetFragments())) + else: + c_filter = _bind(filter, self.schema) + c_fragments = move(GetResultValue( + self.dataset.GetFragments(c_filter))) + + for maybe_fragment in c_fragments: + yield Fragment.wrap(GetResultValue(move(maybe_fragment))) + + def _scanner_options(self, options): + """Returns the default options to create a new Scanner. + + This is automatically invoked by :meth:`Dataset.scanner` + and there is no need to use it. + """ + new_options = options.copy() + + # at the moment only support filter + requested_filter = options.get("filter") + current_filter = self._scan_options.get("filter") + if requested_filter is not None and current_filter is not None: + new_options["filter"] = current_filter & requested_filter + elif current_filter is not None: + new_options["filter"] = current_filter + + return new_options + + def scanner(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the dataset. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + See the :meth:`Scanner.from_dataset` method for further information. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + scanner : Scanner + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, "dataset_scanner.parquet") + + >>> import pyarrow.dataset as ds + >>> dataset = ds.dataset("dataset_scanner.parquet") + + Selecting a subset of the columns: + + >>> dataset.scanner(columns=["year", "n_legs"]).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + ---- + year: [[2020,2022,2021,2022,2019,2021]] + n_legs: [[2,2,4,4,5,100]] + + Projecting selected columns using an expression: + + >>> dataset.scanner(columns={ + ... "n_legs_uint": ds.field("n_legs").cast("uint8"), + ... }).to_table() + pyarrow.Table + n_legs_uint: uint8 + ---- + n_legs_uint: [[2,2,4,4,5,100]] + + Filtering rows while scanning: + + >>> dataset.scanner(filter=ds.field("year") > 2020).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2022,2021,2022,2021]] + n_legs: [[2,4,4,100]] + animal: [["Parrot","Dog","Horse","Centipede"]] + """ + return Scanner.from_dataset( + self, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset as materialized record batches. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. 
+ + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset to an Arrow table. + + Note that this method reads all the selected data from the dataset + into memory. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). 
+ + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. 
+ batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).head(num_rows) + + def count_rows(self, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Count rows matching the scanner filter. + + Parameters + ---------- + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + count : int + """ + return self.scanner( + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).count_rows() + + @property + def schema(self): + """The common schema of the full Dataset""" + return pyarrow_wrap_schema(self.dataset.schema()) + + def filter(self, expression not None): + """ + Apply a row filter to the dataset. + + Parameters + ---------- + expression : Expression + The filter that should be applied to the dataset. 
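+
+            For example, assuming ``pc`` refers to ``pyarrow.compute``
+            and the dataset has a ``year`` column (illustrative names),
+            ``dataset.filter(pc.field("year") == 2009)`` returns a new
+            Dataset that only yields matching rows when scanned.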
+ + Returns + ------- + Dataset + """ + cdef: + Dataset filtered_dataset + + new_filter = expression + current_filter = self._scan_options.get("filter") + if current_filter is not None and new_filter is not None: + new_filter = current_filter & new_filter + + filtered_dataset = self.__class__.__new__(self.__class__) + filtered_dataset.init(self.wrapped) + filtered_dataset._scan_options = dict(filter=new_filter) + return filtered_dataset + + def sort_by(self, sorting, **kwargs): + """ + Sort the Dataset by one or multiple columns. + + Parameters + ---------- + sorting : str or list[tuple(name, order)] + Name of the column to use to sort (ascending), or + a list of multiple sorting conditions where + each entry is a tuple with column name + and sorting order ("ascending" or "descending") + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + InMemoryDataset + A new dataset sorted according to the sort keys. + """ + if isinstance(sorting, str): + sorting = [(sorting, "ascending")] + + res = _pac()._sort_source( + self, output_type=InMemoryDataset, sort_keys=sorting, **kwargs + ) + return res + + def join(self, right_dataset, keys, right_keys=None, join_type="left outer", + left_suffix=None, right_suffix=None, coalesce_keys=True, + use_threads=True): + """ + Perform a join between this dataset and another one. + + Result of the join will be a new dataset, where further + operations can be applied. + + Parameters + ---------- + right_dataset : dataset + The dataset to join to the current one, acting as the right dataset + in the join operation. + keys : str or list[str] + The columns from current dataset that should be used as keys + of the join operation left side. + right_keys : str or list[str], default None + The columns from the right_dataset that should be used as keys + on the join operation right side. + When ``None`` use the same key names as the left dataset. + join_type : str, default "left outer" + The kind of join that should be performed, one of + ("left semi", "right semi", "left anti", "right anti", + "inner", "left outer", "right outer", "full outer") + left_suffix : str, default None + Which suffix to add to right column names. This prevents confusion + when the columns in left and right datasets have colliding names. + right_suffix : str, default None + Which suffix to add to the left column names. This prevents confusion + when the columns in left and right datasets have colliding names. + coalesce_keys : bool, default True + If the duplicated keys should be omitted from one of the sides + in the join result. + use_threads : bool, default True + Whenever to use multithreading or not. + + Returns + ------- + InMemoryDataset + """ + if right_keys is None: + right_keys = keys + return _pac()._perform_join( + join_type, self, keys, right_dataset, right_keys, + left_suffix=left_suffix, right_suffix=right_suffix, + use_threads=use_threads, coalesce_keys=coalesce_keys, + output_type=InMemoryDataset + ) + + def join_asof(self, right_dataset, on, by, tolerance, right_on=None, right_by=None): + """ + Perform an asof join between this dataset and another one. + + This is similar to a left-join except that we match on nearest key rather + than equal keys. Both datasets must be sorted by the key. This type of join + is most useful for time series data that are not perfectly aligned. + + Optionally match on equivalent keys with "by" before searching with "on". 
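+
+        For example (all names below are illustrative), matching each
+        ``trades`` row with the nearest ``quotes`` row that is at most ten
+        time units in the past, per ``ticker``, could look like::
+
+            joined = trades.join_asof(
+                quotes, on="timestamp", by="ticker", tolerance=-10)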
+ + Result of the join will be a new Dataset, where further + operations can be applied. + + Parameters + ---------- + right_dataset : dataset + The dataset to join to the current one, acting as the right dataset + in the join operation. + on : str + The column from current dataset that should be used as the "on" key + of the join operation left side. + + An inexact match is used on the "on" key, i.e. a row is considered a + match if and only if left_on - tolerance <= right_on <= left_on. + + The input table must be sorted by the "on" key. Must be a single + field of a common type. + + Currently, the "on" key must be an integer, date, or timestamp type. + by : str or list[str] + The columns from current dataset that should be used as the keys + of the join operation left side. The join operation is then done + only for the matches in these columns. + tolerance : int + The tolerance for inexact "on" key matching. A right row is considered + a match with the left row `right.on - left.on <= tolerance`. The + `tolerance` may be: + + - negative, in which case a past-as-of-join occurs; + - or positive, in which case a future-as-of-join occurs; + - or zero, in which case an exact-as-of-join occurs. + + The tolerance is interpreted in the same units as the "on" key. + right_on : str or list[str], default None + The columns from the right_dataset that should be used as the on key + on the join operation right side. + When ``None`` use the same key name as the left dataset. + right_by : str or list[str], default None + The columns from the right_dataset that should be used as by keys + on the join operation right side. + When ``None`` use the same key names as the left dataset. + + Returns + ------- + InMemoryDataset + """ + if right_on is None: + right_on = on + if right_by is None: + right_by = by + return _pac()._perform_join_asof(self, on, by, + right_dataset, right_on, right_by, + tolerance, output_type=InMemoryDataset) + + +cdef class InMemoryDataset(Dataset): + """ + A Dataset wrapping in-memory data. + + Parameters + ---------- + source : RecordBatch, Table, list, tuple + The data for this dataset. Can be a RecordBatch, Table, list of + RecordBatch/Table, iterable of RecordBatch, or a RecordBatchReader + If an iterable is provided, the schema must also be provided. + schema : Schema, optional + Only required if passing an iterable as the source + """ + + cdef: + CInMemoryDataset* in_memory_dataset + + def __init__(self, source, Schema schema=None): + cdef: + shared_ptr[CInMemoryDataset] in_memory_dataset + + if isinstance(source, (pa.RecordBatch, pa.Table)): + source = [source] + + if isinstance(source, (list, tuple)): + batches = [] + for item in source: + if isinstance(item, pa.RecordBatch): + batches.append(item) + elif isinstance(item, pa.Table): + batches.extend(item.to_batches()) + else: + raise TypeError( + 'Expected a list of tables or batches. 
The given list '
+                        'contains a ' + type(item).__name__)
+                if schema is None:
+                    schema = item.schema
+                elif not schema.equals(item.schema):
+                    raise ArrowTypeError(
+                        f'Item has schema\n{item.schema}\nwhich does not '
+                        f'match expected schema\n{schema}')
+            if not batches and schema is None:
+                raise ValueError('Must provide schema to construct in-memory '
+                                 'dataset from an empty list')
+            table = pa.Table.from_batches(batches, schema=schema)
+            in_memory_dataset = make_shared[CInMemoryDataset](
+                pyarrow_unwrap_table(table))
+        else:
+            raise TypeError(
+                'Expected a table, batch, or list of tables/batches '
+                'instead of the given type: ' +
+                type(source).__name__
+            )
+
+        self.init(<shared_ptr[CDataset]> in_memory_dataset)
+
+    cdef void init(self, const shared_ptr[CDataset]& sp):
+        Dataset.init(self, sp)
+        self.in_memory_dataset = <CInMemoryDataset*> sp.get()
+
+
+cdef class UnionDataset(Dataset):
+    """
+    A Dataset wrapping child datasets.
+
+    Children's schemas must agree with the provided schema.
+
+    Parameters
+    ----------
+    schema : Schema
+        A known schema to conform to.
+    children : list of Dataset
+        One or more input children.
+    """
+
+    cdef:
+        CUnionDataset* union_dataset
+
+    def __init__(self, Schema schema not None, children):
+        cdef:
+            Dataset child
+            CDatasetVector c_children
+            shared_ptr[CUnionDataset] union_dataset
+
+        for child in children:
+            c_children.push_back(child.wrapped)
+
+        union_dataset = GetResultValue(CUnionDataset.Make(
+            pyarrow_unwrap_schema(schema), move(c_children)))
+        self.init(<shared_ptr[CDataset]> union_dataset)
+
+    cdef void init(self, const shared_ptr[CDataset]& sp):
+        Dataset.init(self, sp)
+        self.union_dataset = <CUnionDataset*> sp.get()
+
+    def __reduce__(self):
+        return UnionDataset, (self.schema, self.children)
+
+    @property
+    def children(self):
+        cdef CDatasetVector children = self.union_dataset.children()
+        return [Dataset.wrap(children[i]) for i in range(children.size())]
+
+
+cdef class FileSystemDataset(Dataset):
+    """
+    A Dataset of file fragments.
+
+    A FileSystemDataset is composed of one or more FileFragment.
+
+    Parameters
+    ----------
+    fragments : list[Fragment]
+        List of fragments to consume.
+    schema : Schema
+        The top-level schema of the Dataset.
+    format : FileFormat
+        File format of the fragments, currently only ParquetFileFormat,
+        IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported.
+    filesystem : FileSystem
+        FileSystem of the fragments.
+    root_partition : Expression, optional
+        The top-level partition of the Dataset.
+ """ + + cdef: + CFileSystemDataset* filesystem_dataset + + def __init__(self, fragments, Schema schema, FileFormat format, + FileSystem filesystem=None, root_partition=None): + cdef: + FileFragment fragment=None + vector[shared_ptr[CFileFragment]] c_fragments + CResult[shared_ptr[CDataset]] result + shared_ptr[CFileSystem] c_filesystem + + if root_partition is None: + root_partition = _true + elif not isinstance(root_partition, Expression): + raise TypeError( + "Argument 'root_partition' has incorrect type (expected " + "Expression, got {0})".format(type(root_partition)) + ) + + for fragment in fragments: + c_fragments.push_back( + static_pointer_cast[CFileFragment, CFragment]( + fragment.unwrap())) + + if filesystem is None: + filesystem = fragment.filesystem + + if filesystem is not None: + c_filesystem = filesystem.unwrap() + + result = CFileSystemDataset.Make( + pyarrow_unwrap_schema(schema), + ( root_partition).unwrap(), + format.unwrap(), + c_filesystem, + c_fragments + ) + self.init(GetResultValue(result)) + + @property + def filesystem(self): + return FileSystem.wrap(self.filesystem_dataset.filesystem()) + + @property + def partitioning(self): + """ + The partitioning of the Dataset source, if discovered. + + If the FileSystemDataset is created using the ``dataset()`` factory + function with a partitioning specified, this will return the + finalized Partitioning object from the dataset discovery. In all + other cases, this returns None. + """ + c_partitioning = self.filesystem_dataset.partitioning() + if c_partitioning.get() == nullptr: + return None + try: + return Partitioning.wrap(c_partitioning) + except TypeError: + # e.g. type_name "default" + return None + + cdef void init(self, const shared_ptr[CDataset]& sp): + Dataset.init(self, sp) + self.filesystem_dataset = sp.get() + + def __reduce__(self): + return FileSystemDataset, ( + list(self.get_fragments()), + self.schema, + self.format, + self.filesystem, + self.partition_expression + ) + + @classmethod + def from_paths(cls, paths, schema=None, format=None, + filesystem=None, partitions=None, root_partition=None): + """ + A Dataset created from a list of paths on a particular filesystem. + + Parameters + ---------- + paths : list of str + List of file paths to create the fragments from. + schema : Schema + The top-level schema of the DataDataset. + format : FileFormat + File format to create fragments from, currently only + ParquetFileFormat, IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported. + filesystem : FileSystem + The filesystem which files are from. + partitions : list[Expression], optional + Attach additional partition information for the file paths. + root_partition : Expression, optional + The top-level partition of the DataDataset. + """ + if root_partition is None: + root_partition = _true + + for arg, class_, name in [ + (schema, Schema, 'schema'), + (format, FileFormat, 'format'), + (filesystem, FileSystem, 'filesystem'), + (root_partition, Expression, 'root_partition') + ]: + if not isinstance(arg, class_): + raise TypeError( + "Argument '{0}' has incorrect type (expected {1}, " + "got {2})".format(name, class_.__name__, type(arg)) + ) + + partitions = partitions or [_true] * len(paths) + + if len(paths) != len(partitions): + raise ValueError( + 'The number of files resulting from paths_or_selector ' + 'must be equal to the number of partitions.' 
+ ) + + fragments = [ + format.make_fragment(path, filesystem, partitions[i]) + for i, path in enumerate(paths) + ] + return FileSystemDataset(fragments, schema, format, + filesystem, root_partition) + + @property + def files(self): + """List of the files""" + cdef vector[c_string] files = self.filesystem_dataset.files() + return [frombytes(f) for f in files] + + @property + def format(self): + """The FileFormat of this source.""" + return FileFormat.wrap(self.filesystem_dataset.format()) + + +cdef class FileWriteOptions(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + self.wrapped = sp + self.c_options = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFileWriteOptions, + 'ipc': IpcFileWriteOptions, + 'parquet': _get_parquet_symbol('ParquetFileWriteOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileWriteOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def format(self): + return FileFormat.wrap(self.c_options.format()) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self): + return self.wrapped + + +cdef class FileFormat(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + self.wrapped = sp + self.format = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'ipc': IpcFileFormat, + 'csv': CsvFileFormat, + 'json': JsonFileFormat, + 'parquet': _get_parquet_symbol('ParquetFileFormat'), + 'orc': _get_orc_fileformat(), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileFormat self = class_.__new__(class_) + self.init(sp) + return self + + cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer): + parquet_metadata = None + size = GetResultValue(file_writer.GetBytesWritten()) + return WrittenFile(path, parquet_metadata, size) + + cdef inline shared_ptr[CFileFormat] unwrap(self): + return self.wrapped + + def inspect(self, file, filesystem=None): + """ + Infer the schema of a file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to infer a schema from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + + Returns + ------- + schema : Schema + The schema inferred from the file + """ + cdef: + CFileSource c_source = _make_file_source(file, filesystem, file_size=None) + CResult[shared_ptr[CSchema]] c_result + with nogil: + c_result = self.format.Inspect(c_source) + c_schema = GetResultValue(c_result) + return pyarrow_wrap_schema(move(c_schema)) + + def make_fragment(self, file, filesystem=None, + Expression partition_expression=None, + *, file_size=None): + """ + Make a FileFragment from a given file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to make a fragment from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + partition_expression : Expression, optional + An expression that is guaranteed true for all rows in the fragment. 
Allows + fragment to be potentially skipped while scanning with a filter. + file_size : int, optional + The size of the file in bytes. Can improve performance with high-latency filesystems + when file size needs to be known before reading. + + Returns + ------- + fragment : Fragment + The file fragment + """ + if partition_expression is None: + partition_expression = _true + c_source = _make_file_source(file, filesystem, file_size) + c_fragment = GetResultValue( + self.format.MakeFragment(move(c_source), + partition_expression.unwrap(), + nullptr)) + return Fragment.wrap(move(c_fragment)) + + def make_write_options(self): + sp_write_options = self.format.DefaultWriteOptions() + if sp_write_options.get() == nullptr: + # DefaultWriteOptions() may return `nullptr` which means that + # the format does not yet support writing datasets. + raise NotImplementedError( + "Writing datasets not yet implemented for this file format." + ) + return FileWriteOptions.wrap(sp_write_options) + + @property + def default_extname(self): + return frombytes(self.format.type_name()) + + @property + def default_fragment_scan_options(self): + dfso = FragmentScanOptions.wrap( + self.wrapped.get().default_fragment_scan_options) + # CsvFileFormat stores a Python-specific encoding field that needs + # to be restored because it does not exist in the C++ struct + if isinstance(self, CsvFileFormat): + if self._read_options_py is not None: + dfso.read_options = self._read_options_py + return dfso + + @default_fragment_scan_options.setter + def default_fragment_scan_options(self, FragmentScanOptions options): + if options is None: + self.wrapped.get().default_fragment_scan_options =\ + nullptr + else: + self._set_default_fragment_scan_options(options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + raise ValueError(f"Cannot set fragment scan options for " + f"'{options.type_name}' on {self.__class__.__name__}") + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class Fragment(_Weakrefable): + """Fragment of data from a Dataset.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragment]& sp): + self.wrapped = sp + self.fragment = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + # IpcFileFormat, CsvFileFormat, JsonFileFormat and OrcFileFormat do not have + # corresponding subclasses of FileFragment + 'ipc': FileFragment, + 'csv': FileFragment, + 'json': FileFragment, + 'orc': FileFragment, + 'parquet': _get_parquet_symbol('ParquetFileFragment'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + class_ = Fragment + + cdef Fragment self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CFragment] unwrap(self): + return self.wrapped + + @property + def physical_schema(self): + """Return the physical schema of this Fragment. This schema can be + different from the dataset read schema.""" + cdef: + CResult[shared_ptr[CSchema]] maybe_schema + with nogil: + maybe_schema = self.fragment.ReadPhysicalSchema() + return pyarrow_wrap_schema(GetResultValue(maybe_schema)) + + @property + def partition_expression(self): + """An Expression which evaluates to true for all data viewed by this + Fragment. 
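+
+        For a fragment of a partitioned dataset this is typically derived
+        from the file path, e.g. ``(year == 2009) and (month == 11)`` in a
+        directory-partitioned layout (field names are illustrative).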
+ """ + return Expression.wrap(self.fragment.partition_expression()) + + def scanner(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the fragment. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + Parameters + ---------- + schema : Schema + Schema to use for scanning. This is used to unify a Fragment to + its Dataset's schema. If not specified this will use the + Fragment's physical schema which might differ for each Fragment. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + scanner : Scanner + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the fragment as materialized record batches. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + record_batches : iterator of RecordBatch + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Convert this Fragment into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + table : Table + """ + return self.scanner( + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + The indices of row to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the fragment. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).head(num_rows) + + def count_rows(self, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Count rows matching the scanner filter. + + Parameters + ---------- + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + count : int + """ + return self.scanner( + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).count_rows() + + +cdef class FileFragment(Fragment): + """A Fragment representing a data file.""" + + cdef void init(self, const shared_ptr[CFragment]& sp): + Fragment.init(self, sp) + self.file_fragment = sp.get() + + def __repr__(self): + type_name = frombytes(self.fragment.type_name()) + if type_name != "parquet": + typ = f" type={type_name}" + else: + # parquet has a subclass -> type embedded in class name + typ = "" + partition_dict = get_partition_keys(self.partition_expression) + partition = ", ".join( + [f"{key}={val}" for key, val in partition_dict.items()] + ) + if partition: + partition = f" partition=[{partition}]" + return "".format( + self.__class__.__name__, typ, self.path, partition + ) + + def __reduce__(self): + buffer = self.buffer + return self.format.make_fragment, ( + self.path if buffer is None else buffer, + self.filesystem, + self.partition_expression + ) + + def open(self): + """ + Open a NativeFile of the buffer or file viewed by this fragment. 
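+
+        Returns
+        -------
+        NativeFile
+            A readable file over the fragment's buffer, if it views a
+            buffer, or over its data file on the fragment's filesystem.
+
+        Examples
+        --------
+        Reading the first few bytes of the underlying file (illustrative)::
+
+            with fragment.open() as f:
+                head = f.read(4)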
+ """ + cdef: + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CRandomAccessFile] opened + c_string c_path + NativeFile out = NativeFile() + + if self.buffer is not None: + return pa.BufferReader(self.buffer) + + c_path = tobytes(self.file_fragment.source().path()) + with nogil: + c_filesystem = self.file_fragment.source().filesystem() + opened = GetResultValue(c_filesystem.get().OpenInputFile(c_path)) + + out.set_random_access_file(opened) + out.is_readable = True + return out + + @property + def path(self): + """ + The path of the data file viewed by this fragment, if it views a + file. If instead it views a buffer, this will be "". + """ + return frombytes(self.file_fragment.source().path()) + + @property + def filesystem(self): + """ + The FileSystem containing the data file viewed by this fragment, if + it views a file. If instead it views a buffer, this will be None. + """ + cdef: + shared_ptr[CFileSystem] c_fs + c_fs = self.file_fragment.source().filesystem() + + if c_fs.get() == nullptr: + return None + + return FileSystem.wrap(c_fs) + + @property + def buffer(self): + """ + The buffer viewed by this fragment, if it views a buffer. If + instead it views a file, this will be None. + """ + cdef: + shared_ptr[CBuffer] c_buffer + c_buffer = self.file_fragment.source().buffer() + + if c_buffer.get() == nullptr: + return None + + return pyarrow_wrap_buffer(c_buffer) + + @property + def format(self): + """ + The format of the data file viewed by this fragment. + """ + return FileFormat.wrap(self.file_fragment.format()) + + +cdef class FragmentScanOptions(_Weakrefable): + """Scan options specific to a particular fragment and scan operation.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + self.wrapped = sp + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp): + if not sp: + return None + + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFragmentScanOptions, + 'json': JsonFragmentScanOptions, + 'parquet': _get_parquet_symbol('ParquetFragmentScanOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FragmentScanOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def type_name(self): + return frombytes(self.wrapped.get().type_name()) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class IpcFileWriteOptions(FileWriteOptions): + cdef: + CIpcFileWriteOptions* ipc_options + + def __init__(self): + _forbid_instantiation(self.__class__) + + @property + def write_options(self): + out = IpcWriteOptions() + out.c_options = CIpcWriteOptions(deref(self.ipc_options.options)) + return out + + @write_options.setter + def write_options(self, IpcWriteOptions write_options not None): + self.ipc_options.options.reset( + new CIpcWriteOptions(write_options.c_options)) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.ipc_options = sp.get() + + +cdef class IpcFileFormat(FileFormat): + + def __init__(self): + self.init(shared_ptr[CFileFormat](new CIpcFileFormat())) + + def equals(self, IpcFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.IpcFileFormat + + Returns + ------- + True + """ + return True + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.ipc.IpcWriteOptions + 
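+
+        Examples
+        --------
+        A minimal sketch (the chosen compression codec is illustrative);
+        keyword arguments are forwarded to :class:`pyarrow.ipc.IpcWriteOptions`::
+
+            options = IpcFileFormat().make_write_options(compression="zstd")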
""" + cdef IpcFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = IpcWriteOptions(**kwargs) + return opts + + @property + def default_extname(self): + return "arrow" + + def __reduce__(self): + return IpcFileFormat, tuple() + + +cdef class FeatherFileFormat(IpcFileFormat): + + @property + def default_extname(self): + return "feather" + + +cdef class CsvFileFormat(FileFormat): + """ + FileFormat for CSV files. + + Parameters + ---------- + parse_options : pyarrow.csv.ParseOptions + Options regarding CSV parsing. + default_fragment_scan_options : CsvFragmentScanOptions + Default options for fragments scan. + convert_options : pyarrow.csv.ConvertOptions + Options regarding value conversion. + read_options : pyarrow.csv.ReadOptions + General read options. + """ + cdef: + CCsvFileFormat* csv_format + # The encoding field in ReadOptions does not exist in the C++ struct. + # We need to store it here and override it when reading + # default_fragment_scan_options.read_options + public ReadOptions _read_options_py + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, ParseOptions parse_options=None, + default_fragment_scan_options=None, + ConvertOptions convert_options=None, + ReadOptions read_options=None): + self.init(shared_ptr[CFileFormat](new CCsvFileFormat())) + if parse_options is not None: + self.parse_options = parse_options + if convert_options is not None or read_options is not None: + if default_fragment_scan_options: + raise ValueError('If `default_fragment_scan_options` is ' + 'given, cannot specify convert_options ' + 'or read_options') + self.default_fragment_scan_options = CsvFragmentScanOptions( + convert_options=convert_options, read_options=read_options) + elif isinstance(default_fragment_scan_options, dict): + self.default_fragment_scan_options = CsvFragmentScanOptions( + **default_fragment_scan_options) + elif isinstance(default_fragment_scan_options, CsvFragmentScanOptions): + self.default_fragment_scan_options = default_fragment_scan_options + elif default_fragment_scan_options is not None: + raise TypeError('`default_fragment_scan_options` must be either ' + 'a dictionary or an instance of ' + 'CsvFragmentScanOptions') + if read_options is not None: + self._read_options_py = read_options + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.csv_format = sp.get() + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.csv.WriteOptions + """ + cdef CsvFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = WriteOptions(**kwargs) + return opts + + @property + def parse_options(self): + return ParseOptions.wrap(self.csv_format.parse_options) + + @parse_options.setter + def parse_options(self, ParseOptions parse_options not None): + self.csv_format.parse_options = deref(parse_options.options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'csv': + self.csv_format.default_fragment_scan_options = options.wrapped + self.default_fragment_scan_options.read_options = options.read_options + self._read_options_py = options.read_options + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, CsvFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.CsvFileFormat + + Returns + ------- + bool + """ + return ( + self.parse_options.equals(other.parse_options) and + 
self.default_fragment_scan_options ==
+            other.default_fragment_scan_options)
+
+    def __reduce__(self):
+        return CsvFileFormat, (self.parse_options,
+                               self.default_fragment_scan_options)
+
+    def __repr__(self):
+        return f"<CsvFileFormat parse_options={self.parse_options}>"
+
+
+cdef class CsvFragmentScanOptions(FragmentScanOptions):
+    """
+    Scan-specific options for CSV fragments.
+
+    Parameters
+    ----------
+    convert_options : pyarrow.csv.ConvertOptions
+        Options regarding value conversion.
+    read_options : pyarrow.csv.ReadOptions
+        General read options.
+    """
+
+    cdef:
+        CCsvFragmentScanOptions* csv_options
+        # The encoding field in ReadOptions does not exist in the C++ struct.
+        # We need to store it here and override it when reading read_options
+        ReadOptions _read_options_py
+
+    # Avoid mistakenly creating attributes
+    __slots__ = ()
+
+    def __init__(self, ConvertOptions convert_options=None,
+                 ReadOptions read_options=None):
+        self.init(shared_ptr[CFragmentScanOptions](
+            new CCsvFragmentScanOptions()))
+        if convert_options is not None:
+            self.convert_options = convert_options
+        if read_options is not None:
+            self.read_options = read_options
+            self._read_options_py = read_options
+
+    cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp):
+        FragmentScanOptions.init(self, sp)
+        self.csv_options = <CCsvFragmentScanOptions*> sp.get()
+
+    @property
+    def convert_options(self):
+        return ConvertOptions.wrap(self.csv_options.convert_options)
+
+    @convert_options.setter
+    def convert_options(self, ConvertOptions convert_options not None):
+        self.csv_options.convert_options = deref(convert_options.options)
+
+    @property
+    def read_options(self):
+        read_options = ReadOptions.wrap(self.csv_options.read_options)
+        if self._read_options_py is not None:
+            read_options.encoding = self._read_options_py.encoding
+        return read_options
+
+    @read_options.setter
+    def read_options(self, ReadOptions read_options not None):
+        self.csv_options.read_options = deref(read_options.options)
+        self._read_options_py = read_options
+        if codecs.lookup(read_options.encoding).name != 'utf-8':
+            self.csv_options.stream_transform_func = deref(
+                make_streamwrap_func(read_options.encoding, 'utf-8'))
+
+    def equals(self, CsvFragmentScanOptions other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.dataset.CsvFragmentScanOptions
+
+        Returns
+        -------
+        bool
+        """
+        return (
+            other and
+            self.convert_options.equals(other.convert_options) and
+            self.read_options.equals(other.read_options))
+
+    def __reduce__(self):
+        return CsvFragmentScanOptions, (self.convert_options,
+                                        self.read_options)
+
+
+cdef class CsvFileWriteOptions(FileWriteOptions):
+    cdef:
+        CCsvFileWriteOptions* csv_options
+        object _properties
+
+    def __init__(self):
+        _forbid_instantiation(self.__class__)
+
+    @property
+    def write_options(self):
+        return WriteOptions.wrap(deref(self.csv_options.write_options))
+
+    @write_options.setter
+    def write_options(self, WriteOptions write_options not None):
+        self.csv_options.write_options.reset(
+            new CCSVWriteOptions(deref(write_options.options)))
+
+    cdef void init(self, const shared_ptr[CFileWriteOptions]& sp):
+        FileWriteOptions.init(self, sp)
+        self.csv_options = <CCsvFileWriteOptions*> sp.get()
+
+
+cdef class JsonFileFormat(FileFormat):
+    """
+    FileFormat for JSON files.
+
+    Parameters
+    ----------
+    default_fragment_scan_options : JsonFragmentScanOptions
+        Default options for fragment scanning.
+    parse_options : pyarrow.json.ParseOptions
+        Options regarding JSON parsing.
+    read_options : pyarrow.json.ReadOptions
+        General read options.
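+
+    Examples
+    --------
+    A minimal sketch (the path is illustrative); newline-delimited JSON
+    files under ``data/`` are read as a single dataset::
+
+        import pyarrow.dataset as ds
+
+        dataset = ds.dataset("data/", format=ds.JsonFileFormat())
+        table = dataset.to_table()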
+
+    """
+    cdef:
+        CJsonFileFormat* json_format
+
+    # Avoid mistakenly creating attributes
+    __slots__ = ()
+
+    def __init__(self, default_fragment_scan_options=None,
+                 JsonParseOptions parse_options=None,
+                 JsonReadOptions read_options=None):
+        self.init(shared_ptr[CFileFormat](new CJsonFileFormat()))
+        if parse_options is not None or read_options is not None:
+            if default_fragment_scan_options is not None:
+                raise ValueError('If `default_fragment_scan_options` is '
+                                 'given, cannot specify parse_options '
+                                 'or read_options')
+            self.default_fragment_scan_options = JsonFragmentScanOptions(
+                parse_options=parse_options,
+                read_options=read_options)
+        elif isinstance(default_fragment_scan_options, dict):
+            self.default_fragment_scan_options = JsonFragmentScanOptions(
+                **default_fragment_scan_options)
+        elif isinstance(default_fragment_scan_options, JsonFragmentScanOptions):
+            self.default_fragment_scan_options = default_fragment_scan_options
+        elif default_fragment_scan_options is not None:
+            raise TypeError('`default_fragment_scan_options` must be either '
+                            'a dictionary or an instance of '
+                            'JsonFragmentScanOptions')
+
+    cdef void init(self, const shared_ptr[CFileFormat]& sp):
+        FileFormat.init(self, sp)
+        self.json_format = <CJsonFileFormat*> sp.get()
+
+    cdef _set_default_fragment_scan_options(self, FragmentScanOptions options):
+        if options.type_name == 'json':
+            self.json_format.default_fragment_scan_options = options.wrapped
+            self.default_fragment_scan_options.read_options = options.read_options
+            self.default_fragment_scan_options.parse_options = options.parse_options
+        else:
+            super()._set_default_fragment_scan_options(options)
+
+    def equals(self, JsonFileFormat other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.dataset.JsonFileFormat
+
+        Returns
+        -------
+        bool
+        """
+        return (other and
+                self.default_fragment_scan_options ==
+                other.default_fragment_scan_options)
+
+    def __reduce__(self):
+        return JsonFileFormat, (self.default_fragment_scan_options,)
+
+    def __repr__(self):
+        return "<JsonFileFormat>"
+
+
+cdef class JsonFragmentScanOptions(FragmentScanOptions):
+    """
+    Scan-specific options for JSON fragments.
+
+    Parameters
+    ----------
+    parse_options : pyarrow.json.ParseOptions
+        Options regarding JSON parsing.
+    read_options : pyarrow.json.ReadOptions
+        General read options.
+ """ + cdef: + CJsonFragmentScanOptions* json_options + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, JsonParseOptions parse_options=None, + JsonReadOptions read_options=None): + self.init(shared_ptr[CFragmentScanOptions]( + new CJsonFragmentScanOptions())) + if parse_options is not None: + self.parse_options = parse_options + if read_options is not None: + self.read_options = read_options + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.json_options = sp.get() + + @property + def parse_options(self): + return JsonParseOptions.wrap(self.json_options.parse_options) + + @parse_options.setter + def parse_options(self, JsonParseOptions parse_options not None): + self.json_options.parse_options = parse_options.options + + @property + def read_options(self): + return JsonReadOptions.wrap(self.json_options.read_options) + + @read_options.setter + def read_options(self, JsonReadOptions read_options not None): + self.json_options.read_options = read_options.options + + def equals(self, JsonFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.JsonFragmentScanOptions + + Returns + ------- + bool + """ + return ( + other and + self.read_options.equals(other.read_options) and + self.parse_options.equals(other.parse_options)) + + def __reduce__(self): + return JsonFragmentScanOptions, (self.parse_options, self.read_options) + + +cdef class Partitioning(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + self.wrapped = sp + self.partitioning = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'directory': DirectoryPartitioning, + 'hive': HivePartitioning, + 'filename': FilenamePartitioning, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Partitioning self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CPartitioning] unwrap(self): + return self.wrapped + + def __eq__(self, other): + if isinstance(other, Partitioning): + return self.partitioning.Equals(deref((other).unwrap())) + return False + + def parse(self, path): + """ + Parse a path into a partition expression. 
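+
+        For instance, with a hive-style partitioning over ``year`` and
+        ``month`` (illustrative field names), the path
+        ``"/year=2009/month=11"`` parses to an expression equivalent to
+        ``(year == 2009) and (month == 11)``.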
+ + Parameters + ---------- + path : str + + Returns + ------- + pyarrow.dataset.Expression + """ + cdef CResult[CExpression] result + result = self.partitioning.Parse(tobytes(path)) + return Expression.wrap(GetResultValue(result)) + + @property + def schema(self): + """The arrow Schema attached to the partitioning.""" + return pyarrow_wrap_schema(self.partitioning.schema()) + + +cdef class PartitioningFactory(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options): + cdef PartitioningFactory self = PartitioningFactory.__new__( + PartitioningFactory + ) + self.init(sp) + self.constructor = constructor + self.options = options + return self + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self): + return self.wrapped + + def __reduce__(self): + return self.constructor, self.options + + @property + def type_name(self): + return frombytes(self.factory.type_name()) + + +cdef vector[shared_ptr[CArray]] _partitioning_dictionaries( + Schema schema, dictionaries) except *: + cdef: + vector[shared_ptr[CArray]] c_dictionaries + + dictionaries = dictionaries or {} + + for field in schema: + dictionary = dictionaries.get(field.name) + + if (isinstance(field.type, pa.DictionaryType) and + dictionary is not None): + c_dictionaries.push_back(pyarrow_unwrap_array(dictionary)) + else: + c_dictionaries.push_back( nullptr) + + return c_dictionaries + + +cdef class KeyValuePartitioning(Partitioning): + + cdef: + CKeyValuePartitioning* keyvalue_partitioning + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + Partitioning.init(self, sp) + self.keyvalue_partitioning = sp.get() + self.wrapped = sp + self.partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + return self.__class__, (self.schema, dictionaries, segment_encoding) + + @property + def dictionaries(self): + """ + The unique values for each partition field, if available. + + Those values are only available if the Partitioning object was + created through dataset discovery from a PartitioningFactory, or + if the dictionaries were manually specified in the constructor. + If no dictionary field is available, this returns an empty list. + """ + cdef vector[shared_ptr[CArray]] c_arrays + c_arrays = self.keyvalue_partitioning.dictionaries() + res = [] + for arr in c_arrays: + if arr.get() == nullptr: + # Partitioning object has not been created through + # inspected Factory + res.append(None) + else: + res.append(pyarrow_wrap_array(arr)) + return res + + +def _constructor_directory_partitioning_factory(*args): + return DirectoryPartitioning.discover(*args) + + +cdef class DirectoryPartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The DirectoryPartitioning expects one segment in the file path for each + field in the schema (all fields are required to be present). + For example given schema the path "/2009/11" would + be parsed to ("year"_ == 2009 and "month"_ == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. 
+ dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + DirectoryPartitioning + + Examples + -------- + >>> from pyarrow.dataset import DirectoryPartitioning + >>> partitioning = DirectoryPartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/2009/11/")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CDirectoryPartitioning* directory_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CDirectoryPartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CDirectoryPartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.directory_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + max_partition_dictionary_size=0, + schema=None, segment_encoding="uri"): + """ + Discover a DirectoryPartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
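Illustrative usage sketch (separate from the library source above): discovering a DirectoryPartitioning from subdirectory names, as the docstring describes. The layout and field names are hypothetical.

    import pyarrow.dataset as ds

    # Layout assumed: data/<year>/<month>/part-*.parquet
    factory = ds.DirectoryPartitioning.discover(
        field_names=["year", "month"], infer_dictionary=True)
    dataset = ds.dataset("data/", format="parquet", partitioning=factory)
    print(dataset.schema)  # includes the inferred year/month partition fields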
+ """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise ValueError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CDirectoryPartitioning.MakeFactory(c_field_names, c_options), + _constructor_directory_partitioning_factory, + (field_names, infer_dictionary, max_partition_dictionary_size, + schema, segment_encoding) + ) + + +def _constructor_hive_partitioning_factory(*args): + return HivePartitioning.discover(*args) + + +cdef class HivePartitioning(KeyValuePartitioning): + """ + A Partitioning for "/$key=$value/" nested directories as found in + Apache Hive. + + Multi-level, directory based partitioning scheme originating from + Apache Hive with all data files stored in the leaf directories. Data is + partitioned by static values of a particular column in the schema. + Partition keys are represented in the form $key=$value in directory names. + Field order is ignored, as are missing or unrecognized field names. + + For example, given schema, a possible + path would be "/year=2009/month=11/day=15". + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + If any field is None then this fallback will be used as a label + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + HivePartitioning + + Examples + -------- + >>> from pyarrow.dataset import HivePartitioning + >>> partitioning = HivePartitioning( + ... 
pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/year=2009/month=11/")) + ((year == 2009) and (month == 11)) + + """ + + cdef: + CHivePartitioning* hive_partitioning + + def __init__(self, + Schema schema not None, + dictionaries=None, + null_fallback="__HIVE_DEFAULT_PARTITION__", + segment_encoding="uri"): + + cdef: + shared_ptr[CHivePartitioning] c_partitioning + CHivePartitioningOptions c_options + + c_options.null_fallback = tobytes(null_fallback) + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + c_partitioning = make_shared[CHivePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.hive_partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + null_fallback = frombytes(deref(self.hive_partitioning).null_fallback()) + return HivePartitioning, ( + self.schema, dictionaries, null_fallback, segment_encoding + ) + + @staticmethod + def discover(infer_dictionary=False, + max_partition_dictionary_size=0, + null_fallback="__HIVE_DEFAULT_PARTITION__", + schema=None, + segment_encoding="uri"): + """ + Discover a HivePartitioning. + + Parameters + ---------- + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain. This can be more efficient when + materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + When inferring a schema for partition fields this value will be + replaced by null. The default is set to __HIVE_DEFAULT_PARTITION__ + for compatibility with Spark + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
+ """ + cdef: + CHivePartitioningFactoryOptions c_options + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + c_options.null_fallback = tobytes(null_fallback) + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CHivePartitioning.MakeFactory(c_options), + _constructor_hive_partitioning_factory, + (infer_dictionary, max_partition_dictionary_size, null_fallback, + schema, segment_encoding), + ) + + +def _constructor_filename_partitioning_factory(*args): + return FilenamePartitioning.discover(*args) + + +cdef class FilenamePartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The FilenamePartitioning expects one segment in the file name for each + field in the schema (all fields are required to be present) separated + by '_'. For example given schema the name + ``"2009_11_"`` would be parsed to ("year" == 2009 and "month" == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + FilenamePartitioning + + Examples + -------- + >>> from pyarrow.dataset import FilenamePartitioning + >>> partitioning = FilenamePartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("2009_11_data.parquet")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CFilenamePartitioning* filename_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CFilenamePartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CFilenamePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.filename_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + schema=None, segment_encoding="uri"): + """ + Discover a FilenamePartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. 
Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. + """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise TypeError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CFilenamePartitioning.MakeFactory(c_field_names, c_options), + _constructor_filename_partitioning_factory, + (field_names, infer_dictionary, schema, segment_encoding) + ) + + +cdef class DatasetFactory(_Weakrefable): + """ + DatasetFactory is used to create a Dataset, inspect the Schema + of the fragments contained in it, and declare a partitioning. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp): + cdef DatasetFactory self = \ + DatasetFactory.__new__(DatasetFactory) + self.init(sp) + return self + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil: + return self.wrapped + + @property + def root_partition(self): + return Expression.wrap(self.factory.root_partition()) + + @root_partition.setter + def root_partition(self, Expression expr): + check_status(self.factory.SetRootPartition(expr.unwrap())) + + def inspect_schemas(self): + cdef CResult[vector[shared_ptr[CSchema]]] result + cdef CInspectOptions options + with nogil: + result = self.factory.InspectSchemas(options) + + schemas = [] + for s in GetResultValue(result): + schemas.append(pyarrow_wrap_schema(s)) + return schemas + + def inspect(self): + """ + Inspect all data fragments and return a common Schema. + + Returns + ------- + Schema + """ + cdef: + CInspectOptions options + CResult[shared_ptr[CSchema]] result + with nogil: + result = self.factory.Inspect(options) + return pyarrow_wrap_schema(GetResultValue(result)) + + def finish(self, Schema schema=None): + """ + Create a Dataset using the inspected schema or an explicit schema + (if given). + + Parameters + ---------- + schema : Schema, default None + The schema to conform the source to. If None, the inspected + schema is used. + + Returns + ------- + Dataset + """ + cdef: + shared_ptr[CSchema] sp_schema + CResult[shared_ptr[CDataset]] result + + if schema is not None: + sp_schema = pyarrow_unwrap_schema(schema) + with nogil: + result = self.factory.FinishWithSchema(sp_schema) + else: + with nogil: + result = self.factory.Finish() + + return Dataset.wrap(GetResultValue(result)) + + +cdef class FileSystemFactoryOptions(_Weakrefable): + """ + Influences the discovery of filesystem paths. + + Parameters + ---------- + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. 
+ The ignored files will still be part of the Dataset, but will not + have partition information. + partitioning : Partitioning/PartitioningFactory, optional + Apply the Partitioning to every discovered Fragment. See Partitioning or + PartitioningFactory documentation. + exclude_invalid_files : bool, optional (default True) + If True, invalid files will be excluded (file format specific check). + This will incur IO for each files in a serial and single threaded + fashion. Disabling this feature will skip the IO, but unsupported + files may be present in the Dataset (resulting in an error at scan + time). + selector_ignore_prefixes : list, optional + When discovering from a Selector (and not from an explicit file list), + ignore files and directories matching any of these prefixes. + By default this is ['.', '_']. + """ + + cdef: + CFileSystemFactoryOptions options + + __slots__ = () # avoid mistakingly creating attributes + + def __init__(self, partition_base_dir=None, partitioning=None, + exclude_invalid_files=None, + list selector_ignore_prefixes=None): + if isinstance(partitioning, PartitioningFactory): + self.partitioning_factory = partitioning + elif isinstance(partitioning, Partitioning): + self.partitioning = partitioning + + if partition_base_dir is not None: + self.partition_base_dir = partition_base_dir + if exclude_invalid_files is not None: + self.exclude_invalid_files = exclude_invalid_files + if selector_ignore_prefixes is not None: + self.selector_ignore_prefixes = selector_ignore_prefixes + + cdef inline CFileSystemFactoryOptions unwrap(self): + return self.options + + @property + def partitioning(self): + """Partitioning to apply to discovered files. + + NOTE: setting this property will overwrite partitioning_factory. + """ + c_partitioning = self.options.partitioning.partitioning() + if c_partitioning.get() == nullptr: + return None + return Partitioning.wrap(c_partitioning) + + @partitioning.setter + def partitioning(self, Partitioning value): + self.options.partitioning = ( value).unwrap() + + @property + def partitioning_factory(self): + """PartitioningFactory to apply to discovered files and + discover a Partitioning. + + NOTE: setting this property will overwrite partitioning. + """ + c_factory = self.options.partitioning.factory() + if c_factory.get() == nullptr: + return None + return PartitioningFactory.wrap(c_factory, None, None) + + @partitioning_factory.setter + def partitioning_factory(self, PartitioningFactory value): + self.options.partitioning = ( value).unwrap() + + @property + def partition_base_dir(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return frombytes(self.options.partition_base_dir) + + @partition_base_dir.setter + def partition_base_dir(self, value): + self.options.partition_base_dir = tobytes(value) + + @property + def exclude_invalid_files(self): + """Whether to exclude invalid files.""" + return self.options.exclude_invalid_files + + @exclude_invalid_files.setter + def exclude_invalid_files(self, bint value): + self.options.exclude_invalid_files = value + + @property + def selector_ignore_prefixes(self): + """ + List of prefixes. Files matching one of those prefixes will be + ignored by the discovery process. 
+ """ + return [frombytes(p) for p in self.options.selector_ignore_prefixes] + + @selector_ignore_prefixes.setter + def selector_ignore_prefixes(self, values): + self.options.selector_ignore_prefixes = [tobytes(v) for v in values] + + +cdef vector[CFileInfo] unwrap_finfos(finfos): + cdef vector[CFileInfo] o_vect + for fi in finfos: + o_vect.push_back(( fi).unwrap()) + return o_vect + + +cdef class FileSystemDatasetFactory(DatasetFactory): + """ + Create a DatasetFactory from a list of paths with schema inspection. + + Parameters + ---------- + filesystem : pyarrow.fs.FileSystem + Filesystem to discover. + paths_or_selector : pyarrow.fs.FileSelector or list of path-likes + Either a Selector object or a list of path-like objects. + format : FileFormat + Currently only ParquetFileFormat and IpcFileFormat are supported. + options : FileSystemFactoryOptions, optional + Various flags influencing the discovery of filesystem paths. + """ + + cdef: + CFileSystemDatasetFactory* filesystem_factory + + def __init__(self, FileSystem filesystem not None, paths_or_selector, + FileFormat format not None, + FileSystemFactoryOptions options=None): + cdef: + vector[c_string] paths + vector[CFileInfo] finfos + CFileSelector c_selector + CResult[shared_ptr[CDatasetFactory]] result + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CFileFormat] c_format + CFileSystemFactoryOptions c_options + + options = options or FileSystemFactoryOptions() + c_options = options.unwrap() + c_filesystem = filesystem.unwrap() + c_format = format.unwrap() + + if isinstance(paths_or_selector, FileSelector): + with nogil: + c_selector = ( paths_or_selector).selector + result = CFileSystemDatasetFactory.MakeFromSelector( + c_filesystem, + c_selector, + c_format, + c_options + ) + elif isinstance(paths_or_selector, (list, tuple)): + if len(paths_or_selector) > 0 and isinstance(paths_or_selector[0], FileInfo): + finfos = unwrap_finfos(paths_or_selector) + with nogil: + result = CFileSystemDatasetFactory.MakeFromFileInfos( + c_filesystem, + finfos, + c_format, + c_options + ) + else: + paths = [tobytes(s) for s in paths_or_selector] + with nogil: + result = CFileSystemDatasetFactory.MakeFromPaths( + c_filesystem, + paths, + c_format, + c_options + ) + else: + raise TypeError('Must pass either paths or a FileSelector, but ' + 'passed {}'.format(type(paths_or_selector))) + + self.init(GetResultValue(result)) + + cdef init(self, shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.filesystem_factory = sp.get() + + +cdef class UnionDatasetFactory(DatasetFactory): + """ + Provides a way to inspect/discover a Dataset's expected schema before + materialization. + + Parameters + ---------- + factories : list of DatasetFactory + """ + + cdef: + CUnionDatasetFactory* union_factory + + def __init__(self, list factories): + cdef: + DatasetFactory factory + vector[shared_ptr[CDatasetFactory]] c_factories + for factory in factories: + c_factories.push_back(factory.unwrap()) + self.init(GetResultValue(CUnionDatasetFactory.Make(c_factories))) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.union_factory = sp.get() + + +cdef class RecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches.""" + cdef: + # An object that must be kept alive with the iterator. 
+ object iterator_owner + # Iterator is a non-POD type and Cython uses offsetof, leading + # to a compiler warning unless wrapped like so + SharedPtrNoGIL[CRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CRecordBatchIterator iterator): + cdef RecordBatchIterator self = \ + RecordBatchIterator.__new__(RecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CRecordBatchIterator](move(iterator)) + return self + + cdef inline shared_ptr[CRecordBatchIterator] unwrap(self) nogil: + return self.iterator + + def __iter__(self): + return self + + def __next__(self): + cdef shared_ptr[CRecordBatch] record_batch + with nogil: + record_batch = GetResultValue(move(self.iterator.get().Next())) + if record_batch == NULL: + raise StopIteration + return pyarrow_wrap_batch(record_batch) + + +class TaggedRecordBatch(collections.namedtuple( + "TaggedRecordBatch", ["record_batch", "fragment"])): + """ + A combination of a record batch and the fragment it came from. + + Parameters + ---------- + record_batch : RecordBatch + The record batch. + fragment : Fragment + Fragment of the record batch. + """ + + +cdef class TaggedRecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches with fragments.""" + cdef: + object iterator_owner + SharedPtrNoGIL[CTaggedRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CTaggedRecordBatchIterator iterator): + cdef TaggedRecordBatchIterator self = \ + TaggedRecordBatchIterator.__new__(TaggedRecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CTaggedRecordBatchIterator]( + move(iterator)) + return self + + def __iter__(self): + return self + + def __next__(self): + cdef CTaggedRecordBatch batch + with nogil: + batch = GetResultValue(move(self.iterator.get().Next())) + if batch.record_batch == NULL: + raise StopIteration + return TaggedRecordBatch( + record_batch=pyarrow_wrap_batch(batch.record_batch), + fragment=Fragment.wrap(batch.fragment)) + + +cdef void _populate_builder(const shared_ptr[CScannerBuilder]& ptr, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + bint use_threads=True, MemoryPool memory_pool=None, + FragmentScanOptions fragment_scan_options=None)\ + except *: + cdef: + CScannerBuilder *builder + vector[CExpression] c_exprs + + builder = ptr.get() + + check_status(builder.Filter(_bind( + filter, pyarrow_wrap_schema(builder.schema())))) + + if columns is not None: + if isinstance(columns, dict): + for expr in columns.values(): + if not isinstance(expr, Expression): + raise TypeError( + "Expected an Expression for a 'column' dictionary " + "value, got {} instead".format(type(expr)) + ) + c_exprs.push_back(( expr).unwrap()) + + check_status( + builder.Project(c_exprs, [tobytes(c) for c in columns.keys()]) + ) + elif isinstance(columns, list): + check_status(builder.ProjectColumns([tobytes(c) for c in columns])) + else: + raise ValueError( + "Expected a list or a dict for 'columns', " + "got {} instead.".format(type(columns)) + ) + + check_status(builder.BatchSize(batch_size)) + check_status(builder.BatchReadahead(batch_readahead)) + check_status(builder.FragmentReadahead(fragment_readahead)) + 
check_status(builder.UseThreads(use_threads)) + check_status(builder.Pool(maybe_unbox_memory_pool(memory_pool))) + if fragment_scan_options: + check_status( + builder.FragmentScanOptions(fragment_scan_options.wrapped)) + + +cdef class Scanner(_Weakrefable): + """A materialized scan operation with context and options bound. + + A scanner is the class that glues the scan tasks, data fragments and data + sources together. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CScanner]& sp): + self.wrapped = sp + self.scanner = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp): + cdef Scanner self = Scanner.__new__(Scanner) + self.init(sp) + return self + + cdef inline shared_ptr[CScanner] unwrap(self): + return self.wrapped + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *: + cdef: + shared_ptr[CScannerBuilder] builder = make_shared[CScannerBuilder](dataset.unwrap()) + + py_scanoptions = dataset._scanner_options(py_scanoptions) + + # Need to explicitly expand the arguments as Cython doesn't support + # keyword expansion in cdef functions. + _populate_builder( + builder, + columns=py_scanoptions.get("columns"), + filter=py_scanoptions.get("filter"), + batch_size=py_scanoptions.get("batch_size", _DEFAULT_BATCH_SIZE), + batch_readahead=py_scanoptions.get( + "batch_readahead", _DEFAULT_BATCH_READAHEAD), + fragment_readahead=py_scanoptions.get( + "fragment_readahead", _DEFAULT_FRAGMENT_READAHEAD), + use_threads=py_scanoptions.get("use_threads", True), + memory_pool=py_scanoptions.get("memory_pool"), + fragment_scan_options=py_scanoptions.get("fragment_scan_options")) + + return GetResultValue(deref(builder).GetScanOptions()) + + @staticmethod + def from_dataset(Dataset dataset not None, *, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Dataset, + + Parameters + ---------- + dataset : Dataset + Dataset to scan. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. 
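Illustrative usage sketch (separate from the library source above): Scanner.from_dataset with a dict projection and a pushed-down filter, as described in the docstring. The path, column names, and derived expression are hypothetical.

    import pyarrow.dataset as ds
    import pyarrow.compute as pc

    dataset = ds.dataset("data/", format="parquet")  # hypothetical path
    scanner = ds.Scanner.from_dataset(
        dataset,
        columns={"year": ds.field("year"),
                 "total": pc.multiply(ds.field("price"), ds.field("qty"))},
        filter=ds.field("year") == 2009,
        batch_size=64 * 1024)
    table = scanner.to_table()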
+ batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + options = Scanner._make_scan_options( + dataset, + dict(columns=columns, filter=filter, batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, fragment_scan_options=fragment_scan_options) + ) + builder = make_shared[CScannerBuilder](dataset.unwrap(), options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_fragment(Fragment fragment not None, *, Schema schema=None, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Fragment, + + Parameters + ---------- + fragment : Fragment + fragment to scan. + schema : Schema, optional + The schema of the fragment. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. 
If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options = make_shared[CScanOptions]() + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + schema = schema or fragment.physical_schema + + builder = make_shared[CScannerBuilder](pyarrow_unwrap_schema(schema), + fragment.unwrap(), options) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_batches(source, *, Schema schema=None, object columns=None, + Expression filter=None, int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create a Scanner from an iterator of batches. + + This creates a scanner which can be used only once. It is + intended to support writing a dataset (which takes a scanner) + from a source which can be read only once (e.g. a + RecordBatchReader or generator). + + Parameters + ---------- + source : Iterator + The iterator of Batches. + schema : Schema + The schema of the batches. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. 
Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + RecordBatchReader reader + if isinstance(source, pa.ipc.RecordBatchReader): + if schema: + raise ValueError('Cannot specify a schema when providing ' + 'a RecordBatchReader') + reader = source + elif _is_iterable(source): + if schema is None: + raise ValueError('Must provide schema to construct scanner ' + 'from an iterable') + reader = pa.ipc.RecordBatchReader.from_batches(schema, source) + else: + raise TypeError('Expected a RecordBatchReader or an iterable of ' + 'batches instead of the given type: ' + + type(source).__name__) + builder = CScannerBuilder.FromRecordBatchReader(reader.reader) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @property + def dataset_schema(self): + """The schema with which batches will be read from fragments.""" + return pyarrow_wrap_schema( + self.scanner.options().get().dataset_schema) + + @property + def projected_schema(self): + """ + The materialized schema of the data, accounting for projections. + + This is the schema of any data returned from the scanner. + """ + return pyarrow_wrap_schema( + self.scanner.options().get().projected_schema) + + def to_batches(self): + """ + Consume a Scanner in record batches. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + def _iterator(batch_iter): + for batch in batch_iter: + yield batch.record_batch + # Don't make ourselves a generator so errors are raised immediately + return _iterator(self.scan_batches()) + + def scan_batches(self): + """ + Consume a Scanner in record batches with corresponding fragments. + + Returns + ------- + record_batches : iterator of TaggedRecordBatch + """ + cdef CTaggedRecordBatchIterator iterator + with nogil: + iterator = move(GetResultValue(self.scanner.ScanBatches())) + # Don't make ourselves a generator so errors are raised immediately + return TaggedRecordBatchIterator.wrap(self, move(iterator)) + + def to_table(self): + """ + Convert a Scanner into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. 
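Illustrative usage sketch (separate from the library source above): because to_table() materializes the whole scan result, large scans are usually consumed incrementally instead. Paths and column names are hypothetical.

    import pyarrow.dataset as ds

    dataset = ds.dataset("data/", format="parquet")  # hypothetical path
    scanner = dataset.scanner(filter=ds.field("year") == 2009)

    total_rows = 0
    for batch in scanner.to_batches():   # streams RecordBatch objects
        total_rows += batch.num_rows

    # Or hand the stream to another consumer as a RecordBatchReader:
    reader = dataset.scanner().to_reader()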
+ + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + + with nogil: + result = self.scanner.ToTable() + + return pyarrow_wrap_table(GetResultValue(result)) + + def take(self, object indices): + """ + Select rows of data by index. + + Will only consume as many batches of the underlying dataset as + needed. Otherwise, this is equivalent to + ``to_table().take(indices)``. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + cdef shared_ptr[CArray] c_indices + + if not isinstance(indices, pa.Array): + indices = pa.array(indices) + c_indices = pyarrow_unwrap_array(indices) + + with nogil: + result = self.scanner.TakeRows(deref(c_indices)) + return pyarrow_wrap_table(GetResultValue(result)) + + def head(self, int num_rows): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + with nogil: + result = self.scanner.Head(num_rows) + return pyarrow_wrap_table(GetResultValue(result)) + + def count_rows(self): + """ + Count rows matching the scanner filter. + + Returns + ------- + count : int + """ + cdef CResult[int64_t] result + with nogil: + result = self.scanner.CountRows() + return GetResultValue(result) + + def to_reader(self): + """Consume this scanner as a RecordBatchReader. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = GetResultValue(self.scanner.ToRecordBatchReader()) + return reader + + +def get_partition_keys(Expression partition_expression): + """ + Extract partition keys (equality constraints between a field and a scalar) + from an expression as a dict mapping the field's name to its value. + + NB: All expressions yielded by a HivePartitioning or DirectoryPartitioning + will be conjunctions of equality conditions and are accessible through this + function. Other subexpressions will be ignored. + + Parameters + ---------- + partition_expression : pyarrow.dataset.Expression + + Returns + ------- + dict + + Examples + -------- + + For example, an expression of + + is converted to {'part': 'A', 'year': 2016} + """ + cdef: + CExpression expr = partition_expression.unwrap() + pair[CFieldRef, CDatum] ref_val + + out = {} + for ref_val in GetResultValue(CExtractKnownFieldValues(expr)).map: + assert ref_val.first.name() != nullptr + assert ref_val.second.kind() == DatumType_SCALAR + val = pyarrow_wrap_scalar(ref_val.second.scalar()) + out[frombytes(deref(ref_val.first.name()))] = val.as_py() + return out + + +cdef class WrittenFile(_Weakrefable): + """ + Metadata information about files written as + part of a dataset write operation + + Parameters + ---------- + path : str + Path to the file. + metadata : pyarrow.parquet.FileMetaData, optional + For Parquet files, the Parquet file metadata. + size : int + The size of the file in bytes. 
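Illustrative usage sketch (separate from the library source above): an expression equivalent to the one the get_partition_keys docstring converts to {'part': 'A', 'year': 2016}, assuming the function is exposed as pyarrow.dataset.get_partition_keys as in current releases.

    import pyarrow.dataset as ds

    expr = (ds.field("part") == "A") & (ds.field("year") == 2016)
    print(ds.get_partition_keys(expr))
    # {'part': 'A', 'year': 2016}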
+ """ + + def __init__(self, path, metadata, size): + self.path = path + self.metadata = metadata + self.size = size + + +cdef void _filesystemdataset_write_visitor( + dict visit_args, + CFileWriter* file_writer): + cdef: + str path + str base_dir + WrittenFile written_file + FileFormat file_format + + path = frombytes(deref(file_writer).destination().path) + base_dir = frombytes(visit_args['base_dir']) + file_format = FileFormat.wrap(file_writer.format()) + written_file = file_format._finish_write(path, base_dir, file_writer) + visit_args['file_visitor'](written_file) + + +def _filesystemdataset_write( + Scanner data not None, + object base_dir not None, + str basename_template not None, + FileSystem filesystem not None, + Partitioning partitioning not None, + FileWriteOptions file_options not None, + int max_partitions, + object file_visitor, + str existing_data_behavior not None, + int max_open_files, + int max_rows_per_file, + int min_rows_per_group, + int max_rows_per_group, + bool create_dir +): + """ + CFileSystemDataset.Write wrapper + """ + cdef: + CFileSystemDatasetWriteOptions c_options + shared_ptr[CScanner] c_scanner + dict visit_args + + c_options.file_write_options = file_options.unwrap() + c_options.filesystem = filesystem.unwrap() + c_options.base_dir = tobytes(_stringify_path(base_dir)) + c_options.partitioning = partitioning.unwrap() + c_options.max_partitions = max_partitions + c_options.max_open_files = max_open_files + c_options.max_rows_per_file = max_rows_per_file + c_options.max_rows_per_group = max_rows_per_group + c_options.min_rows_per_group = min_rows_per_group + c_options.basename_template = tobytes(basename_template) + if existing_data_behavior == 'error': + c_options.existing_data_behavior = ExistingDataBehavior_ERROR + elif existing_data_behavior == 'overwrite_or_ignore': + c_options.existing_data_behavior =\ + ExistingDataBehavior_OVERWRITE_OR_IGNORE + elif existing_data_behavior == 'delete_matching': + c_options.existing_data_behavior = ExistingDataBehavior_DELETE_MATCHING + else: + raise ValueError( + ("existing_data_behavior must be one of 'error', ", + "'overwrite_or_ignore' or 'delete_matching'") + ) + c_options.create_dir = create_dir + + if file_visitor is not None: + visit_args = {'base_dir': c_options.base_dir, + 'file_visitor': file_visitor} + # Need to use post_finish because parquet metadata is not available + # until after Finish has been called + c_options.writer_post_finish = BindFunction[cb_writer_finish_internal]( + &_filesystemdataset_write_visitor, visit_args) + + c_scanner = data.unwrap() + with nogil: + check_status(CFileSystemDataset.Write(c_options, c_scanner)) + + +cdef class _ScanNodeOptions(ExecNodeOptions): + + def _set_options(self, Dataset dataset, dict scan_options): + cdef: + shared_ptr[CScanOptions] c_scan_options + + c_scan_options = Scanner._make_scan_options(dataset, scan_options) + + self.wrapped.reset( + new CScanNodeOptions(dataset.unwrap(), c_scan_options) + ) + + +class ScanNodeOptions(_ScanNodeOptions): + """ + A Source node which yields batches from a Dataset scan. + + This is the option class for the "scan" node factory. + + This node is capable of applying pushdown projections or filters + to the file readers which reduce the amount of data that needs to + be read (if supported by the file format). But note that this does not + construct associated filter or project nodes to perform the final + filtering or projection. 
Rather, you may supply the same filter + expression or projection to the scan node that you also supply + to the filter or project node. + + Yielded batches will be augmented with fragment/batch indices to + enable stable ordering for simple ExecPlans. + + Parameters + ---------- + dataset : pyarrow.dataset.Dataset + The table which acts as the data source. + **kwargs : dict, optional + Scan options. See `Scanner.from_dataset` for possible arguments. + """ + + def __init__(self, Dataset dataset, **kwargs): + self._set_options(dataset, kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d5bc172d324d533dc642bbb4a3f87439779a16af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet file format.""" + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libarrow_dataset_parquet cimport * + +from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions + + +cdef class ParquetFragmentScanOptions(FragmentScanOptions): + cdef: + CParquetFragmentScanOptions* parquet_options + object _parquet_decryption_config + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + cdef CReaderProperties* reader_properties(self) + cdef ArrowReaderProperties* arrow_reader_properties(self) + + +cdef class ParquetFileWriteOptions(FileWriteOptions): + + cdef: + CParquetFileWriteOptions* parquet_options + object _properties diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_feather.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_feather.pyx new file mode 100644 index 0000000000000000000000000000000000000000..7dd61c9a986ff1044fb7b5c22a2f24725710afd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_feather.pyx @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Implement Feather file format + +# cython: profile=False +# distutils: language = c++ +# cython: language_level=3 + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_feather cimport * +from pyarrow.lib cimport (check_status, Table, _Weakrefable, + get_writer, get_reader, pyarrow_wrap_table) +from pyarrow.lib import tobytes + + +class FeatherError(Exception): + pass + + +def write_feather(Table table, object dest, compression=None, + compression_level=None, chunksize=None, version=2): + cdef shared_ptr[COutputStream] sink + get_writer(dest, &sink) + + cdef CFeatherProperties properties + if version == 2: + properties.version = kFeatherV2Version + else: + properties.version = kFeatherV1Version + + if compression == 'zstd': + properties.compression = CCompressionType_ZSTD + elif compression == 'lz4': + properties.compression = CCompressionType_LZ4_FRAME + else: + properties.compression = CCompressionType_UNCOMPRESSED + + if chunksize is not None: + properties.chunksize = chunksize + + if compression_level is not None: + properties.compression_level = compression_level + + with nogil: + check_status(WriteFeather(deref(table.table), sink.get(), + properties)) + + +cdef class FeatherReader(_Weakrefable): + cdef: + shared_ptr[CFeatherReader] reader + + def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads): + cdef: + shared_ptr[CRandomAccessFile] reader + CIpcReadOptions options = CIpcReadOptions.Defaults() + options.use_threads = use_threads + + get_reader(source, use_memory_map, &reader) + with nogil: + self.reader = GetResultValue(CFeatherReader.Open(reader, options)) + + @property + def version(self): + return self.reader.get().version() + + def read(self): + cdef shared_ptr[CTable] sp_table + with nogil: + check_status(self.reader.get() + .Read(&sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_indices(self, indices): + cdef: + shared_ptr[CTable] sp_table + vector[int] c_indices + + for index in indices: + c_indices.push_back(index) + with nogil: + check_status(self.reader.get() + .Read(c_indices, &sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_names(self, names): + cdef: + shared_ptr[CTable] sp_table + vector[c_string] c_names + + for name in names: + c_names.push_back(tobytes(name)) + with nogil: + check_status(self.reader.get() + .Read(c_names, &sp_table)) + + return pyarrow_wrap_table(sp_table) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4504b78b837ea8c7ec309510a319bde7b8dd3bc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_fs.pxd @@ -0,0 +1,94 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
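Illustrative usage sketch (separate from the library source above): the write_feather and FeatherReader bindings are normally reached through the public pyarrow.feather module. The file name is hypothetical, and zstd compression depends on how Arrow was built.

    import pyarrow as pa
    import pyarrow.feather as feather

    table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    feather.write_feather(table, "data.feather",          # hypothetical path
                          compression="zstd", compression_level=5)
    roundtripped = feather.read_table("data.feather", columns=["a"])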
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow.lib import _detect_compression, frombytes, tobytes +from pyarrow.lib cimport * + + +cpdef enum FileType: + NotFound = CFileType_NotFound + Unknown = CFileType_Unknown + File = CFileType_File + Directory = CFileType_Directory + + +cdef class FileInfo(_Weakrefable): + cdef: + CFileInfo info + + @staticmethod + cdef wrap(CFileInfo info) + + cdef inline CFileInfo unwrap(self) nogil + + @staticmethod + cdef CFileInfo unwrap_safe(obj) + + +cdef class FileSelector(_Weakrefable): + cdef: + CFileSelector selector + + @staticmethod + cdef FileSelector wrap(CFileSelector selector) + + cdef inline CFileSelector unwrap(self) nogil + + +cdef class FileSystem(_Weakrefable): + cdef: + shared_ptr[CFileSystem] wrapped + CFileSystem* fs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[CFileSystem]& sp) + + cdef inline shared_ptr[CFileSystem] unwrap(self) nogil + + +cdef class LocalFileSystem(FileSystem): + cdef: + CLocalFileSystem* localfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class SubTreeFileSystem(FileSystem): + cdef: + CSubTreeFileSystem* subtreefs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class _MockFileSystem(FileSystem): + cdef: + CMockFileSystem* mockfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class PyFileSystem(FileSystem): + cdef: + CPyFileSystem* pyfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_generated_version.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/_generated_version.py new file mode 100644 index 0000000000000000000000000000000000000000..5f069ac87d6a9ad71aa2d1bb57beac8259c68211 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_generated_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] +else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '16.0.0' +__version_tuple__ = version_tuple = (16, 0, 0) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d36dad67abbaa575d8963273c884dd9e8f047b13 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_json.pyx @@ -0,0 +1,310 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: profile=False
+# distutils: language = c++
+# cython: language_level = 3
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.lib cimport (_Weakrefable, MemoryPool,
+                          maybe_unbox_memory_pool,
+                          get_input_stream, pyarrow_wrap_table,
+                          pyarrow_wrap_schema, pyarrow_unwrap_schema)
+
+
+cdef class ReadOptions(_Weakrefable):
+    """
+    Options for reading JSON files.
+
+    Parameters
+    ----------
+    use_threads : bool, optional (default True)
+        Whether to use multiple threads to accelerate reading.
+    block_size : int, optional
+        How many bytes to process at a time from the input stream.
+        This will determine multi-threading granularity as well as
+        the size of individual chunks in the Table.
+    """
+
+    # Avoid mistakenly creating attributes
+    __slots__ = ()
+
+    def __init__(self, use_threads=None, block_size=None):
+        self.options = CJSONReadOptions.Defaults()
+        if use_threads is not None:
+            self.use_threads = use_threads
+        if block_size is not None:
+            self.block_size = block_size
+
+    @property
+    def use_threads(self):
+        """
+        Whether to use multiple threads to accelerate reading.
+        """
+        return self.options.use_threads
+
+    @use_threads.setter
+    def use_threads(self, value):
+        self.options.use_threads = value
+
+    @property
+    def block_size(self):
+        """
+        How many bytes to process at a time from the input stream.
+
+        This will determine multi-threading granularity as well as the size of
+        individual chunks in the Table.
+        """
+        return self.options.block_size
+
+    @block_size.setter
+    def block_size(self, value):
+        self.options.block_size = value
+
+    def __reduce__(self):
+        return ReadOptions, (
+            self.use_threads,
+            self.block_size
+        )
+
+    def equals(self, ReadOptions other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.json.ReadOptions
+
+        Returns
+        -------
+        bool
+        """
+        return (
+            self.use_threads == other.use_threads and
+            self.block_size == other.block_size
+        )
+
+    def __eq__(self, other):
+        try:
+            return self.equals(other)
+        except TypeError:
+            return False
+
+    @staticmethod
+    cdef ReadOptions wrap(CJSONReadOptions options):
+        out = ReadOptions()
+        out.options = options  # shallow copy
+        return out
+
+
+cdef class ParseOptions(_Weakrefable):
+    """
+    Options for parsing JSON files.
+
+    Parameters
+    ----------
+    explicit_schema : Schema, optional (default None)
+        Optional explicit schema (no type inference, ignores other fields).
+    newlines_in_values : bool, optional (default False)
+        Whether objects may be printed across multiple lines (for example
+        pretty printed). If false, input must end with an empty line.
+    unexpected_field_behavior : str, default "infer"
+        How JSON fields outside of explicit_schema (if given) are treated.
+ + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + """ + + __slots__ = () + + def __init__(self, explicit_schema=None, newlines_in_values=None, + unexpected_field_behavior=None): + self.options = CJSONParseOptions.Defaults() + if explicit_schema is not None: + self.explicit_schema = explicit_schema + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if unexpected_field_behavior is not None: + self.unexpected_field_behavior = unexpected_field_behavior + + def __reduce__(self): + return ParseOptions, ( + self.explicit_schema, + self.newlines_in_values, + self.unexpected_field_behavior + ) + + @property + def explicit_schema(self): + """ + Optional explicit schema (no type inference, ignores other fields) + """ + if self.options.explicit_schema.get() == NULL: + return None + else: + return pyarrow_wrap_schema(self.options.explicit_schema) + + @explicit_schema.setter + def explicit_schema(self, value): + self.options.explicit_schema = pyarrow_unwrap_schema(value) + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in JSON values. + Setting this to True reduces the performance of multi-threaded + JSON reading. + """ + return self.options.newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + self.options.newlines_in_values = value + + @property + def unexpected_field_behavior(self): + """ + How JSON fields outside of explicit_schema (if given) are treated. + + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + + Set to "infer" by default. 
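+
+        Examples
+        --------
+        A minimal sketch of switching the behavior; the value used below is
+        illustrative, not a default:
+
+        >>> import pyarrow.json as pj  # doctest: +SKIP
+        >>> opts = pj.ParseOptions()  # doctest: +SKIP
+        >>> opts.unexpected_field_behavior = "error"  # doctest: +SKIP
+        >>> opts.unexpected_field_behavior  # doctest: +SKIP
+        'error'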
+ """ + v = self.options.unexpected_field_behavior + if v == CUnexpectedFieldBehavior_Ignore: + return "ignore" + elif v == CUnexpectedFieldBehavior_Error: + return "error" + elif v == CUnexpectedFieldBehavior_InferType: + return "infer" + else: + raise ValueError('Unexpected value for unexpected_field_behavior') + + @unexpected_field_behavior.setter + def unexpected_field_behavior(self, value): + cdef CUnexpectedFieldBehavior v + + if value == "ignore": + v = CUnexpectedFieldBehavior_Ignore + elif value == "error": + v = CUnexpectedFieldBehavior_Error + elif value == "infer": + v = CUnexpectedFieldBehavior_InferType + else: + raise ValueError( + "Unexpected value `{}` for `unexpected_field_behavior`, pass " + "either `ignore`, `error` or `infer`.".format(value) + ) + + self.options.unexpected_field_behavior = v + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.json.ParseOptions + + Returns + ------- + bool + """ + return ( + self.explicit_schema == other.explicit_schema and + self.newlines_in_values == other.newlines_in_values and + self.unexpected_field_behavior == other.unexpected_field_behavior + ) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + @staticmethod + cdef ParseOptions wrap(CJSONParseOptions options): + out = ParseOptions() + out.options = options # shallow copy + return out + + +cdef _get_reader(input_file, shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + +cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out): + if read_options is None: + out[0] = CJSONReadOptions.Defaults() + else: + out[0] = read_options.options + +cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out): + if parse_options is None: + out[0] = CJSONParseOptions.Defaults() + else: + out[0] = parse_options.options + + +def read_json(input_file, read_options=None, parse_options=None, + MemoryPool memory_pool=None): + """ + Read a Table from a stream of JSON data. + + Parameters + ---------- + input_file : str, path or file-like object + The location of JSON data. Currently only the line-delimited JSON + format is supported. + read_options : pyarrow.json.ReadOptions, optional + Options for the JSON reader (see ReadOptions constructor for defaults). + parse_options : pyarrow.json.ParseOptions, optional + Options for the JSON parser + (see ParseOptions constructor for defaults). + memory_pool : MemoryPool, optional + Pool to allocate Table memory from. + + Returns + ------- + :class:`pyarrow.Table` + Contents of the JSON file as a in-memory table. 
+ """ + cdef: + shared_ptr[CInputStream] stream + CJSONReadOptions c_read_options + CJSONParseOptions c_parse_options + shared_ptr[CJSONReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + + reader = GetResultValue( + CJSONReader.Make(maybe_unbox_memory_pool(memory_pool), + stream, c_read_options, c_parse_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pxd new file mode 100644 index 0000000000000000000000000000000000000000..aecbba317aecd1b331261ca600058e30e0c4f184 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_orc.pxd @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp cimport bool as c_bool +from libc.string cimport const_char +from libcpp.vector cimport vector as std_vector +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus, + CResult, CTable, CMemoryPool, + CKeyValueMetadata, + CRecordBatch, + CTable, CCompressionType, + CRandomAccessFile, COutputStream, + TimeUnit) + +cdef extern from "arrow/adapters/orc/options.h" \ + namespace "arrow::adapters::orc" nogil: + cdef enum CompressionStrategy \ + " arrow::adapters::orc::CompressionStrategy": + _CompressionStrategy_SPEED \ + " arrow::adapters::orc::CompressionStrategy::kSpeed" + _CompressionStrategy_COMPRESSION \ + " arrow::adapters::orc::CompressionStrategy::kCompression" + + cdef enum WriterId" arrow::adapters::orc::WriterId": + _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava" + _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp" + _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto" + _WriterId_SCRITCHLEY_GO \ + " arrow::adapters::orc::WriterId::kScritchleyGo" + _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino" + _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown" + + cdef enum WriterVersion" arrow::adapters::orc::WriterVersion": + _WriterVersion_ORIGINAL \ + " arrow::adapters::orc::WriterVersion::kOriginal" + _WriterVersion_HIVE_8732 \ + " arrow::adapters::orc::WriterVersion::kHive8732" + _WriterVersion_HIVE_4243 \ + " arrow::adapters::orc::WriterVersion::kHive4243" + _WriterVersion_HIVE_12055 \ + " arrow::adapters::orc::WriterVersion::kHive12055" + _WriterVersion_HIVE_13083 \ + " arrow::adapters::orc::WriterVersion::kHive13083" + _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101" + _WriterVersion_ORC_135" 
arrow::adapters::orc::WriterVersion::kOrc135" + _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517" + _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203" + _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14" + _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax" + + cdef cppclass FileVersion" arrow::adapters::orc::FileVersion": + FileVersion(uint32_t major_version, uint32_t minor_version) + uint32_t major_version() + uint32_t minor_version() + c_string ToString() + + cdef struct WriteOptions" arrow::adapters::orc::WriteOptions": + int64_t batch_size + FileVersion file_version + int64_t stripe_size + CCompressionType compression + int64_t compression_block_size + CompressionStrategy compression_strategy + int64_t row_index_stride + double padding_tolerance + double dictionary_key_size_threshold + std_vector[int64_t] bloom_filter_columns + double bloom_filter_fpp + + +cdef extern from "arrow/adapters/orc/adapter.h" \ + namespace "arrow::adapters::orc" nogil: + + cdef cppclass ORCFileReader: + @staticmethod + CResult[unique_ptr[ORCFileReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + CMemoryPool* pool) + + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + CResult[shared_ptr[CSchema]] ReadSchema() + + CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe) + CResult[shared_ptr[CRecordBatch]] ReadStripe( + int64_t stripe, std_vector[c_string]) + + CResult[shared_ptr[CTable]] Read() + CResult[shared_ptr[CTable]] Read(std_vector[c_string]) + + int64_t NumberOfStripes() + int64_t NumberOfRows() + FileVersion GetFileVersion() + c_string GetSoftwareVersion() + CResult[CCompressionType] GetCompression() + int64_t GetCompressionSize() + int64_t GetRowIndexStride() + WriterId GetWriterId() + int32_t GetWriterIdValue() + WriterVersion GetWriterVersion() + int64_t GetNumberOfStripeStatistics() + int64_t GetContentLength() + int64_t GetStripeStatisticsLength() + int64_t GetFileFooterLength() + int64_t GetFilePostscriptLength() + int64_t GetFileLength() + c_string GetSerializedFileTail() + + cdef cppclass ORCFileWriter: + @staticmethod + CResult[unique_ptr[ORCFileWriter]] Open( + COutputStream* output_stream, const WriteOptions& writer_options) + + CStatus Write(const CTable& table) + + CStatus Close() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pyx new file mode 100644 index 0000000000000000000000000000000000000000..7bc68a288aa780342cdf31ddb8fc05511e06e503 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet.pyx @@ -0,0 +1,2205 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
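+
+# A short orientation sketch for the classes defined in this module, as they
+# are typically reached through pyarrow.parquet (the file name below is
+# illustrative only):
+#
+#   import pyarrow.parquet as pq
+#   md = pq.read_metadata("example.parquet")    # FileMetaData
+#   rg = md.row_group(0)                        # RowGroupMetaData
+#   col = rg.column(0)                          # ColumnChunkMetaData
+#   col.statistics                              # Statistics, or None if unset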
+ +# cython: profile=False +# distutils: language = c++ + +from collections.abc import Sequence +from textwrap import indent +import warnings + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (_Weakrefable, Buffer, Schema, + check_status, + MemoryPool, maybe_unbox_memory_pool, + Table, NativeFile, + pyarrow_wrap_chunked_array, + pyarrow_wrap_schema, + pyarrow_unwrap_schema, + pyarrow_wrap_table, + pyarrow_wrap_batch, + pyarrow_wrap_scalar, + NativeFile, get_reader, get_writer, + string_to_timeunit) + +from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream, + _stringify_path, + tobytes, frombytes) + +cimport cpython as cp + +_DEFAULT_ROW_GROUP_SIZE = 1024*1024 +_MAX_ROW_GROUP_SIZE = 64*1024*1024 + +cdef class Statistics(_Weakrefable): + """Statistics for a single column in a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + return """{} + has_min_max: {} + min: {} + max: {} + null_count: {} + distinct_count: {} + num_values: {} + physical_type: {} + logical_type: {} + converted_type (legacy): {}""".format(object.__repr__(self), + self.has_min_max, + self.min, + self.max, + self.null_count, + self.distinct_count, + self.num_values, + self.physical_type, + str(self.logical_type), + self.converted_type) + + def to_dict(self): + """ + Get dictionary representation of statistics. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + d = dict( + has_min_max=self.has_min_max, + min=self.min, + max=self.max, + null_count=self.null_count, + distinct_count=self.distinct_count, + num_values=self.num_values, + physical_type=self.physical_type + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, Statistics other): + """ + Return whether the two column statistics objects are equal. + + Parameters + ---------- + other : Statistics + Statistics to compare against. + + Returns + ------- + are_equal : bool + """ + return self.statistics.get().Equals(deref(other.statistics.get())) + + @property + def has_min_max(self): + """Whether min and max are present (bool).""" + return self.statistics.get().HasMinMax() + + @property + def has_null_count(self): + """Whether null count is present (bool).""" + return self.statistics.get().HasNullCount() + + @property + def has_distinct_count(self): + """Whether distinct count is preset (bool).""" + return self.statistics.get().HasDistinctCount() + + @property + def min_raw(self): + """Min value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_min(self.statistics.get()) + else: + return None + + @property + def max_raw(self): + """Max value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_max(self.statistics.get()) + else: + return None + + @property + def min(self): + """ + Min value as logical type. + + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. + """ + if self.has_min_max: + min_scalar, _ = _cast_statistics(self.statistics.get()) + return min_scalar.as_py() + else: + return None + + @property + def max(self): + """ + Max value as logical type. 
+ + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. + """ + if self.has_min_max: + _, max_scalar = _cast_statistics(self.statistics.get()) + return max_scalar.as_py() + else: + return None + + @property + def null_count(self): + """Number of null values in chunk (int).""" + if self.has_null_count: + return self.statistics.get().null_count() + else: + return None + + @property + def distinct_count(self): + """Distinct number of values in chunk (int).""" + if self.has_distinct_count: + return self.statistics.get().distinct_count() + else: + return None + + @property + def num_values(self): + """Number of non-null values (int).""" + return self.statistics.get().num_values() + + @property + def physical_type(self): + """Physical type of column (str).""" + raw_physical_type = self.statistics.get().physical_type() + return physical_type_name_from_enum(raw_physical_type) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.statistics.get().descr().logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + raw_converted_type = self.statistics.get().descr().converted_type() + return converted_type_name_from_enum(raw_converted_type) + + +cdef class ParquetLogicalType(_Weakrefable): + """Logical type of parquet type.""" + cdef: + shared_ptr[const CParquetLogicalType] type + + def __cinit__(self): + pass + + cdef init(self, const shared_ptr[const CParquetLogicalType]& type): + self.type = type + + def __repr__(self): + return "{}\n {}".format(object.__repr__(self), str(self)) + + def __str__(self): + return frombytes(self.type.get().ToString(), safe=True) + + def to_json(self): + """ + Get a JSON string containing type and type parameters. + + Returns + ------- + json : str + JSON representation of type, with at least a field called 'Type' + which contains the type name. If the type is parameterized, such + as a decimal with scale and precision, will contain those as fields + as well. 
+ """ + return frombytes(self.type.get().ToJSON()) + + @property + def type(self): + """Name of the logical type (str).""" + return logical_type_name_from_enum(self.type.get().type()) + + +cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type): + cdef ParquetLogicalType out = ParquetLogicalType() + out.init(type) + return out + + +cdef _cast_statistic_raw_min(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).min() + elif physical_type == ParquetType_INT32: + return ( statistics).min() + elif physical_type == ParquetType_INT64: + return ( statistics).min() + elif physical_type == ParquetType_FLOAT: + return ( statistics).min() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).min() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).min()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).min(), type_length) + + +cdef _cast_statistic_raw_max(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).max() + elif physical_type == ParquetType_INT32: + return ( statistics).max() + elif physical_type == ParquetType_INT64: + return ( statistics).max() + elif physical_type == ParquetType_FLOAT: + return ( statistics).max() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).max() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).max()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).max(), type_length) + + +cdef _cast_statistics(CStatistics* statistics): + cdef: + shared_ptr[CScalar] c_min + shared_ptr[CScalar] c_max + check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max)) + return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max)) + + +cdef _box_byte_array(ParquetByteArray val): + return cp.PyBytes_FromStringAndSize( val.ptr, val.len) + + +cdef _box_flba(ParquetFLBA val, uint32_t len): + return cp.PyBytes_FromStringAndSize( val.ptr, len) + + +cdef class ColumnChunkMetaData(_Weakrefable): + """Column metadata for a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + statistics = indent(repr(self.statistics), 4 * ' ') + return """{0} + file_offset: {1} + file_path: {2} + physical_type: {3} + num_values: {4} + path_in_schema: {5} + is_stats_set: {6} + statistics: +{7} + compression: {8} + encodings: {9} + has_dictionary_page: {10} + dictionary_page_offset: {11} + data_page_offset: {12} + total_compressed_size: {13} + total_uncompressed_size: {14}""".format(object.__repr__(self), + self.file_offset, + self.file_path, + self.physical_type, + self.num_values, + self.path_in_schema, + self.is_stats_set, + statistics, + self.compression, + self.encodings, + self.has_dictionary_page, + self.dictionary_page_offset, + self.data_page_offset, + self.total_compressed_size, + self.total_uncompressed_size) + + def to_dict(self): + """ + Get dictionary representation of the column chunk metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
+ """ + statistics = self.statistics.to_dict() if self.is_stats_set else None + d = dict( + file_offset=self.file_offset, + file_path=self.file_path, + physical_type=self.physical_type, + num_values=self.num_values, + path_in_schema=self.path_in_schema, + is_stats_set=self.is_stats_set, + statistics=statistics, + compression=self.compression, + encodings=self.encodings, + has_dictionary_page=self.has_dictionary_page, + dictionary_page_offset=self.dictionary_page_offset, + data_page_offset=self.data_page_offset, + total_compressed_size=self.total_compressed_size, + total_uncompressed_size=self.total_uncompressed_size + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ColumnChunkMetaData other): + """ + Return whether the two column chunk metadata objects are equal. + + Parameters + ---------- + other : ColumnChunkMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + @property + def file_offset(self): + """Offset into file where column chunk is located (int).""" + return self.metadata.file_offset() + + @property + def file_path(self): + """Optional file path if set (str or None).""" + return frombytes(self.metadata.file_path()) + + @property + def physical_type(self): + """Physical type of column (str).""" + return physical_type_name_from_enum(self.metadata.type()) + + @property + def num_values(self): + """Total number of values (int).""" + return self.metadata.num_values() + + @property + def path_in_schema(self): + """Nested path to field, separated by periods (str).""" + path = self.metadata.path_in_schema().get().ToDotString() + return frombytes(path) + + @property + def is_stats_set(self): + """Whether or not statistics are present in metadata (bool).""" + return self.metadata.is_stats_set() + + @property + def statistics(self): + """Statistics for column chunk (:class:`Statistics`).""" + if not self.metadata.is_stats_set(): + return None + statistics = Statistics() + statistics.init(self.metadata.statistics(), self) + return statistics + + @property + def compression(self): + """ + Type of compression used for column (str). + + One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD', + or 'UNKNOWN'. + """ + return compression_name_from_enum(self.metadata.compression()) + + @property + def encodings(self): + """ + Encodings used for column (tuple of str). + + One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED', + 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'. 
+ """ + return tuple(map(encoding_name_from_enum, self.metadata.encodings())) + + @property + def has_dictionary_page(self): + """Whether there is dictionary data present in the column chunk (bool).""" + return bool(self.metadata.has_dictionary_page()) + + @property + def dictionary_page_offset(self): + """Offset of dictionary page relative to column chunk offset (int).""" + if self.has_dictionary_page: + return self.metadata.dictionary_page_offset() + else: + return None + + @property + def data_page_offset(self): + """Offset of data page relative to column chunk offset (int).""" + return self.metadata.data_page_offset() + + @property + def has_index_page(self): + """Not yet supported.""" + raise NotImplementedError('not supported in parquet-cpp') + + @property + def index_page_offset(self): + """Not yet supported.""" + raise NotImplementedError("parquet-cpp doesn't return valid values") + + @property + def total_compressed_size(self): + """Compressed size in bytes (int).""" + return self.metadata.total_compressed_size() + + @property + def total_uncompressed_size(self): + """Uncompressed size in bytes (int).""" + return self.metadata.total_uncompressed_size() + + @property + def has_offset_index(self): + """Whether the column chunk has an offset index""" + return self.metadata.GetOffsetIndexLocation().has_value() + + @property + def has_column_index(self): + """Whether the column chunk has a column index""" + return self.metadata.GetColumnIndexLocation().has_value() + + +cdef class SortingColumn: + """ + Sorting specification for a single column. + + Returned by :meth:`RowGroupMetaData.sorting_columns` and used in + :class:`ParquetWriter` to specify the sort order of the data. + + Parameters + ---------- + column_index : int + Index of column that data is sorted by. + descending : bool, default False + Whether column is sorted in descending order. + nulls_first : bool, default False + Whether null values appear before valid values. + + Notes + ----- + + Column indices are zero-based, refer only to leaf fields, and are in + depth-first order. This may make the column indices for nested schemas + different from what you expect. In most cases, it will be easier to + specify the sort order using column names instead of column indices + and converting using the ``from_ordering`` method. 
+ + Examples + -------- + + In other APIs, sort order is specified by names, such as: + + >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')] + + For Parquet, the column index must be used instead: + + >>> import pyarrow.parquet as pq + >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)] + [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)] + + Convert the sort_order into the list of sorting columns with + ``from_ordering`` (note that the schema must be provided as well): + + >>> import pyarrow as pa + >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))]) + >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order) + >>> sorting_columns + (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)) + + Convert back to the sort order with ``to_ordering``: + + >>> pq.SortingColumn.to_ordering(schema, sorting_columns) + ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end') + + See Also + -------- + RowGroupMetaData.sorting_columns + """ + cdef int column_index + cdef c_bool descending + cdef c_bool nulls_first + + def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False): + self.column_index = column_index + self.descending = descending + self.nulls_first = nulls_first + + @classmethod + def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'): + """ + Create a tuple of SortingColumn objects from the same arguments as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sort_keys : Sequence of (name, order) tuples + Names of field/column keys (str) to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + null_placement : {'at_start', 'at_end'}, default 'at_end' + Where null values should appear in the sort order. + + Returns + ------- + sorting_columns : tuple of SortingColumn + """ + if null_placement == 'at_start': + nulls_first = True + elif null_placement == 'at_end': + nulls_first = False + else: + raise ValueError('null_placement must be "at_start" or "at_end"') + + col_map = _name_to_index_map(schema) + + sorting_columns = [] + + for sort_key in sort_keys: + if isinstance(sort_key, str): + name = sort_key + descending = False + elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and + isinstance(sort_key[0], str) and + isinstance(sort_key[1], str)): + name, descending = sort_key + if descending == "descending": + descending = True + elif descending == "ascending": + descending = False + else: + raise ValueError("Invalid sort key direction: {0}" + .format(descending)) + else: + raise ValueError("Invalid sort key: {0}".format(sort_key)) + + try: + column_index = col_map[name] + except KeyError: + raise ValueError("Sort key name '{0}' not found in schema:\n{1}" + .format(name, schema)) + + sorting_columns.append( + cls(column_index, descending=descending, nulls_first=nulls_first) + ) + + return tuple(sorting_columns) + + @staticmethod + def to_ordering(Schema schema, sorting_columns): + """ + Convert a tuple of SortingColumn objects to the same format as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sorting_columns : tuple of SortingColumn + Columns to sort the input on. 
+ + Returns + ------- + sort_keys : tuple of (name, order) tuples + null_placement : {'at_start', 'at_end'} + """ + col_map = {i: name for name, i in _name_to_index_map(schema).items()} + + sort_keys = [] + nulls_first = None + + for sorting_column in sorting_columns: + name = col_map[sorting_column.column_index] + if sorting_column.descending: + order = "descending" + else: + order = "ascending" + sort_keys.append((name, order)) + if nulls_first is None: + nulls_first = sorting_column.nulls_first + elif nulls_first != sorting_column.nulls_first: + raise ValueError("Sorting columns have inconsistent null placement") + + if nulls_first: + null_placement = "at_start" + else: + null_placement = "at_end" + + return tuple(sort_keys), null_placement + + def __repr__(self): + return """{}(column_index={}, descending={}, nulls_first={})""".format( + self.__class__.__name__, + self.column_index, self.descending, self.nulls_first) + + def __eq__(self, SortingColumn other): + return (self.column_index == other.column_index and + self.descending == other.descending and + self.nulls_first == other.nulls_first) + + def __hash__(self): + return hash((self.column_index, self.descending, self.nulls_first)) + + @property + def column_index(self): + """"Index of column data is sorted by (int).""" + return self.column_index + + @property + def descending(self): + """Whether column is sorted in descending order (bool).""" + return self.descending + + @property + def nulls_first(self): + """Whether null values appear before valid values (bool).""" + return self.nulls_first + + +cdef class RowGroupMetaData(_Weakrefable): + """Metadata for a single row group.""" + + def __cinit__(self, FileMetaData parent, int index): + if index < 0 or index >= parent.num_row_groups: + raise IndexError('{0} out of bounds'.format(index)) + self.up_metadata = parent._metadata.RowGroup(index) + self.metadata = self.up_metadata.get() + self.parent = parent + self.index = index + + def __reduce__(self): + return RowGroupMetaData, (self.parent, self.index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, RowGroupMetaData other): + """ + Return whether the two row group metadata objects are equal. + + Parameters + ---------- + other : RowGroupMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + def column(self, int i): + """ + Get column metadata at given index. + + Parameters + ---------- + i : int + Index of column to get metadata for. + + Returns + ------- + ColumnChunkMetaData + Metadata for column within this chunk. + """ + if i < 0 or i >= self.num_columns: + raise IndexError('{0} out of bounds'.format(i)) + chunk = ColumnChunkMetaData() + chunk.init(self, i) + return chunk + + def __repr__(self): + return """{0} + num_columns: {1} + num_rows: {2} + total_byte_size: {3} + sorting_columns: {4}""".format(object.__repr__(self), + self.num_columns, + self.num_rows, + self.total_byte_size, + self.sorting_columns) + + def to_dict(self): + """ + Get dictionary representation of the row group metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
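+
+        Examples
+        --------
+        A sketch, assuming ``rg`` is a RowGroupMetaData instance; the keys
+        mirror the attributes of this class:
+
+        >>> d = rg.to_dict()  # doctest: +SKIP
+        >>> d["num_rows"] == rg.num_rows  # doctest: +SKIP
+        True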
+ """ + columns = [] + d = dict( + num_columns=self.num_columns, + num_rows=self.num_rows, + total_byte_size=self.total_byte_size, + columns=columns, + sorting_columns=[col.to_dict() for col in self.sorting_columns] + ) + for i in range(self.num_columns): + columns.append(self.column(i).to_dict()) + return d + + @property + def num_columns(self): + """Number of columns in this row group (int).""" + return self.metadata.num_columns() + + @property + def num_rows(self): + """Number of rows in this row group (int).""" + return self.metadata.num_rows() + + @property + def total_byte_size(self): + """Total byte size of all the uncompressed column data in this row group (int).""" + return self.metadata.total_byte_size() + + @property + def sorting_columns(self): + """Columns the row group is sorted by (tuple of :class:`SortingColumn`)).""" + out = [] + cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns() + for sorting_col in sorting_columns: + out.append(SortingColumn( + sorting_col.column_idx, + sorting_col.descending, + sorting_col.nulls_first + )) + return tuple(out) + + +def _reconstruct_filemetadata(Buffer serialized): + cdef: + FileMetaData metadata = FileMetaData.__new__(FileMetaData) + CBuffer *buffer = serialized.buffer.get() + uint32_t metadata_len = buffer.size() + + metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len)) + + return metadata + + +cdef class FileMetaData(_Weakrefable): + """Parquet metadata for a single file.""" + + def __cinit__(self): + pass + + def __reduce__(self): + cdef: + NativeFile sink = BufferOutputStream() + COutputStream* c_sink = sink.get_output_stream().get() + with nogil: + self._metadata.WriteTo(c_sink) + + cdef Buffer buffer = sink.getvalue() + return _reconstruct_filemetadata, (buffer,) + + def __hash__(self): + return hash((self.schema, + self.num_rows, + self.num_row_groups, + self.format_version, + self.serialized_size)) + + def __repr__(self): + return """{0} + created_by: {1} + num_columns: {2} + num_rows: {3} + num_row_groups: {4} + format_version: {5} + serialized_size: {6}""".format(object.__repr__(self), + self.created_by, self.num_columns, + self.num_rows, self.num_row_groups, + self.format_version, + self.serialized_size) + + def to_dict(self): + """ + Get dictionary representation of the file metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + row_groups = [] + d = dict( + created_by=self.created_by, + num_columns=self.num_columns, + num_rows=self.num_rows, + num_row_groups=self.num_row_groups, + row_groups=row_groups, + format_version=self.format_version, + serialized_size=self.serialized_size + ) + for i in range(self.num_row_groups): + row_groups.append(self.row_group(i).to_dict()) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, FileMetaData other not None): + """ + Return whether the two file metadata objects are equal. + + Parameters + ---------- + other : FileMetaData + Metadata to compare against. 
+ + Returns + ------- + are_equal : bool + """ + return self._metadata.Equals(deref(other._metadata)) + + @property + def schema(self): + """Schema of the file (:class:`ParquetSchema`).""" + if self._schema is None: + self._schema = ParquetSchema(self) + return self._schema + + @property + def serialized_size(self): + """Size of the original thrift encoded metadata footer (int).""" + return self._metadata.size() + + @property + def num_columns(self): + """Number of columns in file (int).""" + return self._metadata.num_columns() + + @property + def num_rows(self): + """Total number of rows in file (int).""" + return self._metadata.num_rows() + + @property + def num_row_groups(self): + """Number of row groups in file (int).""" + return self._metadata.num_row_groups() + + @property + def format_version(self): + """ + Parquet format version used in file (str, such as '1.0', '2.4'). + + If version is missing or unparsable, will default to assuming '2.6'. + """ + cdef ParquetVersion version = self._metadata.version() + if version == ParquetVersion_V1: + return '1.0' + elif version == ParquetVersion_V2_0: + return 'pseudo-2.0' + elif version == ParquetVersion_V2_4: + return '2.4' + elif version == ParquetVersion_V2_6: + return '2.6' + else: + warnings.warn('Unrecognized file version, assuming 2.6: {}' + .format(version)) + return '2.6' + + @property + def created_by(self): + """ + String describing source of the parquet file (str). + + This typically includes library name and version number. For example, Arrow 7.0's + writer returns 'parquet-cpp-arrow version 7.0.0'. + """ + return frombytes(self._metadata.created_by()) + + @property + def metadata(self): + """Additional metadata as key value pairs (dict[bytes, bytes]).""" + cdef: + unordered_map[c_string, c_string] metadata + const CKeyValueMetadata* underlying_metadata + underlying_metadata = self._metadata.key_value_metadata().get() + if underlying_metadata != NULL: + underlying_metadata.ToUnorderedMap(&metadata) + return metadata + else: + return None + + def row_group(self, int i): + """ + Get metadata for row group at index i. + + Parameters + ---------- + i : int + Row group index to get. + + Returns + ------- + row_group_metadata : RowGroupMetaData + """ + return RowGroupMetaData(self, i) + + def set_file_path(self, path): + """ + Set ColumnChunk file paths to the given value. + + This method modifies the ``file_path`` field of each ColumnChunk + in the FileMetaData to be a particular value. + + Parameters + ---------- + path : str + The file path to set on all ColumnChunks. + """ + cdef: + c_string c_path = tobytes(path) + self._metadata.set_file_path(c_path) + + def append_row_groups(self, FileMetaData other): + """ + Append row groups from other FileMetaData object. + + Parameters + ---------- + other : FileMetaData + Other metadata to append row groups from. + """ + cdef shared_ptr[CFileMetaData] c_metadata + + c_metadata = other.sp_metadata + self._metadata.AppendRowGroups(deref(c_metadata)) + + def write_metadata_file(self, where): + """ + Write the metadata to a metadata-only Parquet file. + + Parameters + ---------- + where : path or file-like object + Where to write the metadata. Should be a writable path on + the local filesystem, or a writable file-like object. 
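+
+        Examples
+        --------
+        A sketch of writing a standalone ``_metadata`` file for existing data
+        files (paths are illustrative):
+
+        >>> import pyarrow.parquet as pq  # doctest: +SKIP
+        >>> md = pq.read_metadata("dataset/part-0.parquet")  # doctest: +SKIP
+        >>> md.write_metadata_file("dataset/_metadata")  # doctest: +SKIP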
+ """ + cdef: + shared_ptr[COutputStream] sink + c_string c_where + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &sink) + else: + c_where = tobytes(where) + with nogil: + sink = GetResultValue(FileOutputStream.Open(c_where)) + + with nogil: + check_status( + WriteMetaDataFile(deref(self._metadata), sink.get())) + + +cdef class ParquetSchema(_Weakrefable): + """A Parquet schema.""" + + def __cinit__(self, FileMetaData container): + self.parent = container + self.schema = container._metadata.schema() + + def __repr__(self): + return "{0}\n{1}".format( + object.__repr__(self), + frombytes(self.schema.ToString(), safe=True)) + + def __reduce__(self): + return ParquetSchema, (self.parent,) + + def __len__(self): + return self.schema.num_columns() + + def __getitem__(self, i): + return self.column(i) + + def __hash__(self): + return hash(self.schema.ToString()) + + @property + def names(self): + """Name of each field (list of str).""" + return [self[i].name for i in range(len(self))] + + def to_arrow_schema(self): + """ + Convert Parquet schema to effective Arrow schema. + + Returns + ------- + schema : Schema + """ + cdef shared_ptr[CSchema] sp_arrow_schema + + with nogil: + check_status(FromParquetSchema( + self.schema, default_arrow_reader_properties(), + self.parent._metadata.key_value_metadata(), + &sp_arrow_schema)) + + return pyarrow_wrap_schema(sp_arrow_schema) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ParquetSchema other): + """ + Return whether the two schemas are equal. + + Parameters + ---------- + other : ParquetSchema + Schema to compare against. + + Returns + ------- + are_equal : bool + """ + return self.schema.Equals(deref(other.schema)) + + def column(self, i): + """ + Return the schema for a single column. + + Parameters + ---------- + i : int + Index of column in schema. + + Returns + ------- + column_schema : ColumnSchema + """ + if i < 0 or i >= len(self): + raise IndexError('{0} out of bounds'.format(i)) + + return ColumnSchema(self, i) + + +cdef class ColumnSchema(_Weakrefable): + """Schema for a single column.""" + cdef: + int index + ParquetSchema parent + const ColumnDescriptor* descr + + def __cinit__(self, ParquetSchema schema, int index): + self.parent = schema + self.index = index # for pickling support + self.descr = schema.schema.Column(index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __reduce__(self): + return ColumnSchema, (self.parent, self.index) + + def equals(self, ColumnSchema other): + """ + Return whether the two column schemas are equal. + + Parameters + ---------- + other : ColumnSchema + Schema to compare against. 
+ + Returns + ------- + are_equal : bool + """ + return self.descr.Equals(deref(other.descr)) + + def __repr__(self): + physical_type = self.physical_type + converted_type = self.converted_type + if converted_type == 'DECIMAL': + converted_type = 'DECIMAL({0}, {1})'.format(self.precision, + self.scale) + elif physical_type == 'FIXED_LEN_BYTE_ARRAY': + converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})' + .format(self.length)) + + return """ + name: {0} + path: {1} + max_definition_level: {2} + max_repetition_level: {3} + physical_type: {4} + logical_type: {5} + converted_type (legacy): {6}""".format(self.name, self.path, + self.max_definition_level, + self.max_repetition_level, + physical_type, + str(self.logical_type), + converted_type) + + @property + def name(self): + """Name of field (str).""" + return frombytes(self.descr.name()) + + @property + def path(self): + """Nested path to field, separated by periods (str).""" + return frombytes(self.descr.path().get().ToDotString()) + + @property + def max_definition_level(self): + """Maximum definition level (int).""" + return self.descr.max_definition_level() + + @property + def max_repetition_level(self): + """Maximum repetition level (int).""" + return self.descr.max_repetition_level() + + @property + def physical_type(self): + """Name of physical type (str).""" + return physical_type_name_from_enum(self.descr.physical_type()) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.descr.logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + return converted_type_name_from_enum(self.descr.converted_type()) + + # FIXED_LEN_BYTE_ARRAY attribute + @property + def length(self): + """Array length if fixed length byte array type, None otherwise (int or None).""" + return self.descr.type_length() + + # Decimal attributes + @property + def precision(self): + """Precision if decimal type, None otherwise (int or None).""" + return self.descr.type_precision() + + @property + def scale(self): + """Scale if decimal type, None otherwise (int or None).""" + return self.descr.type_scale() + + +cdef physical_type_name_from_enum(ParquetType type_): + return { + ParquetType_BOOLEAN: 'BOOLEAN', + ParquetType_INT32: 'INT32', + ParquetType_INT64: 'INT64', + ParquetType_INT96: 'INT96', + ParquetType_FLOAT: 'FLOAT', + ParquetType_DOUBLE: 'DOUBLE', + ParquetType_BYTE_ARRAY: 'BYTE_ARRAY', + ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY', + }.get(type_, 'UNKNOWN') + + +cdef logical_type_name_from_enum(ParquetLogicalTypeId type_): + return { + ParquetLogicalType_UNDEFINED: 'UNDEFINED', + ParquetLogicalType_STRING: 'STRING', + ParquetLogicalType_MAP: 'MAP', + ParquetLogicalType_LIST: 'LIST', + ParquetLogicalType_ENUM: 'ENUM', + ParquetLogicalType_DECIMAL: 'DECIMAL', + ParquetLogicalType_DATE: 'DATE', + ParquetLogicalType_TIME: 'TIME', + ParquetLogicalType_TIMESTAMP: 'TIMESTAMP', + ParquetLogicalType_INT: 'INT', + ParquetLogicalType_JSON: 'JSON', + ParquetLogicalType_BSON: 'BSON', + ParquetLogicalType_UUID: 'UUID', + ParquetLogicalType_NONE: 'NONE', + }.get(type_, 'UNKNOWN') + + +cdef converted_type_name_from_enum(ParquetConvertedType type_): + return { + ParquetConvertedType_NONE: 'NONE', + ParquetConvertedType_UTF8: 'UTF8', + ParquetConvertedType_MAP: 'MAP', + ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE', + ParquetConvertedType_LIST: 'LIST', + ParquetConvertedType_ENUM: 'ENUM', + ParquetConvertedType_DECIMAL: 'DECIMAL', + 
ParquetConvertedType_DATE: 'DATE', + ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS', + ParquetConvertedType_TIME_MICROS: 'TIME_MICROS', + ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS', + ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS', + ParquetConvertedType_UINT_8: 'UINT_8', + ParquetConvertedType_UINT_16: 'UINT_16', + ParquetConvertedType_UINT_32: 'UINT_32', + ParquetConvertedType_UINT_64: 'UINT_64', + ParquetConvertedType_INT_8: 'INT_8', + ParquetConvertedType_INT_16: 'INT_16', + ParquetConvertedType_INT_32: 'INT_32', + ParquetConvertedType_INT_64: 'INT_64', + ParquetConvertedType_JSON: 'JSON', + ParquetConvertedType_BSON: 'BSON', + ParquetConvertedType_INTERVAL: 'INTERVAL', + }.get(type_, 'UNKNOWN') + + +cdef encoding_name_from_enum(ParquetEncoding encoding_): + return { + ParquetEncoding_PLAIN: 'PLAIN', + ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY', + ParquetEncoding_RLE: 'RLE', + ParquetEncoding_BIT_PACKED: 'BIT_PACKED', + ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED', + ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY', + ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY', + ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY', + ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT', + }.get(encoding_, 'UNKNOWN') + + +cdef encoding_enum_from_name(str encoding_name): + enc = { + 'PLAIN': ParquetEncoding_PLAIN, + 'BIT_PACKED': ParquetEncoding_BIT_PACKED, + 'RLE': ParquetEncoding_RLE, + 'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT, + 'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED, + 'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY, + 'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY, + 'RLE_DICTIONARY': 'dict', + 'PLAIN_DICTIONARY': 'dict', + }.get(encoding_name, None) + if enc is None: + raise ValueError(f"Unsupported column encoding: {encoding_name!r}") + elif enc == 'dict': + raise ValueError(f"{encoding_name!r} is already used by default.") + else: + return enc + + +cdef compression_name_from_enum(ParquetCompression compression_): + return { + ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED', + ParquetCompression_SNAPPY: 'SNAPPY', + ParquetCompression_GZIP: 'GZIP', + ParquetCompression_LZO: 'LZO', + ParquetCompression_BROTLI: 'BROTLI', + ParquetCompression_LZ4: 'LZ4', + ParquetCompression_ZSTD: 'ZSTD', + }.get(compression_, 'UNKNOWN') + + +cdef int check_compression_name(name) except -1: + if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', + 'ZSTD'}: + raise ArrowException("Unsupported compression: " + name) + return 0 + + +cdef ParquetCompression compression_from_name(name): + name = name.upper() + if name == 'SNAPPY': + return ParquetCompression_SNAPPY + elif name == 'GZIP': + return ParquetCompression_GZIP + elif name == 'LZO': + return ParquetCompression_LZO + elif name == 'BROTLI': + return ParquetCompression_BROTLI + elif name == 'LZ4': + return ParquetCompression_LZ4 + elif name == 'ZSTD': + return ParquetCompression_ZSTD + else: + return ParquetCompression_UNCOMPRESSED + + +cdef class ParquetReader(_Weakrefable): + cdef: + object source + CMemoryPool* pool + UniquePtrNoGIL[FileReader] reader + FileMetaData _metadata + shared_ptr[CRandomAccessFile] rd_handle + + cdef public: + _column_idx_map + + def __cinit__(self, MemoryPool memory_pool=None): + self.pool = maybe_unbox_memory_pool(memory_pool) + self._metadata = None + + def open(self, object source not None, *, bint use_memory_map=False, + read_dictionary=None, FileMetaData metadata=None, + int 
buffer_size=0, bint pre_buffer=False, + coerce_int96_timestamp_unit=None, + FileDecryptionProperties decryption_properties=None, + thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False): + """ + Open a parquet file for reading. + + Parameters + ---------- + source : str, pathlib.Path, pyarrow.NativeFile, or file-like object + use_memory_map : bool, default False + read_dictionary : iterable[int or str], optional + metadata : FileMetaData, optional + buffer_size : int, default 0 + pre_buffer : bool, default False + coerce_int96_timestamp_unit : str, optional + decryption_properties : FileDecryptionProperties, optional + thrift_string_size_limit : int, optional + thrift_container_size_limit : int, optional + page_checksum_verification : bool, default False + """ + cdef: + shared_ptr[CFileMetaData] c_metadata + CReaderProperties properties = default_reader_properties() + ArrowReaderProperties arrow_props = ( + default_arrow_reader_properties()) + FileReaderBuilder builder + + if metadata is not None: + c_metadata = metadata.sp_metadata + + if buffer_size > 0: + properties.enable_buffered_stream() + properties.set_buffer_size(buffer_size) + elif buffer_size == 0: + properties.disable_buffered_stream() + else: + raise ValueError('Buffer size must be larger than zero') + + if thrift_string_size_limit is not None: + if thrift_string_size_limit <= 0: + raise ValueError("thrift_string_size_limit " + "must be larger than zero") + properties.set_thrift_string_size_limit(thrift_string_size_limit) + if thrift_container_size_limit is not None: + if thrift_container_size_limit <= 0: + raise ValueError("thrift_container_size_limit " + "must be larger than zero") + properties.set_thrift_container_size_limit( + thrift_container_size_limit) + + if decryption_properties is not None: + properties.file_decryption_properties( + decryption_properties.unwrap()) + + arrow_props.set_pre_buffer(pre_buffer) + + properties.set_page_checksum_verification(page_checksum_verification) + + if coerce_int96_timestamp_unit is None: + # use the default defined in default_arrow_reader_properties() + pass + else: + arrow_props.set_coerce_int96_timestamp_unit( + string_to_timeunit(coerce_int96_timestamp_unit)) + + self.source = source + get_reader(source, use_memory_map, &self.rd_handle) + + with nogil: + check_status(builder.Open(self.rd_handle, properties, c_metadata)) + + # Set up metadata + with nogil: + c_metadata = builder.raw_reader().metadata() + self._metadata = result = FileMetaData() + result.init(c_metadata) + + if read_dictionary is not None: + self._set_read_dictionary(read_dictionary, &arrow_props) + + with nogil: + check_status(builder.memory_pool(self.pool) + .properties(arrow_props) + .Build(&self.reader)) + + cdef _set_read_dictionary(self, read_dictionary, + ArrowReaderProperties* props): + for column in read_dictionary: + if not isinstance(column, int): + column = self.column_name_idx(column) + props.set_read_dictionary(column, True) + + @property + def column_paths(self): + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + vector[c_string] path + int i = 0 + + paths = [] + for i in range(0, metadata.num_columns()): + path = (metadata.schema().Column(i) + .path().get().ToDotVector()) + paths.append([frombytes(x) for x in path]) + + return paths + + @property + def metadata(self): + return self._metadata + + @property + def schema_arrow(self): + cdef shared_ptr[CSchema] out + with nogil: + 
check_status(self.reader.get().GetSchema(&out)) + return pyarrow_wrap_schema(out) + + @property + def num_row_groups(self): + return self.reader.get().num_row_groups() + + def set_use_threads(self, bint use_threads): + """ + Parameters + ---------- + use_threads : bool + """ + self.reader.get().set_use_threads(use_threads) + + def set_batch_size(self, int64_t batch_size): + """ + Parameters + ---------- + batch_size : int64 + """ + self.reader.get().set_batch_size(batch_size) + + def iter_batches(self, int64_t batch_size, row_groups, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + batch_size : int64 + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Yields + ------ + next : RecordBatch + """ + cdef: + vector[int] c_row_groups + vector[int] c_column_indices + shared_ptr[CRecordBatch] record_batch + UniquePtrNoGIL[CRecordBatchReader] recordbatchreader + + self.set_batch_size(batch_size) + + if use_threads: + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, c_column_indices, &recordbatchreader + ) + ) + else: + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, &recordbatchreader + ) + ) + + while True: + with nogil: + check_status( + recordbatchreader.get().ReadNext(&record_batch) + ) + if record_batch.get() == NULL: + break + + yield pyarrow_wrap_batch(record_batch) + + def read_row_group(self, int i, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + i : int + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + return self.read_row_groups([i], column_indices, use_threads) + + def read_row_groups(self, row_groups not None, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_row_groups + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, c_column_indices, + &ctable)) + else: + # Read all columns + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, &ctable)) + return pyarrow_wrap_table(ctable) + + def read_all(self, column_indices=None, bint use_threads=True): + """ + Parameters + ---------- + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadTable(c_column_indices, &ctable)) + else: + # Read all columns + with nogil: + check_status(self.reader.get() + .ReadTable(&ctable)) + return pyarrow_wrap_table(ctable) + + def scan_contents(self, column_indices=None, batch_size=65536): + """ + 
Parameters + ---------- + column_indices : list[int], optional + batch_size : int32, default 65536 + + Returns + ------- + num_rows : int64 + """ + cdef: + vector[int] c_column_indices + int32_t c_batch_size + int64_t c_num_rows + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + c_batch_size = batch_size + + with nogil: + check_status(self.reader.get() + .ScanContents(c_column_indices, c_batch_size, + &c_num_rows)) + + return c_num_rows + + def column_name_idx(self, column_name): + """ + Find the index of a column by its name. + + Parameters + ---------- + column_name : str + Name of the column; separation of nesting levels is done via ".". + + Returns + ------- + column_idx : int + Integer index of the column in the schema. + """ + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + int i = 0 + + if self._column_idx_map is None: + self._column_idx_map = {} + for i in range(0, metadata.num_columns()): + col_bytes = tobytes(metadata.schema().Column(i) + .path().get().ToDotString()) + self._column_idx_map[col_bytes] = i + + return self._column_idx_map[tobytes(column_name)] + + def read_column(self, int column_index): + """ + Read the column at the specified index. + + Parameters + ---------- + column_index : int + Index of the column. + + Returns + ------- + column : pyarrow.ChunkedArray + """ + cdef shared_ptr[CChunkedArray] out + with nogil: + check_status(self.reader.get() + .ReadColumn(column_index, &out)) + return pyarrow_wrap_chunked_array(out) + + def close(self): + if not self.closed: + with nogil: + check_status(self.rd_handle.get().Close()) + + @property + def closed(self): + if self.rd_handle == NULL: + return True + with nogil: + closed = self.rd_handle.get().closed() + return closed + + +cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column): + cdef CSortingColumn c_sorting_column + + c_sorting_column.column_idx = sorting_column.column_index + c_sorting_column.descending = sorting_column.descending + c_sorting_column.nulls_first = sorting_column.nulls_first + + return c_sorting_column + + +cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *: + if not (isinstance(sorting_columns, Sequence) + and all(isinstance(col, SortingColumn) for col in sorting_columns)): + raise ValueError( + "'sorting_columns' must be a list of `SortingColumn`") + + cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col) + for col in sorting_columns] + + return c_sorting_columns + + +cdef shared_ptr[WriterProperties] _create_writer_properties( + use_dictionary=None, + compression=None, + version=None, + write_statistics=None, + data_page_size=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version=None, + FileEncryptionProperties encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None) except *: + """General writer properties""" + cdef: + shared_ptr[WriterProperties] properties + WriterProperties.Builder props + + # data_page_version + + if data_page_version is not None: + if data_page_version == "1.0": + props.data_page_version(ParquetDataPageVersion_V1) + elif data_page_version == "2.0": + props.data_page_version(ParquetDataPageVersion_V2) + else: + raise ValueError("Unsupported Parquet data page version: {0}" + .format(data_page_version)) + + # version + + if version is not 
None: + if version == "1.0": + props.version(ParquetVersion_V1) + elif version in ("2.0", "pseudo-2.0"): + warnings.warn( + "Parquet format '2.0' pseudo version is deprecated, use " + "'2.4' or '2.6' for fine-grained feature selection", + FutureWarning, stacklevel=2) + props.version(ParquetVersion_V2_0) + elif version == "2.4": + props.version(ParquetVersion_V2_4) + elif version == "2.6": + props.version(ParquetVersion_V2_6) + else: + raise ValueError("Unsupported Parquet format version: {0}" + .format(version)) + + # compression + + if isinstance(compression, basestring): + check_compression_name(compression) + props.compression(compression_from_name(compression)) + elif compression is not None: + for column, codec in compression.iteritems(): + check_compression_name(codec) + props.compression(tobytes(column), compression_from_name(codec)) + + if isinstance(compression_level, int): + props.compression_level(compression_level) + elif compression_level is not None: + for column, level in compression_level.iteritems(): + props.compression_level(tobytes(column), level) + + # use_dictionary + + if isinstance(use_dictionary, bool): + if use_dictionary: + props.enable_dictionary() + if column_encoding is not None: + raise ValueError( + "To use 'column_encoding' set 'use_dictionary' to False") + else: + props.disable_dictionary() + elif use_dictionary is not None: + # Deactivate dictionary encoding by default + props.disable_dictionary() + for column in use_dictionary: + props.enable_dictionary(tobytes(column)) + if (column_encoding is not None and + column_encoding.get(column) is not None): + raise ValueError( + "To use 'column_encoding' set 'use_dictionary' to False") + + # write_statistics + + if isinstance(write_statistics, bool): + if write_statistics: + props.enable_statistics() + else: + props.disable_statistics() + elif write_statistics is not None: + # Deactivate statistics by default and enable for specified columns + props.disable_statistics() + for column in write_statistics: + props.enable_statistics(tobytes(column)) + + # sorting_columns + + if sorting_columns is not None: + props.set_sorting_columns(_convert_sorting_columns(sorting_columns)) + + # use_byte_stream_split + + if isinstance(use_byte_stream_split, bool): + if use_byte_stream_split: + if column_encoding is not None: + raise ValueError( + "'use_byte_stream_split' cannot be passed" + "together with 'column_encoding'") + else: + props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT) + elif use_byte_stream_split is not None: + for column in use_byte_stream_split: + if column_encoding is None: + column_encoding = {column: 'BYTE_STREAM_SPLIT'} + elif column_encoding.get(column, None) is None: + column_encoding[column] = 'BYTE_STREAM_SPLIT' + else: + raise ValueError( + "'use_byte_stream_split' cannot be passed" + "together with 'column_encoding'") + + # column_encoding + # encoding map - encode individual columns + + if column_encoding is not None: + if isinstance(column_encoding, dict): + for column, _encoding in column_encoding.items(): + props.encoding(tobytes(column), + encoding_enum_from_name(_encoding)) + elif isinstance(column_encoding, str): + props.encoding(encoding_enum_from_name(column_encoding)) + else: + raise TypeError( + "'column_encoding' should be a dictionary or a string") + + if data_page_size is not None: + props.data_pagesize(data_page_size) + + if write_batch_size is not None: + props.write_batch_size(write_batch_size) + + if dictionary_pagesize_limit is not None: + 
props.dictionary_pagesize_limit(dictionary_pagesize_limit) + + # encryption + + if encryption_properties is not None: + props.encryption( + (encryption_properties).unwrap()) + + # For backwards compatibility reasons we cap the maximum row group size + # at 64Mi rows. This could be changed in the future, though it would be + # a breaking change. + # + # The user can always specify a smaller row group size (and the default + # is smaller) when calling write_table. If the call to write_table uses + # a size larger than this then it will be latched to this value. + props.max_row_group_length(_MAX_ROW_GROUP_SIZE) + + # checksum + + if write_page_checksum: + props.enable_page_checksum() + else: + props.disable_page_checksum() + + # page index + + if write_page_index: + props.enable_write_page_index() + else: + props.disable_write_page_index() + + properties = props.build() + + return properties + + +cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version=None, + use_compliant_nested_type=True, + store_schema=True) except *: + """Arrow writer properties""" + cdef: + shared_ptr[ArrowWriterProperties] arrow_properties + ArrowWriterProperties.Builder arrow_props + + # Store the original Arrow schema so things like dictionary types can + # be automatically reconstructed + if store_schema: + arrow_props.store_schema() + + # int96 support + + if use_deprecated_int96_timestamps: + arrow_props.enable_deprecated_int96_timestamps() + else: + arrow_props.disable_deprecated_int96_timestamps() + + # coerce_timestamps + + if coerce_timestamps == 'ms': + arrow_props.coerce_timestamps(TimeUnit_MILLI) + elif coerce_timestamps == 'us': + arrow_props.coerce_timestamps(TimeUnit_MICRO) + elif coerce_timestamps is not None: + raise ValueError('Invalid value for coerce_timestamps: {0}' + .format(coerce_timestamps)) + + # allow_truncated_timestamps + + if allow_truncated_timestamps: + arrow_props.allow_truncated_timestamps() + else: + arrow_props.disallow_truncated_timestamps() + + # use_compliant_nested_type + + if use_compliant_nested_type: + arrow_props.enable_compliant_nested_types() + else: + arrow_props.disable_compliant_nested_types() + + # writer_engine_version + + if writer_engine_version == "V1": + warnings.warn("V1 parquet writer engine is a no-op. 
Use V2.") + arrow_props.set_engine_version(ArrowWriterEngineVersion.V1) + elif writer_engine_version != "V2": + raise ValueError("Unsupported Writer Engine Version: {0}" + .format(writer_engine_version)) + + arrow_properties = arrow_props.build() + + return arrow_properties + +cdef _name_to_index_map(Schema arrow_schema): + cdef: + shared_ptr[CSchema] sp_arrow_schema + shared_ptr[SchemaDescriptor] sp_parquet_schema + shared_ptr[WriterProperties] props = _create_writer_properties() + shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version="V2" + ) + + sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema) + + with nogil: + check_status(ToParquetSchema( + sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema)) + + out = dict() + + cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get() + + for i in range(parquet_schema.num_columns()): + name = frombytes(parquet_schema.Column(i).path().get().ToDotString()) + out[name] = i + + return out + + +cdef class ParquetWriter(_Weakrefable): + cdef: + unique_ptr[FileWriter] writer + shared_ptr[COutputStream] sink + bint own_sink + + cdef readonly: + object use_dictionary + object use_deprecated_int96_timestamps + object use_byte_stream_split + object column_encoding + object coerce_timestamps + object allow_truncated_timestamps + object compression + object compression_level + object data_page_version + object use_compliant_nested_type + object version + object write_statistics + object writer_engine_version + int row_group_size + int64_t data_page_size + FileEncryptionProperties encryption_properties + int64_t write_batch_size + int64_t dictionary_pagesize_limit + object store_schema + + def __cinit__(self, where, Schema schema not None, use_dictionary=None, + compression=None, version=None, + write_statistics=None, + MemoryPool memory_pool=None, + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + data_page_size=None, + allow_truncated_timestamps=False, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + writer_engine_version=None, + data_page_version=None, + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None): + cdef: + shared_ptr[WriterProperties] properties + shared_ptr[ArrowWriterProperties] arrow_properties + c_string c_where + CMemoryPool* pool + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &self.sink) + self.own_sink = False + else: + c_where = tobytes(where) + with nogil: + self.sink = GetResultValue(FileOutputStream.Open(c_where)) + self.own_sink = True + + properties = _create_writer_properties( + use_dictionary=use_dictionary, + compression=compression, + version=version, + write_statistics=write_statistics, + data_page_size=data_page_size, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + data_page_version=data_page_version, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + ) + arrow_properties = _create_arrow_writer_properties( + 
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps, + coerce_timestamps=coerce_timestamps, + allow_truncated_timestamps=allow_truncated_timestamps, + writer_engine_version=writer_engine_version, + use_compliant_nested_type=use_compliant_nested_type, + store_schema=store_schema, + ) + + pool = maybe_unbox_memory_pool(memory_pool) + with nogil: + self.writer = move(GetResultValue( + FileWriter.Open(deref(schema.schema), pool, + self.sink, properties, arrow_properties))) + + def close(self): + with nogil: + check_status(self.writer.get().Close()) + if self.own_sink: + check_status(self.sink.get().Close()) + + def write_table(self, Table table, row_group_size=None): + cdef: + CTable* ctable = table.table + int64_t c_row_group_size + + if row_group_size is None or row_group_size == -1: + c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE) + elif row_group_size == 0: + raise ValueError('Row group size cannot be 0') + else: + c_row_group_size = row_group_size + + with nogil: + check_status(self.writer.get() + .WriteTable(deref(ctable), c_row_group_size)) + + @property + def metadata(self): + cdef: + shared_ptr[CFileMetaData] metadata + FileMetaData result + with nogil: + metadata = self.writer.get().metadata() + if metadata: + result = FileMetaData() + result.init(metadata) + return result + raise RuntimeError( + 'file metadata is only available after writer close') diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d52669501a4044838e576d3dac8f8a422874eaa6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pxd @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
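# Hedged usage sketch: the WriterProperties assembled by _create_writer_properties()
# and consumed by ParquetWriter above are normally driven through the public
# pyarrow.parquet.write_table() API. Keyword availability can differ between
# pyarrow releases, so treat the options and the output path below as assumptions.
import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"id": [1, 2, 3], "name": ["a", "b", "c"]})
pq.write_table(
    table,
    "example.parquet",         # hypothetical output path
    compression="zstd",        # -> props.compression(...)
    version="2.6",             # -> props.version(ParquetVersion_V2_6)
    use_dictionary=True,       # -> props.enable_dictionary()
    write_statistics=True,     # -> props.enable_statistics()
    data_page_version="1.0",   # -> ParquetDataPageVersion_V1
    row_group_size=64 * 1024,  # rows per row group (capped at _MAX_ROW_GROUP_SIZE)
)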
+ +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libparquet_encryption cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + FileEncryptionProperties, + FileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) +from pyarrow.lib cimport _Weakrefable + +cdef class CryptoFactory(_Weakrefable): + cdef shared_ptr[CPyCryptoFactory] factory + cdef init(self, callable_client_factory) + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self) + +cdef class EncryptionConfiguration(_Weakrefable): + cdef shared_ptr[CEncryptionConfiguration] configuration + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil + +cdef class DecryptionConfiguration(_Weakrefable): + cdef shared_ptr[CDecryptionConfiguration] configuration + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil + +cdef class KmsConnectionConfig(_Weakrefable): + cdef shared_ptr[CKmsConnectionConfig] configuration + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil + + @staticmethod + cdef wrap(const CKmsConnectionConfig& config) + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except * +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except * +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except * +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except * diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d0a9a6612328c547bc724d6fcf2d37ae5e7badd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx @@ -0,0 +1,484 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
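# Hedged usage sketch for the read path implemented by ParquetReader earlier in
# this patch (iter_batches / read_row_groups / read_all). The public entry point
# is pyarrow.parquet.ParquetFile; the file name and option values are assumptions.
import pyarrow.parquet as pq

pf = pq.ParquetFile(
    "example.parquet",      # hypothetical input path
    buffer_size=64 * 1024,  # enable buffered stream reads
    pre_buffer=True,        # coalesce and prefetch column chunks
)
print(pf.metadata.num_row_groups, pf.schema_arrow)

# Stream record batches instead of materializing the whole table at once.
for batch in pf.iter_batches(batch_size=10_000, columns=["id", "name"]):
    pass  # process each pyarrow.RecordBatch here

# Or read selected row groups into a Table.
subset = pf.read_row_groups([0], columns=["id"])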
+ +# cython: profile=False +# distutils: language = c++ + +from datetime import timedelta + +from cython.operator cimport dereference as deref +from libcpp.memory cimport shared_ptr +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable +from pyarrow.lib import tobytes, frombytes + + +cdef ParquetCipher cipher_from_name(name): + name = name.upper() + if name == 'AES_GCM_V1': + return ParquetCipher_AES_GCM_V1 + elif name == 'AES_GCM_CTR_V1': + return ParquetCipher_AES_GCM_CTR_V1 + else: + raise ValueError(f'Invalid cipher name: {name!r}') + + +cdef cipher_to_name(ParquetCipher cipher): + if ParquetCipher_AES_GCM_V1 == cipher: + return 'AES_GCM_V1' + elif ParquetCipher_AES_GCM_CTR_V1 == cipher: + return 'AES_GCM_CTR_V1' + else: + raise ValueError('Invalid cipher value: {0}'.format(cipher)) + +cdef class EncryptionConfiguration(_Weakrefable): + """Configuration of the encryption, such as which columns to encrypt""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, footer_key, *, column_keys=None, + encryption_algorithm=None, + plaintext_footer=None, double_wrapping=None, + cache_lifetime=None, internal_key_material=None, + data_key_length_bits=None): + self.configuration.reset( + new CEncryptionConfiguration(tobytes(footer_key))) + if column_keys is not None: + self.column_keys = column_keys + if encryption_algorithm is not None: + self.encryption_algorithm = encryption_algorithm + if plaintext_footer is not None: + self.plaintext_footer = plaintext_footer + if double_wrapping is not None: + self.double_wrapping = double_wrapping + if cache_lifetime is not None: + self.cache_lifetime = cache_lifetime + if internal_key_material is not None: + self.internal_key_material = internal_key_material + if data_key_length_bits is not None: + self.data_key_length_bits = data_key_length_bits + + @property + def footer_key(self): + """ID of the master key for footer encryption/signing""" + return frombytes(self.configuration.get().footer_key) + + @property + def column_keys(self): + """ + List of columns to encrypt, with master key IDs. + """ + column_keys_str = frombytes(self.configuration.get().column_keys) + # Convert from "masterKeyID:colName,colName;masterKeyID:colName..." + # (see HIVE-21848) to dictionary of master key ID to column name lists + column_keys_to_key_list_str = dict(subString.replace(" ", "").split( + ":") for subString in column_keys_str.split(";")) + column_keys_dict = {k: v.split( + ",") for k, v in column_keys_to_key_list_str.items()} + return column_keys_dict + + @column_keys.setter + def column_keys(self, dict value): + if value is not None: + # convert a dictionary such as + # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}'' + # to the string defined by the spec + # 'key1: col1 , col2; key2: col3 , col4' + column_keys = "; ".join( + ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()]) + self.configuration.get().column_keys = tobytes(column_keys) + + @property + def encryption_algorithm(self): + """Parquet encryption algorithm. 
+ Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1".""" + return cipher_to_name(self.configuration.get().encryption_algorithm) + + @encryption_algorithm.setter + def encryption_algorithm(self, value): + cipher = cipher_from_name(value) + self.configuration.get().encryption_algorithm = cipher + + @property + def plaintext_footer(self): + """Write files with plaintext footer.""" + return self.configuration.get().plaintext_footer + + @plaintext_footer.setter + def plaintext_footer(self, value): + self.configuration.get().plaintext_footer = value + + @property + def double_wrapping(self): + """Use double wrapping - where data encryption keys (DEKs) are + encrypted with key encryption keys (KEKs), which in turn are + encrypted with master keys. + If set to false, use single wrapping - where DEKs are + encrypted directly with master keys.""" + return self.configuration.get().double_wrapping + + @double_wrapping.setter + def double_wrapping(self, value): + self.configuration.get().double_wrapping = value + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + if not isinstance(value, timedelta): + raise TypeError("cache_lifetime should be a timedelta") + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + @property + def internal_key_material(self): + """Store key material inside Parquet file footers; this mode doesn’t + produce additional files. If set to false, key material is stored in + separate files in the same folder, which enables key rotation for + immutable Parquet files.""" + return self.configuration.get().internal_key_material + + @internal_key_material.setter + def internal_key_material(self, value): + self.configuration.get().internal_key_material = value + + @property + def data_key_length_bits(self): + """Length of data encryption keys (DEKs), randomly generated by parquet key + management tools. 
Can be 128, 192 or 256 bits.""" + return self.configuration.get().data_key_length_bits + + @data_key_length_bits.setter + def data_key_length_bits(self, value): + self.configuration.get().data_key_length_bits = value + + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class DecryptionConfiguration(_Weakrefable): + """Configuration of the decryption, such as cache timeout.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, cache_lifetime=None): + self.configuration.reset(new CDecryptionConfiguration()) + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class KmsConnectionConfig(_Weakrefable): + """Configuration of the connection to the Key Management Service (KMS)""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, kms_instance_id=None, kms_instance_url=None, + key_access_token=None, custom_kms_conf=None): + self.configuration.reset(new CKmsConnectionConfig()) + if kms_instance_id is not None: + self.kms_instance_id = kms_instance_id + if kms_instance_url is not None: + self.kms_instance_url = kms_instance_url + if key_access_token is None: + self.key_access_token = b'DEFAULT' + else: + self.key_access_token = key_access_token + if custom_kms_conf is not None: + self.custom_kms_conf = custom_kms_conf + + @property + def kms_instance_id(self): + """ID of the KMS instance that will be used for encryption + (if multiple KMS instances are available).""" + return frombytes(self.configuration.get().kms_instance_id) + + @kms_instance_id.setter + def kms_instance_id(self, value): + self.configuration.get().kms_instance_id = tobytes(value) + + @property + def kms_instance_url(self): + """URL of the KMS instance.""" + return frombytes(self.configuration.get().kms_instance_url) + + @kms_instance_url.setter + def kms_instance_url(self, value): + self.configuration.get().kms_instance_url = tobytes(value) + + @property + def key_access_token(self): + """Authorization token that will be passed to KMS.""" + return frombytes(self.configuration.get() + .refreshable_key_access_token.get().value()) + + @key_access_token.setter + def key_access_token(self, value): + self.refresh_key_access_token(value) + + @property + def custom_kms_conf(self): + """A dictionary with KMS-type-specific configuration""" + custom_kms_conf = { + frombytes(k): frombytes(v) + for k, v in self.configuration.get().custom_kms_conf + } + return custom_kms_conf + + @custom_kms_conf.setter + def custom_kms_conf(self, dict value): + if value is not None: + for k, v in value.items(): + if isinstance(k, str) and isinstance(v, str): + self.configuration.get().custom_kms_conf[tobytes(k)] = \ + tobytes(v) + else: + raise TypeError("Expected custom_kms_conf to be " + + "a dictionary of strings") + + def refresh_key_access_token(self, value): + cdef: + shared_ptr[CKeyAccessToken] c_key_access_token = \ + self.configuration.get().refreshable_key_access_token + + c_key_access_token.get().Refresh(tobytes(value)) + + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil: + return self.configuration + 
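# Hedged sketch of constructing the configuration objects defined in this module;
# released wheels re-export them under pyarrow.parquet.encryption, though the
# exact import path may vary by version. Key IDs, column names and the KMS URL
# below are placeholder assumptions.
from datetime import timedelta
import pyarrow.parquet.encryption as pe

encryption_config = pe.EncryptionConfiguration(
    footer_key="footer_key_id",                    # master key ID for the footer
    column_keys={"col_key_id": ["col1", "col2"]},  # master key ID -> column names
    encryption_algorithm="AES_GCM_V1",
    cache_lifetime=timedelta(minutes=5),
    data_key_length_bits=256,
)
kms_config = pe.KmsConnectionConfig(
    kms_instance_id="kms-instance-1",              # hypothetical instance ID
    kms_instance_url="https://kms.example.com",    # hypothetical KMS endpoint
    key_access_token="token",
)
decryption_config = pe.DecryptionConfiguration(cache_lifetime=timedelta(minutes=5))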
+ @staticmethod + cdef wrap(const CKmsConnectionConfig& config): + result = KmsConnectionConfig() + result.configuration = make_shared[CKmsConnectionConfig](move(config)) + return result + + +# Callback definitions for CPyKmsClientVtable +cdef void _cb_wrap_key( + handler, const c_string& key_bytes, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wrapped_key = handler.wrap_key(key_bytes, mkid_str) + out[0] = tobytes(wrapped_key) + + +cdef void _cb_unwrap_key( + handler, const c_string& wrapped_key, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wk_str = frombytes(wrapped_key) + key = handler.unwrap_key(wk_str, mkid_str) + out[0] = tobytes(key) + + +cdef class KmsClient(_Weakrefable): + """The abstract base class for KmsClient implementations.""" + cdef: + shared_ptr[CKmsClient] client + + def __init__(self): + self.init() + + cdef init(self): + cdef: + CPyKmsClientVtable vtable = CPyKmsClientVtable() + + vtable.wrap_key = _cb_wrap_key + vtable.unwrap_key = _cb_unwrap_key + + self.client.reset(new CPyKmsClient(self, vtable)) + + def wrap_key(self, key_bytes, master_key_identifier): + """Wrap a key - encrypt it with the master key.""" + raise NotImplementedError() + + def unwrap_key(self, wrapped_key, master_key_identifier): + """Unwrap a key - decrypt it with the master key.""" + raise NotImplementedError() + + cdef inline shared_ptr[CKmsClient] unwrap(self) nogil: + return self.client + + +# Callback definition for CPyKmsClientFactoryVtable +cdef void _cb_create_kms_client( + handler, + const CKmsConnectionConfig& kms_connection_config, + shared_ptr[CKmsClient]* out) except *: + connection_config = KmsConnectionConfig.wrap(kms_connection_config) + + result = handler(connection_config) + if not isinstance(result, KmsClient): + raise TypeError( + "callable must return KmsClient instances, but got {}".format( + type(result))) + + out[0] = ( result).unwrap() + + +cdef class CryptoFactory(_Weakrefable): + """ A factory that produces the low-level FileEncryptionProperties and + FileDecryptionProperties objects, from the high-level parameters.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, kms_client_factory): + """Create CryptoFactory. + + Parameters + ---------- + kms_client_factory : a callable that accepts KmsConnectionConfig + and returns a KmsClient + """ + self.factory.reset(new CPyCryptoFactory()) + + if callable(kms_client_factory): + self.init(kms_client_factory) + else: + raise TypeError("Parameter kms_client_factory must be a callable") + + cdef init(self, callable_client_factory): + cdef: + CPyKmsClientFactoryVtable vtable + shared_ptr[CPyKmsClientFactory] kms_client_factory + + vtable.create_kms_client = _cb_create_kms_client + kms_client_factory.reset( + new CPyKmsClientFactory(callable_client_factory, vtable)) + # A KmsClientFactory object must be registered + # via this method before calling any of + # file_encryption_properties()/file_decryption_properties() methods. + self.factory.get().RegisterKmsClientFactory( + static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory]( + kms_client_factory)) + + def file_encryption_properties(self, + KmsConnectionConfig kms_connection_config, + EncryptionConfiguration encryption_config): + """Create file encryption properties. 
+ + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + encryption_config : EncryptionConfiguration + Configuration of the encryption, such as which columns to encrypt + + Returns + ------- + file_encryption_properties : FileEncryptionProperties + File encryption properties. + """ + cdef: + CResult[shared_ptr[CFileEncryptionProperties]] \ + file_encryption_properties_result + with nogil: + file_encryption_properties_result = \ + self.factory.get().SafeGetFileEncryptionProperties( + deref(kms_connection_config.unwrap().get()), + deref(encryption_config.unwrap().get())) + file_encryption_properties = GetResultValue( + file_encryption_properties_result) + return FileEncryptionProperties.wrap(file_encryption_properties) + + def file_decryption_properties( + self, + KmsConnectionConfig kms_connection_config, + DecryptionConfiguration decryption_config=None): + """Create file decryption properties. + + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + decryption_config : DecryptionConfiguration, default None + Configuration of the decryption, such as cache timeout. + Can be None. + + Returns + ------- + file_decryption_properties : FileDecryptionProperties + File decryption properties. + """ + cdef: + CDecryptionConfiguration c_decryption_config + CResult[shared_ptr[CFileDecryptionProperties]] \ + c_file_decryption_properties + if decryption_config is None: + c_decryption_config = CDecryptionConfiguration() + else: + c_decryption_config = deref(decryption_config.unwrap().get()) + with nogil: + c_file_decryption_properties = \ + self.factory.get().SafeGetFileDecryptionProperties( + deref(kms_connection_config.unwrap().get()), + c_decryption_config) + file_decryption_properties = GetResultValue( + c_file_decryption_properties) + return FileDecryptionProperties.wrap(file_decryption_properties) + + def remove_cache_entries_for_token(self, access_token): + self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token)) + + def remove_cache_entries_for_all_tokens(self): + self.factory.get().RemoveCacheEntriesForAllTokens() + + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self): + return self.factory + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *: + if isinstance(crypto_factory, CryptoFactory): + pycf = ( crypto_factory).unwrap() + return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf) + raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory)) + + +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *: + if isinstance(kmsconnectionconfig, KmsConnectionConfig): + return ( kmsconnectionconfig).unwrap() + raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig)) + + +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *: + if isinstance(encryptionconfig, EncryptionConfiguration): + return ( encryptionconfig).unwrap() + raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig)) + + +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *: + if isinstance(decryptionconfig, DecryptionConfiguration): + return ( decryptionconfig).unwrap() + raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig)) diff --git 
a/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..828b5f56d0435e17e4cf5afe3fc87431cf303f22 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd new file mode 100644 index 0000000000000000000000000000000000000000..91c0220d7310870a7803ecceb2c32b8b32f8c11d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pxd @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport CStatus + + +ctypedef CStatus cb_test_func() + +cdef extern from "arrow/python/python_test.h" namespace "arrow::py::testing" nogil: + + cdef cppclass CTestCase "arrow::py::testing::TestCase": + c_string name + cb_test_func func + + vector[CTestCase] GetCppTestCases() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.pyx b/llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..f5bab99a49f7ae140606f265514062415cbe8277 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_s3fs.pyx @@ -0,0 +1,467 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
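# Hedged end-to-end sketch for the KmsClient / CryptoFactory machinery defined in
# _parquet_encryption.pyx above. The toy client below merely base64-"wraps" keys
# and exists only to illustrate the wrap_key/unwrap_key contract; a real
# deployment would delegate both calls to an actual KMS. File names, key IDs and
# column names are assumptions.
import base64
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

MASTER_KEYS = {"footer_key_id": b"0123456789012345",
               "col_key_id": b"1234567890123450"}

class ToyKmsClient(pe.KmsClient):
    def __init__(self, kms_connection_config):
        super().__init__()

    def wrap_key(self, key_bytes, master_key_identifier):
        # A real client would encrypt key_bytes under the identified master key.
        return base64.b64encode(MASTER_KEYS[master_key_identifier] + key_bytes)

    def unwrap_key(self, wrapped_key, master_key_identifier):
        raw = base64.b64decode(wrapped_key)
        return raw[len(MASTER_KEYS[master_key_identifier]):]

crypto_factory = pe.CryptoFactory(lambda config: ToyKmsClient(config))
kms_config = pe.KmsConnectionConfig()
enc_config = pe.EncryptionConfiguration(footer_key="footer_key_id",
                                        column_keys={"col_key_id": ["name"]})

encryption_props = crypto_factory.file_encryption_properties(kms_config, enc_config)
table = pa.table({"id": [1, 2, 3], "name": ["a", "b", "c"]})
pq.write_table(table, "encrypted.parquet",               # hypothetical path
               encryption_properties=encryption_props)   # forwarded to ParquetWriter

decryption_props = crypto_factory.file_decryption_properties(kms_config)
roundtrip = pq.ParquetFile("encrypted.parquet",
                           decryption_properties=decryption_props).read()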
+ +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata, + pyarrow_unwrap_metadata) +from pyarrow.lib import frombytes, tobytes, KeyValueMetadata +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + + +cpdef enum S3LogLevel: + Off = CS3LogLevel_Off + Fatal = CS3LogLevel_Fatal + Error = CS3LogLevel_Error + Warn = CS3LogLevel_Warn + Info = CS3LogLevel_Info + Debug = CS3LogLevel_Debug + Trace = CS3LogLevel_Trace + + +def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1): + """ + Initialize S3 support + + Parameters + ---------- + log_level : S3LogLevel + level of logging + num_event_loop_threads : int, default 1 + how many threads to use for the AWS SDK's I/O event loop + + Examples + -------- + >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP + """ + cdef CS3GlobalOptions options + options.log_level = log_level + options.num_event_loop_threads = num_event_loop_threads + check_status(CInitializeS3(options)) + + +def ensure_s3_initialized(): + """ + Initialize S3 (with default options) if not already initialized + """ + check_status(CEnsureS3Initialized()) + + +def finalize_s3(): + check_status(CFinalizeS3()) + + +def ensure_s3_finalized(): + """ + Finalize S3 if already initialized + """ + check_status(CEnsureS3Finalized()) + + +def resolve_s3_region(bucket): + """ + Resolve the S3 region of a bucket. + + Parameters + ---------- + bucket : str + A S3 bucket name + + Returns + ------- + region : str + A S3 region name + + Examples + -------- + >>> fs.resolve_s3_region('voltrondata-labs-datasets') + 'us-east-2' + """ + cdef: + c_string c_bucket + c_string c_region + + ensure_s3_initialized() + + c_bucket = tobytes(bucket) + with nogil: + c_region = GetResultValue(ResolveS3BucketRegion(c_bucket)) + + return frombytes(c_region) + + +class S3RetryStrategy: + """ + Base class for AWS retry strategies for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + + def __init__(self, max_attempts=3): + self.max_attempts = max_attempts + + +class AwsStandardS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Standard retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +class AwsDefaultS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Default retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +cdef class S3FileSystem(FileSystem): + """ + S3-backed FileSystem implementation + + AWS access_key and secret_key can be provided explicitly. + + If role_arn is provided instead of access_key and secret_key, temporary + credentials will be fetched by issuing a request to STS to assume the + specified role. + + If neither access_key nor secret_key are provided, and role_arn is also not + provided, then attempts to establish the credentials automatically. 
+ S3FileSystem will try the following methods, in order: + + * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables + * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config`` + * for nodes on Amazon EC2, the EC2 Instance Metadata Service + + Note: S3 buckets are special and the operations available on them may be + limited or more expensive than desired. + + When S3FileSystem creates new buckets (assuming allow_bucket_creation is + True), it does not pass any non-default settings. In AWS S3, the bucket and + all objects will be not publicly visible, and will have no bucket policies + and no resource tags. To have more control over how buckets are created, + use a different API to create them. + + Parameters + ---------- + access_key : str, default None + AWS Access Key ID. Pass None to use the standard AWS environment + variables and/or configuration file. + secret_key : str, default None + AWS Secret Access key. Pass None to use the standard AWS environment + variables and/or configuration file. + session_token : str, default None + AWS Session Token. An optional session token, required if access_key + and secret_key are temporary credentials from STS. + anonymous : boolean, default False + Whether to connect anonymously if access_key and secret_key are None. + If true, will not attempt to look up credentials using standard AWS + configuration methods. + role_arn : str, default None + AWS Role ARN. If provided instead of access_key and secret_key, + temporary credentials will be fetched by assuming this role. + session_name : str, default None + An optional identifier for the assumed role session. + external_id : str, default None + An optional unique identifier that might be required when you assume + a role in another account. + load_frequency : int, default 900 + The frequency (in seconds) with which temporary credentials from an + assumed role session will be refreshed. + region : str, default None + AWS region to connect to. If not set, the AWS SDK will attempt to + determine the region using heuristics such as environment variables, + configuration profile, EC2 metadata, or default to 'us-east-1' when SDK + version <1.8. One can also use :func:`pyarrow.fs.resolve_s3_region` to + automatically resolve the region from a bucket name. + request_timeout : double, default None + Socket read timeouts on Windows and macOS, in seconds. + If omitted, the AWS SDK default value is used (typically 3 seconds). + This option is ignored on non-Windows, non-macOS systems. + connect_timeout : double, default None + Socket connection timeout, in seconds. + If omitted, the AWS SDK default value is used (typically 1 second). + scheme : str, default 'https' + S3 connection transport scheme. + endpoint_override : str, default None + Override region with a connect string such as "localhost:9000" + background_writes : boolean, default True + Whether file writes will be issued in the background, without + blocking. + default_metadata : mapping or pyarrow.KeyValueMetadata, default None + Default metadata for open_output_stream. This will be ignored if + non-empty metadata is passed to open_output_stream. + proxy_options : dict or str, default None + If a proxy is used, provide the options here. Supported options are: + 'scheme' (str: 'http' or 'https'; required), 'host' (str; required), + 'port' (int; required), 'username' (str; optional), + 'password' (str; optional). 
+ A proxy URI (str) can also be provided, in which case these options + will be derived from the provided URI. + The following are equivalent:: + + S3FileSystem(proxy_options='http://username:password@localhost:8020') + S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost', + 'port': 8020, 'username': 'username', + 'password': 'password'}) + allow_bucket_creation : bool, default False + Whether to allow CreateDir at the bucket-level. This option may also be + passed in a URI query parameter. + allow_bucket_deletion : bool, default False + Whether to allow DeleteDir at the bucket-level. This option may also be + passed in a URI query parameter. + retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3) + The retry strategy to use with S3; fail after max_attempts. Available + strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy. + force_virtual_addressing : bool, default False + Whether to use virtual addressing of buckets. + If true, then virtual addressing is always enabled. + If false, then virtual addressing is only enabled if `endpoint_override` is empty. + This can be used for non-AWS backends that only support virtual hosted-style access. + + Examples + -------- + >>> from pyarrow import fs + >>> s3 = fs.S3FileSystem(region='us-west-2') + >>> s3.get_file_info(fs.FileSelector( + ... 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True + ... )) + [ wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.s3fs = wrapped.get() + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return S3FileSystem(**kwargs) + + def __reduce__(self): + cdef CS3Options opts = self.s3fs.options() + + # if creds were explicitly provided, then use them + # else obtain them as they were last time. + if opts.credentials_kind == CS3CredentialsKind_Explicit: + access_key = frombytes(opts.GetAccessKey()) + secret_key = frombytes(opts.GetSecretKey()) + session_token = frombytes(opts.GetSessionToken()) + else: + access_key = None + secret_key = None + session_token = None + + return ( + S3FileSystem._reconstruct, (dict( + access_key=access_key, + secret_key=secret_key, + session_token=session_token, + anonymous=(opts.credentials_kind == + CS3CredentialsKind_Anonymous), + region=frombytes(opts.region), + scheme=frombytes(opts.scheme), + connect_timeout=opts.connect_timeout, + request_timeout=opts.request_timeout, + endpoint_override=frombytes(opts.endpoint_override), + role_arn=frombytes(opts.role_arn), + session_name=frombytes(opts.session_name), + external_id=frombytes(opts.external_id), + load_frequency=opts.load_frequency, + background_writes=opts.background_writes, + allow_bucket_creation=opts.allow_bucket_creation, + allow_bucket_deletion=opts.allow_bucket_deletion, + default_metadata=pyarrow_wrap_metadata(opts.default_metadata), + proxy_options={'scheme': frombytes(opts.proxy_options.scheme), + 'host': frombytes(opts.proxy_options.host), + 'port': opts.proxy_options.port, + 'username': frombytes( + opts.proxy_options.username), + 'password': frombytes( + opts.proxy_options.password)}, + force_virtual_addressing=opts.force_virtual_addressing, + ),) + ) + + @property + def region(self): + """ + The AWS region this filesystem connects to. 
+ """ + return frombytes(self.s3fs.region()) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/acero.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/acero.py new file mode 100644 index 0000000000000000000000000000000000000000..619e1fce393ae486eb8de048b8a548fdc2eabb2c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/acero.py @@ -0,0 +1,395 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# --------------------------------------------------------------------- +# Implement Internal ExecPlan bindings + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.lib import Table +from pyarrow.compute import Expression, field + +try: + from pyarrow._acero import ( # noqa + Declaration, + ExecNodeOptions, + TableSourceNodeOptions, + FilterNodeOptions, + ProjectNodeOptions, + AggregateNodeOptions, + OrderByNodeOptions, + HashJoinNodeOptions, + AsofJoinNodeOptions, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'acero' ({str(exc)})" + ) from None + + +try: + import pyarrow.dataset as ds + from pyarrow._dataset import ScanNodeOptions +except ImportError: + class DatasetModuleStub: + class Dataset: + pass + + class InMemoryDataset: + pass + ds = DatasetModuleStub + + +def _dataset_to_decl(dataset, use_threads=True): + decl = Declaration("scan", ScanNodeOptions(dataset, use_threads=use_threads)) + + # Get rid of special dataset columns + # "__fragment_index", "__batch_index", "__last_in_fragment", "__filename" + projections = [field(f) for f in dataset.schema.names] + decl = Declaration.from_sequence( + [decl, Declaration("project", ProjectNodeOptions(projections))] + ) + + filter_expr = dataset._scan_options.get("filter") + if filter_expr is not None: + # Filters applied in CScanNodeOptions are "best effort" for the scan node itself + # so we always need to inject an additional Filter node to apply them for real. + decl = Declaration.from_sequence( + [decl, Declaration("filter", FilterNodeOptions(filter_expr))] + ) + + return decl + + +def _perform_join(join_type, left_operand, left_keys, + right_operand, right_keys, + left_suffix=None, right_suffix=None, + use_threads=True, coalesce_keys=False, + output_type=Table): + """ + Perform join of two tables or datasets. + + The result will be an output table with the result of the join operation + + Parameters + ---------- + join_type : str + One of supported join types. + left_operand : Table or Dataset + The left operand for the join operation. + left_keys : str or list[str] + The left key (or keys) on which the join operation should be performed. + right_operand : Table or Dataset + The right operand for the join operation. 
+ right_keys : str or list[str] + The right key (or keys) on which the join operation should be performed. + left_suffix : str, default None + Which suffix to add to left column names. This prevents confusion + when the columns in left and right operands have colliding names. + right_suffix : str, default None + Which suffix to add to the right column names. This prevents confusion + when the columns in left and right operands have colliding names. + use_threads : bool, default True + Whether to use multithreading or not. + coalesce_keys : bool, default False + If the duplicated keys should be omitted from one of the sides + in the join result. + output_type: Table or InMemoryDataset + The output type for the exec plan result. + + Returns + ------- + result_table : Table or InMemoryDataset + """ + if not isinstance(left_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}") + if not isinstance(right_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}") + + # Prepare left and right tables Keys to send them to the C++ function + left_keys_order = {} + if not isinstance(left_keys, (tuple, list)): + left_keys = [left_keys] + for idx, key in enumerate(left_keys): + left_keys_order[key] = idx + + right_keys_order = {} + if not isinstance(right_keys, (list, tuple)): + right_keys = [right_keys] + for idx, key in enumerate(right_keys): + right_keys_order[key] = idx + + # By default expose all columns on both left and right table + left_columns = left_operand.schema.names + right_columns = right_operand.schema.names + + # Pick the join type + if join_type == "left semi" or join_type == "left anti": + right_columns = [] + elif join_type == "right semi" or join_type == "right anti": + left_columns = [] + elif join_type == "inner" or join_type == "left outer": + right_columns = [ + col for col in right_columns if col not in right_keys_order + ] + elif join_type == "right outer": + left_columns = [ + col for col in left_columns if col not in left_keys_order + ] + + # Turn the columns to vectors of FieldRefs + # and set aside indices of keys. 
+ left_column_keys_indices = {} + for idx, colname in enumerate(left_columns): + if colname in left_keys: + left_column_keys_indices[colname] = idx + right_column_keys_indices = {} + for idx, colname in enumerate(right_columns): + if colname in right_keys: + right_column_keys_indices[colname] = idx + + # Add the join node to the execplan + if isinstance(left_operand, ds.Dataset): + left_source = _dataset_to_decl(left_operand, use_threads=use_threads) + else: + left_source = Declaration("table_source", TableSourceNodeOptions(left_operand)) + if isinstance(right_operand, ds.Dataset): + right_source = _dataset_to_decl(right_operand, use_threads=use_threads) + else: + right_source = Declaration( + "table_source", TableSourceNodeOptions(right_operand) + ) + + if coalesce_keys: + join_opts = HashJoinNodeOptions( + join_type, left_keys, right_keys, left_columns, right_columns, + output_suffix_for_left=left_suffix or "", + output_suffix_for_right=right_suffix or "", + ) + else: + join_opts = HashJoinNodeOptions( + join_type, left_keys, right_keys, + output_suffix_for_left=left_suffix or "", + output_suffix_for_right=right_suffix or "", + ) + decl = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source] + ) + + if coalesce_keys and join_type == "full outer": + # In case of full outer joins, the join operation will output all columns + # so that we can coalesce the keys and exclude duplicates in a subsequent + # projection. + left_columns_set = set(left_columns) + right_columns_set = set(right_columns) + # Where the right table columns start. + right_operand_index = len(left_columns) + projected_col_names = [] + projections = [] + for idx, col in enumerate(left_columns + right_columns): + if idx < len(left_columns) and col in left_column_keys_indices: + # Include keys only once and coalesce left+right table keys. + projected_col_names.append(col) + # Get the index of the right key that is being paired + # with this left key. We do so by retrieving the name + # of the right key that is in the same position in the provided keys + # and then looking up the index for that name in the right table. + right_key_index = right_column_keys_indices[ + right_keys[left_keys_order[col]]] + projections.append( + Expression._call("coalesce", [ + Expression._field(idx), Expression._field( + right_operand_index+right_key_index) + ]) + ) + elif idx >= right_operand_index and col in right_column_keys_indices: + # Do not include right table keys. As they would lead to duplicated keys + continue + else: + # For all the other columns include them as they are. + # Just recompute the suffixes that the join produced as the projection + # would lose them otherwise. 
+ if ( + left_suffix and idx < right_operand_index + and col in right_columns_set + ): + col += left_suffix + if ( + right_suffix and idx >= right_operand_index + and col in left_columns_set + ): + col += right_suffix + projected_col_names.append(col) + projections.append( + Expression._field(idx) + ) + projection = Declaration( + "project", ProjectNodeOptions(projections, projected_col_names) + ) + decl = Declaration.from_sequence([decl, projection]) + + result_table = decl.to_table(use_threads=use_threads) + + if output_type == Table: + return result_table + elif output_type == ds.InMemoryDataset: + return ds.InMemoryDataset(result_table) + else: + raise TypeError("Unsupported output type") + + +def _perform_join_asof(left_operand, left_on, left_by, + right_operand, right_on, right_by, + tolerance, use_threads=True, + output_type=Table): + """ + Perform asof join of two tables or datasets. + + The result will be an output table with the result of the join operation + + Parameters + ---------- + left_operand : Table or Dataset + The left operand for the join operation. + left_on : str + The left key (or keys) on which the join operation should be performed. + left_by: str or list[str] + The left key (or keys) on which the join operation should be performed. + right_operand : Table or Dataset + The right operand for the join operation. + right_on : str or list[str] + The right key (or keys) on which the join operation should be performed. + right_by: str or list[str] + The right key (or keys) on which the join operation should be performed. + tolerance : int + The tolerance to use for the asof join. The tolerance is interpreted in + the same units as the "on" key. + output_type: Table or InMemoryDataset + The output type for the exec plan result. + + Returns + ------- + result_table : Table or InMemoryDataset + """ + if not isinstance(left_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(left_operand)}") + if not isinstance(right_operand, (Table, ds.Dataset)): + raise TypeError(f"Expected Table or Dataset, got {type(right_operand)}") + + if not isinstance(left_by, (tuple, list)): + left_by = [left_by] + if not isinstance(right_by, (tuple, list)): + right_by = [right_by] + + # AsofJoin does not return on or by columns for right_operand. + right_columns = [ + col for col in right_operand.schema.names + if col not in [right_on] + right_by + ] + columns_collisions = set(left_operand.schema.names) & set(right_columns) + if columns_collisions: + raise ValueError( + "Columns {} present in both tables. 
AsofJoin does not support " + "column collisions.".format(columns_collisions), + ) + + # Add the join node to the execplan + if isinstance(left_operand, ds.Dataset): + left_source = _dataset_to_decl(left_operand, use_threads=use_threads) + else: + left_source = Declaration( + "table_source", TableSourceNodeOptions(left_operand), + ) + if isinstance(right_operand, ds.Dataset): + right_source = _dataset_to_decl(right_operand, use_threads=use_threads) + else: + right_source = Declaration( + "table_source", TableSourceNodeOptions(right_operand) + ) + + join_opts = AsofJoinNodeOptions( + left_on, left_by, right_on, right_by, tolerance + ) + decl = Declaration( + "asofjoin", options=join_opts, inputs=[left_source, right_source] + ) + + result_table = decl.to_table(use_threads=use_threads) + + if output_type == Table: + return result_table + elif output_type == ds.InMemoryDataset: + return ds.InMemoryDataset(result_table) + else: + raise TypeError("Unsupported output type") + + +def _filter_table(table, expression): + """Filter rows of a table based on the provided expression. + + The result will be an output table with only the rows matching + the provided expression. + + Parameters + ---------- + table : Table or Dataset + Table or Dataset that should be filtered. + expression : Expression + The expression on which rows should be filtered. + + Returns + ------- + Table + """ + decl = Declaration.from_sequence([ + Declaration("table_source", options=TableSourceNodeOptions(table)), + Declaration("filter", options=FilterNodeOptions(expression)) + ]) + return decl.to_table(use_threads=True) + + +def _sort_source(table_or_dataset, sort_keys, output_type=Table, **kwargs): + + if isinstance(table_or_dataset, ds.Dataset): + data_source = _dataset_to_decl(table_or_dataset, use_threads=True) + else: + data_source = Declaration( + "table_source", TableSourceNodeOptions(table_or_dataset) + ) + + order_by = Declaration("order_by", OrderByNodeOptions(sort_keys, **kwargs)) + + decl = Declaration.from_sequence([data_source, order_by]) + result_table = decl.to_table(use_threads=True) + + if output_type == Table: + return result_table + elif output_type == ds.InMemoryDataset: + return ds.InMemoryDataset(result_table) + else: + raise TypeError("Unsupported output type") + + +def _group_by(table, aggregates, keys, use_threads=True): + + decl = Declaration.from_sequence([ + Declaration("table_source", TableSourceNodeOptions(table)), + Declaration("aggregate", AggregateNodeOptions(aggregates, keys=keys)) + ]) + return decl.to_table(use_threads=use_threads) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/compat.pxi b/llmeval-env/lib/python3.10/site-packages/pyarrow/compat.pxi new file mode 100644 index 0000000000000000000000000000000000000000..8cf106d5609b50dd84c082dcfd36aee5b16fbee4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/compat.pxi @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
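The acero helpers above (`_perform_join`, `_perform_join_asof`, `_filter_table`, `_sort_source`, `_group_by`) are the backends for the public `Table` methods. A minimal sketch of driving them through that public API, assuming a recent pyarrow in which `pyarrow.acero` re-exports the node option classes used here:

```python
import pyarrow as pa
import pyarrow.compute as pc

left = pa.table({"id": [1, 2, 3], "year": [2020, 2022, 2019]})
right = pa.table({"id": [3, 4], "n_legs": [100, 5]})

# Hash join: with coalesce_keys (the default) a "full outer" join goes
# through the extra projection above, emitting one coalesced "id" column.
joined = left.join(right, keys="id", join_type="full outer")
print(joined.column_names)                       # ['id', 'year', 'n_legs']

# Table.filter / Table.sort_by / TableGroupBy.aggregate delegate to
# _filter_table, _sort_source and _group_by respectively.
filtered = left.filter(pc.field("year") > 2019)
ordered = left.sort_by([("year", "descending")])
grouped = left.group_by("id").aggregate([("year", "max")])

# The same filter written directly as an Acero declaration pipeline,
# mirroring _filter_table:
from pyarrow.acero import Declaration, TableSourceNodeOptions, FilterNodeOptions

decl = Declaration.from_sequence([
    Declaration("table_source", TableSourceNodeOptions(left)),
    Declaration("filter", FilterNodeOptions(pc.field("year") > 2019)),
])
assert decl.to_table().equals(filtered)
```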
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +def encode_file_path(path): + if isinstance(path, str): + # POSIX systems can handle utf-8. UTF8 is converted to utf16-le in + # libarrow + encoded_path = path.encode('utf-8') + else: + encoded_path = path + + # Windows file system requires utf-16le for file names; Arrow C++ libraries + # will convert utf8 to utf16 + return encoded_path + + +# Starting with Python 3.7, dicts are guaranteed to be insertion-ordered. +ordered_dict = dict + + +try: + import cloudpickle as pickle +except ImportError: + import pickle + + +def tobytes(o): + """ + Encode a unicode or bytes string to bytes. + + Parameters + ---------- + o : str or bytes + Input string. + """ + if isinstance(o, str): + return o.encode('utf8') + else: + return o + + +def frombytes(o, *, safe=False): + """ + Decode the given bytestring to unicode. + + Parameters + ---------- + o : bytes-like + Input object. + safe : bool, default False + If true, raise on encoding errors. + """ + if safe: + return o.decode('utf8', errors='replace') + else: + return o.decode('utf8') diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/compute.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/compute.py new file mode 100644 index 0000000000000000000000000000000000000000..205ab393b8b099bab03c3b19d5b57f985c5f5a2a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/compute.py @@ -0,0 +1,731 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
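`encode_file_path`, `tobytes` and `frombytes` above are small internal shims rather than public API. A rough pure-Python sketch of the behaviour they implement (the names are reused here only for illustration):

```python
def tobytes(o):
    # str -> UTF-8 encoded bytes; bytes-like input passes through unchanged
    return o.encode("utf8") if isinstance(o, str) else o


def frombytes(o, *, safe=False):
    # bytes -> str; with safe=True undecodable bytes become U+FFFD
    # replacement characters instead of raising UnicodeDecodeError
    return o.decode("utf8", errors="replace" if safe else "strict")


assert tobytes("caf\u00e9") == b"caf\xc3\xa9"
assert frombytes(b"caf\xc3\xa9") == "caf\u00e9"
assert frombytes(b"\xff", safe=True) == "\ufffd"
```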
+ +from pyarrow._compute import ( # noqa + Function, + FunctionOptions, + FunctionRegistry, + HashAggregateFunction, + HashAggregateKernel, + Kernel, + ScalarAggregateFunction, + ScalarAggregateKernel, + ScalarFunction, + ScalarKernel, + VectorFunction, + VectorKernel, + # Option classes + ArraySortOptions, + AssumeTimezoneOptions, + CastOptions, + CountOptions, + CumulativeOptions, + CumulativeSumOptions, + DayOfWeekOptions, + DictionaryEncodeOptions, + RunEndEncodeOptions, + ElementWiseAggregateOptions, + ExtractRegexOptions, + FilterOptions, + IndexOptions, + JoinOptions, + ListSliceOptions, + MakeStructOptions, + MapLookupOptions, + MatchSubstringOptions, + ModeOptions, + NullOptions, + PadOptions, + PairwiseOptions, + PartitionNthOptions, + QuantileOptions, + RandomOptions, + RankOptions, + ReplaceSliceOptions, + ReplaceSubstringOptions, + RoundBinaryOptions, + RoundOptions, + RoundTemporalOptions, + RoundToMultipleOptions, + ScalarAggregateOptions, + SelectKOptions, + SetLookupOptions, + SliceOptions, + SortOptions, + SplitOptions, + SplitPatternOptions, + StrftimeOptions, + StrptimeOptions, + StructFieldOptions, + TakeOptions, + TDigestOptions, + TrimOptions, + Utf8NormalizeOptions, + VarianceOptions, + WeekOptions, + # Functions + call_function, + function_registry, + get_function, + list_functions, + # Udf + call_tabular_function, + register_scalar_function, + register_tabular_function, + register_aggregate_function, + register_vector_function, + UdfContext, + # Expressions + Expression, +) + +from collections import namedtuple +import inspect +from textwrap import dedent +import warnings + +import pyarrow as pa +from pyarrow import _compute_docstrings +from pyarrow.vendored import docscrape + + +def _get_arg_names(func): + return func._doc.arg_names + + +_OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',)) + + +def _scrape_options_class_doc(options_class): + if not options_class.__doc__: + return None + doc = docscrape.NumpyDocString(options_class.__doc__) + return _OptionsClassDoc(doc['Parameters']) + + +def _decorate_compute_function(wrapper, exposed_name, func, options_class): + # Decorate the given compute function wrapper with useful metadata + # and documentation. + cpp_doc = func._doc + + wrapper.__arrow_compute_function__ = dict( + name=func.name, + arity=func.arity, + options_class=cpp_doc.options_class, + options_required=cpp_doc.options_required) + wrapper.__name__ = exposed_name + wrapper.__qualname__ = exposed_name + + doc_pieces = [] + + # 1. One-line summary + summary = cpp_doc.summary + if not summary: + arg_str = "arguments" if func.arity > 1 else "argument" + summary = ("Call compute function {!r} with the given {}" + .format(func.name, arg_str)) + + doc_pieces.append(f"{summary}.\n\n") + + # 2. Multi-line description + description = cpp_doc.description + if description: + doc_pieces.append(f"{description}\n\n") + + doc_addition = _compute_docstrings.function_doc_additions.get(func.name) + + # 3. Parameter description + doc_pieces.append(dedent("""\ + Parameters + ---------- + """)) + + # 3a. Compute function parameters + arg_names = _get_arg_names(func) + for arg_name in arg_names: + if func.kind in ('vector', 'scalar_aggregate'): + arg_type = 'Array-like' + else: + arg_type = 'Array-like or scalar-like' + doc_pieces.append(f"{arg_name} : {arg_type}\n") + doc_pieces.append(" Argument to compute function.\n") + + # 3b. 
Compute function option values + if options_class is not None: + options_class_doc = _scrape_options_class_doc(options_class) + if options_class_doc: + for p in options_class_doc.params: + doc_pieces.append(f"{p.name} : {p.type}\n") + for s in p.desc: + doc_pieces.append(f" {s}\n") + else: + warnings.warn(f"Options class {options_class.__name__} " + f"does not have a docstring", RuntimeWarning) + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + doc_pieces.append(dedent("""\ + {0} : optional + Parameter for {1} constructor. Either `options` + or `{0}` can be passed, but not both at the same time. + """.format(p.name, options_class.__name__))) + doc_pieces.append(dedent(f"""\ + options : pyarrow.compute.{options_class.__name__}, optional + Alternative way of passing options. + """)) + + doc_pieces.append(dedent("""\ + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """)) + + # 4. Custom addition (e.g. examples) + if doc_addition is not None: + doc_pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n"))) + + wrapper.__doc__ = "".join(doc_pieces) + return wrapper + + +def _get_options_class(func): + class_name = func._doc.options_class + if not class_name: + return None + try: + return globals()[class_name] + except KeyError: + warnings.warn("Python binding for {} not exposed" + .format(class_name), RuntimeWarning) + return None + + +def _handle_options(name, options_class, options, args, kwargs): + if args or kwargs: + if options is not None: + raise TypeError( + "Function {!r} called with both an 'options' argument " + "and additional arguments" + .format(name)) + return options_class(*args, **kwargs) + + if options is not None: + if isinstance(options, dict): + return options_class(**options) + elif isinstance(options, options_class): + return options + raise TypeError( + "Function {!r} expected a {} parameter, got {}" + .format(name, options_class, type(options))) + + return None + + +def _make_generic_wrapper(func_name, func, options_class, arity): + if options_class is None: + def wrapper(*args, memory_pool=None): + if arity is not Ellipsis and len(args) != arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args)) + return func.call(args, None, memory_pool) + else: + def wrapper(*args, memory_pool=None, options=None, **kwargs): + if arity is not Ellipsis: + if len(args) < arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + option_args = args[arity:] + args = args[:arity] + else: + option_args = () + options = _handle_options(func_name, options_class, options, + option_args, kwargs) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args), options) + return func.call(args, options, memory_pool) + return wrapper + + +def _make_signature(arg_names, var_arg_names, options_class): + from inspect import Parameter + params = [] + for name in arg_names: + params.append(Parameter(name, Parameter.POSITIONAL_ONLY)) + for name in var_arg_names: + params.append(Parameter(name, Parameter.VAR_POSITIONAL)) + if options_class is not None: + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.KEYWORD_ONLY) + if var_arg_names: + # 
Cannot have a positional argument after a *args + p = p.replace(kind=Parameter.KEYWORD_ONLY) + params.append(p) + params.append(Parameter("options", Parameter.KEYWORD_ONLY, + default=None)) + params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY, + default=None)) + return inspect.Signature(params) + + +def _wrap_function(name, func): + options_class = _get_options_class(func) + arg_names = _get_arg_names(func) + has_vararg = arg_names and arg_names[-1].startswith('*') + if has_vararg: + var_arg_names = [arg_names.pop().lstrip('*')] + else: + var_arg_names = [] + + wrapper = _make_generic_wrapper( + name, func, options_class, arity=func.arity) + wrapper.__signature__ = _make_signature(arg_names, var_arg_names, + options_class) + return _decorate_compute_function(wrapper, name, func, options_class) + + +def _make_global_functions(): + """ + Make global functions wrapping each compute function. + + Note that some of the automatically-generated wrappers may be overridden + by custom versions below. + """ + g = globals() + reg = function_registry() + + # Avoid clashes with Python keywords + rewrites = {'and': 'and_', + 'or': 'or_'} + + for cpp_name in reg.list_functions(): + name = rewrites.get(cpp_name, cpp_name) + func = reg.get_function(cpp_name) + if func.kind == "hash_aggregate": + # Hash aggregate functions are not callable, + # so let's not expose them at module level. + continue + if func.kind == "scalar_aggregate" and func.arity == 0: + # Nullary scalar aggregate functions are not callable + # directly so let's not expose them at module level. + continue + assert name not in g, name + g[cpp_name] = g[name] = _wrap_function(name, func) + + +_make_global_functions() + + +def cast(arr, target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast array values to another data type. Can also be invoked as an array + instance method. + + Parameters + ---------- + arr : Array-like + target_type : DataType or str + Type to cast to + safe : bool, default True + Check for overflows or other unsafe conversions + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. 
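`_wrap_function` and `_make_global_functions` above generate one module-level wrapper per registered kernel, attach a synthesized `__signature__`, and record metadata in `__arrow_compute_function__`. A small sketch of what that machinery produces at import time, using kernels that are expected to be registered:

```python
import inspect
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1, 2, None, 4])

# The generated wrapper is equivalent to calling the registry directly.
assert pc.sum(arr) == pc.call_function("sum", [arr])

# Metadata attached by _decorate_compute_function
meta = pc.sum.__arrow_compute_function__
print(meta["name"], meta["options_class"])        # sum ScalarAggregateOptions

# _handle_options lets option values be given either as keyword arguments
# (forwarded to the options class constructor) or as an options object.
assert pc.count(arr, mode="only_null") == pc.count(
    arr, options=pc.CountOptions(mode="only_null"))

# The synthesized signature lists the data arguments first, followed by
# the option parameters and the options / memory_pool keywords.
print(inspect.signature(pc.count))
```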
+ + Examples + -------- + >>> from datetime import datetime + >>> import pyarrow as pa + >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)]) + >>> arr.type + TimestampType(timestamp[us]) + + You can use ``pyarrow.DataType`` objects to specify the target type: + + >>> cast(arr, pa.timestamp('ms')) + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + + >>> cast(arr, pa.timestamp('ms')).type + TimestampType(timestamp[ms]) + + Alternatively, it is also supported to use the string aliases for these + types: + + >>> arr.cast('timestamp[ms]') + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + >>> arr.cast('timestamp[ms]').type + TimestampType(timestamp[ms]) + + Returns + ------- + casted : Array + The cast result as a new Array + """ + safe_vars_passed = (safe is not None) or (target_type is not None) + + if safe_vars_passed and (options is not None): + raise ValueError("Must either pass values for 'target_type' and 'safe'" + " or pass a value for 'options'") + + if options is None: + target_type = pa.types.lib.ensure_type(target_type) + if safe is False: + options = CastOptions.unsafe(target_type) + else: + options = CastOptions.safe(target_type) + return call_function("cast", [arr], options, memory_pool) + + +def index(data, value, start=None, end=None, *, memory_pool=None): + """ + Find the index of the first occurrence of a given value. + + Parameters + ---------- + data : Array-like + value : Scalar-like object + The value to search for. + start : int, optional + end : int, optional + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + index : int + the index, or -1 if not found + """ + if start is not None: + if end is not None: + data = data.slice(start, end - start) + else: + data = data.slice(start) + elif end is not None: + data = data.slice(0, end) + + if not isinstance(value, pa.Scalar): + value = pa.scalar(value, type=data.type) + elif data.type != value.type: + value = pa.scalar(value.as_py(), type=data.type) + options = IndexOptions(value=value) + result = call_function('index', [data], options, memory_pool) + if start is not None and result.as_py() >= 0: + result = pa.scalar(result.as_py() + start, type=pa.int64()) + return result + + +def take(data, indices, *, boundscheck=True, memory_pool=None): + """ + Select values (or records) from array- or table-like data given integer + selection indices. + + The result will be of the same type(s) as the input, with elements taken + from the input array (or record batch / table fields) at the given + indices. If an index is null then the corresponding value in the output + will be null. + + Parameters + ---------- + data : Array, ChunkedArray, RecordBatch, or Table + indices : Array, ChunkedArray + Must be of integer type + boundscheck : boolean, default True + Whether to boundscheck the indices. If False and there is an out of + bounds index, will likely cause the process to crash. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. 
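A couple of concrete uses of the `cast` and `index` wrappers defined above, shown as a short sketch (result reprs omitted):

```python
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1.5, 2.7, 3.1])

# A safe cast would refuse the lossy float -> int conversion; safe=False
# (equivalently CastOptions.unsafe) allows truncation.
truncated = pc.cast(arr, pa.int32(), safe=False)
print(truncated.to_pylist())                      # [1, 2, 3]

opts = pc.CastOptions.unsafe(pa.int32())
assert pc.cast(arr, options=opts).equals(truncated)

# index() searches only the slice when start/end are given, but reports
# the position relative to the original array.
letters = pa.array(["a", "b", "c", "b"])
print(pc.index(letters, "b").as_py())             # 1
print(pc.index(letters, "b", start=2).as_py())    # 3
```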
+ + Returns + ------- + result : depends on inputs + Selected values for the given indices + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> indices = pa.array([0, None, 4, 3]) + >>> arr.take(indices) + + [ + "a", + null, + "e", + null + ] + """ + options = TakeOptions(boundscheck=boundscheck) + return call_function('take', [data, indices], options, memory_pool) + + +def fill_null(values, fill_value): + """Replace each null element in values with a corresponding + element from fill_value. + + If fill_value is scalar-like, then every null element in values + will be replaced with fill_value. If fill_value is array-like, + then the i-th element in values will be replaced with the i-th + element in fill_value. + + The fill_value's type must be the same as that of values, or it + must be able to be implicitly casted to the array's type. + + This is an alias for :func:`coalesce`. + + Parameters + ---------- + values : Array, ChunkedArray, or Scalar-like object + Each null element is replaced with the corresponding value + from fill_value. + fill_value : Array, ChunkedArray, or Scalar-like object + If not same type as values, will attempt to cast. + + Returns + ------- + result : depends on inputs + Values with all null elements replaced + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([1, 2, None, 3], type=pa.int8()) + >>> fill_value = pa.scalar(5, type=pa.int8()) + >>> arr.fill_null(fill_value) + + [ + 1, + 2, + 5, + 3 + ] + >>> arr = pa.array([1, 2, None, 4, None]) + >>> arr.fill_null(pa.array([10, 20, 30, 40, 50])) + + [ + 1, + 2, + 30, + 4, + 50 + ] + """ + if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)): + fill_value = pa.scalar(fill_value, type=values.type) + elif values.type != fill_value.type: + fill_value = pa.scalar(fill_value.as_py(), type=values.type) + + return call_function("coalesce", [values, fill_value]) + + +def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the top-k ordered elements from array- or table-like + data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. + + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get top indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + result : Array + Indices of the top-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.top_k_unstable(arr, k=3) + + [ + 5, + 4, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "descending")) + else: + sort_keys = map(lambda key_name: (key_name, "descending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the bottom-k ordered elements from + array- or table-like data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. 
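`fill_null` and the `select_k_unstable` specializations above combine naturally with `take`; a brief sketch:

```python
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1, 2, None, 4, None])

# Scalar fill value: every null becomes 0.
print(pc.fill_null(arr, 0).to_pylist())                        # [1, 2, 0, 4, 0]

# Array fill value: nulls are replaced element-wise.
print(pc.fill_null(arr, pa.array([9, 9, 30, 9, 50])).to_pylist())
# [1, 2, 30, 4, 50]

# top_k_unstable returns *indices* of the k largest values; take()
# materializes the values themselves.
scores = pa.array([10, 50, 20, 40])
idx = pc.top_k_unstable(scores, k=2)
print(pc.take(scores, idx).to_pylist())                        # [50, 40]
```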
+ + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get bottom indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + result : Array of indices + Indices of the bottom-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.bottom_k_unstable(arr, k=3) + + [ + 0, + 1, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "ascending")) + else: + sort_keys = map(lambda key_name: (key_name, "ascending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def random(n, *, initializer='system', options=None, memory_pool=None): + """ + Generate numbers in the range [0, 1). + + Generated values are uniformly-distributed, double-precision + in range [0, 1). Algorithm and seed can be changed via RandomOptions. + + Parameters + ---------- + n : int + Number of values to generate, must be greater than or equal to 0 + initializer : int or str + How to initialize the underlying random generator. + If an integer is given, it is used as a seed. + If "system" is given, the random generator is initialized with + a system-specific source of (hopefully true) randomness. + Other values are invalid. + options : pyarrow.compute.RandomOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """ + options = RandomOptions(initializer=initializer) + return call_function("random", [], options, memory_pool, length=n) + + +def field(*name_or_index): + """Reference a column of the dataset. + + Stores only the field's name. Type and other information is known only when + the expression is bound to a dataset having an explicit scheme. + + Nested references are allowed by passing multiple names or a tuple of + names. For example ``('foo', 'bar')`` references the field named "bar" + inside the field named "foo". + + Parameters + ---------- + *name_or_index : string, multiple strings, tuple or int + The name or index of the (possibly nested) field the expression + references to. 
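`random` and `field` round out the hand-written wrappers; a short sketch of how they behave, assuming a pyarrow version in which `Table.filter` accepts an `Expression`:

```python
import pyarrow as pa
import pyarrow.compute as pc

# An integer initializer seeds the generator, so repeated calls match;
# initializer="system" would draw a fresh seed each time.
a = pc.random(3, initializer=42)
b = pc.random(3, initializer=42)
assert a.equals(b)

# field() builds an unbound Expression that is only resolved against a
# schema when the expression is used, e.g. as a filter.
table = pa.table({"animal": ["flamingo", "horse", "centipede"],
                  "n_legs": [2, 4, 100]})
expr = pc.field("n_legs") >= 4
print(table.filter(expr)["animal"].to_pylist())   # ['horse', 'centipede']

# Nested references: both spellings point at column "a", child "b".
pc.field("a", "b")
pc.field(("a", "b"))
```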
+ + Returns + ------- + field_expr : Expression + Reference to the given field + + Examples + -------- + >>> import pyarrow.compute as pc + >>> pc.field("a") + + >>> pc.field(1) + + >>> pc.field(("a", "b")) + >> pc.field("a", "b") + len(set(table.column_names)): + raise ValueError("cannot serialize duplicate column names") + + if compression is not None: + raise ValueError("Feather V1 files do not support compression " + "option") + + if chunksize is not None: + raise ValueError("Feather V1 files do not support chunksize " + "option") + else: + if compression is None and Codec.is_available('lz4_frame'): + compression = 'lz4' + elif (compression is not None and + compression not in _FEATHER_SUPPORTED_CODECS): + raise ValueError('compression="{}" not supported, must be ' + 'one of {}'.format(compression, + _FEATHER_SUPPORTED_CODECS)) + + try: + _feather.write_feather(table, dest, compression=compression, + compression_level=compression_level, + chunksize=chunksize, version=version) + except Exception: + if isinstance(dest, str): + try: + os.remove(dest) + except os.error: + pass + raise + + +def read_feather(source, columns=None, use_threads=True, + memory_map=False, **kwargs): + """ + Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use + feather.read_table. + + Parameters + ---------- + source : str file path, or file-like object + You can use MemoryMappedFile as source, for explicitly use memory map. + columns : sequence, optional + Only read a specific set of columns. If not provided, all columns are + read. + use_threads : bool, default True + Whether to parallelize reading using multiple threads. If false the + restriction is used in the conversion to Pandas as well as in the + reading from Feather format. + memory_map : boolean, default False + Use memory mapping when opening file on disk, when source is a str. + **kwargs + Additional keyword arguments passed on to `pyarrow.Table.to_pandas`. + + Returns + ------- + df : pandas.DataFrame + The contents of the Feather file as a pandas.DataFrame + """ + return (read_table( + source, columns=columns, memory_map=memory_map, + use_threads=use_threads).to_pandas(use_threads=use_threads, **kwargs)) + + +def read_table(source, columns=None, memory_map=False, use_threads=True): + """ + Read a pyarrow.Table from Feather format + + Parameters + ---------- + source : str file path, or file-like object + You can use MemoryMappedFile as source, for explicitly use memory map. + columns : sequence, optional + Only read a specific set of columns. If not provided, all columns are + read. + memory_map : boolean, default False + Use memory mapping when opening file on disk, when source is a str + use_threads : bool, default True + Whether to parallelize reading using multiple threads. + + Returns + ------- + table : pyarrow.Table + The contents of the Feather file as a pyarrow.Table + """ + reader = _feather.FeatherReader( + source, use_memory_map=memory_map, use_threads=use_threads) + + if columns is None: + return reader.read() + + column_types = [type(column) for column in columns] + if all(map(lambda t: t == int, column_types)): + table = reader.read_indices(columns) + elif all(map(lambda t: t == str, column_types)): + table = reader.read_names(columns) + else: + column_type_names = [t.__name__ for t in column_types] + raise TypeError("Columns must be indices or names. 
" + "Got columns {} of types {}" + .format(columns, column_type_names)) + + # Feather v1 already respects the column selection + if reader.version < 3: + return table + # Feather v2 reads with sorted / deduplicated selection + elif sorted(set(columns)) == columns: + return table + else: + # follow exact order / selection of names + return table.select(columns) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/api.h new file mode 100644 index 0000000000000000000000000000000000000000..ac568a00eedc32984758f4675b58ac626c9c947a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/api.h @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Coarse public API while the library is in development + +#pragma once + +#include "arrow/array.h" // IWYU pragma: export +#include "arrow/array/array_run_end.h" // IWYU pragma: export +#include "arrow/array/concatenate.h" // IWYU pragma: export +#include "arrow/buffer.h" // IWYU pragma: export +#include "arrow/builder.h" // IWYU pragma: export +#include "arrow/chunked_array.h" // IWYU pragma: export +#include "arrow/compare.h" // IWYU pragma: export +#include "arrow/config.h" // IWYU pragma: export +#include "arrow/datum.h" // IWYU pragma: export +#include "arrow/extension_type.h" // IWYU pragma: export +#include "arrow/memory_pool.h" // IWYU pragma: export +#include "arrow/pretty_print.h" // IWYU pragma: export +#include "arrow/record_batch.h" // IWYU pragma: export +#include "arrow/result.h" // IWYU pragma: export +#include "arrow/status.h" // IWYU pragma: export +#include "arrow/table.h" // IWYU pragma: export +#include "arrow/table_builder.h" // IWYU pragma: export +#include "arrow/tensor.h" // IWYU pragma: export +#include "arrow/type.h" // IWYU pragma: export +#include "arrow/util/key_value_metadata.h" // IWYU pragma: export +#include "arrow/visit_array_inline.h" // IWYU pragma: export +#include "arrow/visit_scalar_inline.h" // IWYU pragma: export +#include "arrow/visitor.h" // IWYU pragma: export + +/// \brief Top-level namespace for Apache Arrow C++ API +namespace arrow {} diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array.h new file mode 100644 index 0000000000000000000000000000000000000000..4d72ea9506a414fd6e50d5c7d0af437084045e05 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array.h @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Kitchen-sink public API for arrow::Array data structures. C++ library code +// (especially header files) in Apache Arrow should use more specific headers +// unless it's a file that uses most or all Array types in which case using +// arrow/array.h is fine. + +#pragma once + +/// \defgroup numeric-arrays Concrete classes for numeric arrays +/// @{ +/// @} + +/// \defgroup binary-arrays Concrete classes for binary/string arrays +/// @{ +/// @} + +/// \defgroup nested-arrays Concrete classes for nested arrays +/// @{ +/// @} + +/// \defgroup run-end-encoded-arrays Concrete classes for run-end encoded arrays +/// @{ +/// @} + +#include "arrow/array/array_base.h" // IWYU pragma: keep +#include "arrow/array/array_binary.h" // IWYU pragma: keep +#include "arrow/array/array_decimal.h" // IWYU pragma: keep +#include "arrow/array/array_dict.h" // IWYU pragma: keep +#include "arrow/array/array_nested.h" // IWYU pragma: keep +#include "arrow/array/array_primitive.h" // IWYU pragma: keep +#include "arrow/array/array_run_end.h" // IWYU pragma: keep +#include "arrow/array/data.h" // IWYU pragma: keep +#include "arrow/array/util.h" // IWYU pragma: keep diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..fbf4a22e350cac7f6cffa766d96fe149ddb996db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h @@ -0,0 +1,587 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/device.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/span.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Buffer classes + +/// \class Buffer +/// \brief Object containing a pointer to a piece of contiguous memory with a +/// particular size. +/// +/// Buffers have two related notions of length: size and capacity. 
Size is +/// the number of bytes that might have valid data. Capacity is the number +/// of bytes that were allocated for the buffer in total. +/// +/// The Buffer base class does not own its memory, but subclasses often do. +/// +/// The following invariant is always true: Size <= Capacity +class ARROW_EXPORT Buffer { + public: + ARROW_DISALLOW_COPY_AND_ASSIGN(Buffer); + + /// \brief Construct from buffer and size without copying memory + /// + /// \param[in] data a memory buffer + /// \param[in] size buffer size + /// + /// \note The passed memory must be kept alive through some other means + Buffer(const uint8_t* data, int64_t size) + : is_mutable_(false), + is_cpu_(true), + data_(data), + size_(size), + capacity_(size), + device_type_(DeviceAllocationType::kCPU) { + SetMemoryManager(default_cpu_memory_manager()); + } + + Buffer(const uint8_t* data, int64_t size, std::shared_ptr mm, + std::shared_ptr parent = NULLPTR, + std::optional device_type_override = std::nullopt) + : is_mutable_(false), + data_(data), + size_(size), + capacity_(size), + parent_(std::move(parent)) { + // SetMemoryManager will also set device_type_ + SetMemoryManager(std::move(mm)); + // If a device type is specified, use that instead. Example of when this can be + // useful: the CudaMemoryManager can set device_type_ to kCUDA, but you can specify + // device_type_override=kCUDA_HOST as the device type to override it. + if (device_type_override != std::nullopt) { + device_type_ = *device_type_override; + } + } + + Buffer(uintptr_t address, int64_t size, std::shared_ptr mm, + std::shared_ptr parent = NULLPTR) + : Buffer(reinterpret_cast(address), size, std::move(mm), + std::move(parent)) {} + + /// \brief Construct from string_view without copying memory + /// + /// \param[in] data a string_view object + /// + /// \note The memory viewed by data must not be deallocated in the lifetime of the + /// Buffer; temporary rvalue strings must be stored in an lvalue somewhere + explicit Buffer(std::string_view data) + : Buffer(reinterpret_cast(data.data()), + static_cast(data.size())) {} + + virtual ~Buffer() = default; + + /// An offset into data that is owned by another buffer, but we want to be + /// able to retain a valid pointer to it even after other shared_ptr's to the + /// parent buffer have been destroyed + /// + /// This method makes no assertions about alignment or padding of the buffer but + /// in general we expected buffers to be aligned and padded to 64 bytes. In the future + /// we might add utility methods to help determine if a buffer satisfies this contract. + Buffer(const std::shared_ptr& parent, const int64_t offset, const int64_t size) + : Buffer(parent->data_ + offset, size) { + parent_ = parent; + SetMemoryManager(parent->memory_manager_); + } + + uint8_t operator[](std::size_t i) const { return data_[i]; } + + /// \brief Construct a new std::string with a hexadecimal representation of the buffer. + /// \return std::string + std::string ToHexString(); + + /// Return true if both buffers are the same size and contain the same bytes + /// up to the number of compared bytes + bool Equals(const Buffer& other, int64_t nbytes) const; + + /// Return true if both buffers are the same size and contain the same bytes + bool Equals(const Buffer& other) const; + + /// Copy a section of the buffer into a new Buffer. + Result> CopySlice( + const int64_t start, const int64_t nbytes, + MemoryPool* pool = default_memory_pool()) const; + + /// Zero bytes in padding, i.e. bytes between size_ and capacity_. 
+ void ZeroPadding() { +#ifndef NDEBUG + CheckMutable(); +#endif + // A zero-capacity buffer can have a null data pointer + if (capacity_ != 0) { + memset(mutable_data() + size_, 0, static_cast(capacity_ - size_)); + } + } + + /// \brief Construct an immutable buffer that takes ownership of the contents + /// of an std::string (without copying it). + /// + /// \param[in] data a string to own + /// \return a new Buffer instance + static std::shared_ptr FromString(std::string data); + + /// \brief Construct an immutable buffer that takes ownership of the contents + /// of an std::vector (without copying it). Only vectors of TrivialType objects + /// (integers, floating point numbers, ...) can be wrapped by this function. + /// + /// \param[in] vec a vector to own + /// \return a new Buffer instance + template + static std::shared_ptr FromVector(std::vector vec) { + static_assert(std::is_trivial_v, + "Buffer::FromVector can only wrap vectors of trivial objects"); + + if (vec.empty()) { + return std::shared_ptr{new Buffer()}; + } + + auto* data = reinterpret_cast(vec.data()); + auto size_in_bytes = static_cast(vec.size() * sizeof(T)); + return std::shared_ptr{ + new Buffer{data, size_in_bytes}, + // Keep the vector's buffer alive inside the shared_ptr's destructor until after + // we have deleted the Buffer. Note we can't use this trick in FromString since + // std::string's data is inline for short strings so moving invalidates pointers + // into the string's buffer. + [vec = std::move(vec)](Buffer* buffer) { delete buffer; }}; + } + + /// \brief Create buffer referencing typed memory with some length without + /// copying + /// \param[in] data the typed memory as C array + /// \param[in] length the number of values in the array + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(const T* data, SizeType length) { + return std::make_shared(reinterpret_cast(data), + static_cast(sizeof(T) * length)); + } + + /// \brief Create buffer referencing std::vector with some length without + /// copying + /// \param[in] data the vector to be referenced. If this vector is changed, + /// the buffer may become invalid + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(const std::vector& data) { + return std::make_shared(reinterpret_cast(data.data()), + static_cast(sizeof(T) * data.size())); + } + + /// \brief Copy buffer contents into a new std::string + /// \return std::string + /// \note Can throw std::bad_alloc if buffer is large + std::string ToString() const; + + /// \brief View buffer contents as a std::string_view + /// \return std::string_view + explicit operator std::string_view() const { + return {reinterpret_cast(data_), static_cast(size_)}; + } + + /// \brief Return a pointer to the buffer's data + /// + /// The buffer has to be a CPU buffer (`is_cpu()` is true). + /// Otherwise, an assertion may be thrown or a null pointer may be returned. + /// + /// To get the buffer's data address regardless of its device, call `address()`. + const uint8_t* data() const { +#ifndef NDEBUG + CheckCPU(); +#endif + return ARROW_PREDICT_TRUE(is_cpu_) ? data_ : NULLPTR; + } + + /// \brief Return a pointer to the buffer's data cast to a specific type + /// + /// The buffer has to be a CPU buffer (`is_cpu()` is true). + /// Otherwise, an assertion may be thrown or a null pointer may be returned. 
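The `arrow::Buffer` semantics documented here surface in Python as `pyarrow.Buffer`; a small sketch at that level (Python bindings, not the C++ API itself):

```python
import pyarrow as pa

# py_buffer() wraps existing memory without copying, analogous to the
# non-owning Buffer(data, size) constructor above.
data = b"abcdefgh"
buf = pa.py_buffer(data)
print(buf.size, buf.is_cpu, buf.is_mutable)       # 8 True False

# allocate_buffer() returns a mutable, pool-allocated buffer.
mbuf = pa.allocate_buffer(16)
print(mbuf.size, mbuf.is_mutable)                 # 16 True

# equals() / to_pybytes() play the role of Buffer::Equals and
# Buffer::ToString on the Python side.
assert buf.equals(pa.py_buffer(b"abcdefgh"))
print(buf.to_pybytes())                           # b'abcdefgh'
```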
+ template + const T* data_as() const { + return reinterpret_cast(data()); + } + + /// \brief Return the buffer's data as a span + template + util::span span_as() const { + return util::span(data_as(), static_cast(size() / sizeof(T))); + } + + /// \brief Return a writable pointer to the buffer's data + /// + /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()` + /// are true). Otherwise, an assertion may be thrown or a null pointer may + /// be returned. + /// + /// To get the buffer's mutable data address regardless of its device, call + /// `mutable_address()`. + uint8_t* mutable_data() { +#ifndef NDEBUG + CheckCPU(); + CheckMutable(); +#endif + return ARROW_PREDICT_TRUE(is_cpu_ && is_mutable_) ? const_cast(data_) + : NULLPTR; + } + + /// \brief Return a writable pointer to the buffer's data cast to a specific type + /// + /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()` + /// are true). Otherwise, an assertion may be thrown or a null pointer may + /// be returned. + template + T* mutable_data_as() { + return reinterpret_cast(mutable_data()); + } + + /// \brief Return the buffer's mutable data as a span + template + util::span mutable_span_as() { + return util::span(mutable_data_as(), static_cast(size() / sizeof(T))); + } + + /// \brief Return the device address of the buffer's data + uintptr_t address() const { return reinterpret_cast(data_); } + + /// \brief Return a writable device address to the buffer's data + /// + /// The buffer has to be a mutable buffer (`is_mutable()` is true). + /// Otherwise, an assertion may be thrown or 0 may be returned. + uintptr_t mutable_address() const { +#ifndef NDEBUG + CheckMutable(); +#endif + return ARROW_PREDICT_TRUE(is_mutable_) ? reinterpret_cast(data_) : 0; + } + + /// \brief Return the buffer's size in bytes + int64_t size() const { return size_; } + + /// \brief Return the buffer's capacity (number of allocated bytes) + int64_t capacity() const { return capacity_; } + + /// \brief Whether the buffer is directly CPU-accessible + /// + /// If this function returns true, you can read directly from the buffer's + /// `data()` pointer. Otherwise, you'll have to `View()` or `Copy()` it. + bool is_cpu() const { return is_cpu_; } + + /// \brief Whether the buffer is mutable + /// + /// If this function returns true, you are allowed to modify buffer contents + /// using the pointer returned by `mutable_data()` or `mutable_address()`. + bool is_mutable() const { return is_mutable_; } + + const std::shared_ptr& device() const { return memory_manager_->device(); } + + const std::shared_ptr& memory_manager() const { return memory_manager_; } + + DeviceAllocationType device_type() const { return device_type_; } + + std::shared_ptr parent() const { return parent_; } + + /// \brief Get a RandomAccessFile for reading a buffer + /// + /// The returned file object reads from this buffer's underlying memory. + static Result> GetReader(std::shared_ptr); + + /// \brief Get a OutputStream for writing to a buffer + /// + /// The buffer must be mutable. The returned stream object writes into the buffer's + /// underlying memory (but it won't resize it). + static Result> GetWriter(std::shared_ptr); + + /// \brief Copy buffer + /// + /// The buffer contents will be copied into a new buffer allocated by the + /// given MemoryManager. This function supports cross-device copies. 
+ static Result> Copy(std::shared_ptr source, + const std::shared_ptr& to); + + /// \brief Copy a non-owned buffer + /// + /// This is useful for cases where the source memory area is externally managed + /// (its lifetime not tied to the source Buffer), otherwise please use Copy(). + static Result> CopyNonOwned( + const Buffer& source, const std::shared_ptr& to); + + /// \brief View buffer + /// + /// Return a Buffer that reflects this buffer, seen potentially from another + /// device, without making an explicit copy of the contents. The underlying + /// mechanism is typically implemented by the kernel or device driver, and may + /// involve lazy caching of parts of the buffer contents on the destination + /// device's memory. + /// + /// If a non-copy view is unsupported for the buffer on the given device, + /// nullptr is returned. An error can be returned if some low-level + /// operation fails (such as an out-of-memory condition). + static Result> View(std::shared_ptr source, + const std::shared_ptr& to); + + /// \brief View or copy buffer + /// + /// Try to view buffer contents on the given MemoryManager's device, but + /// fall back to copying if a no-copy view isn't supported. + static Result> ViewOrCopy( + std::shared_ptr source, const std::shared_ptr& to); + + virtual std::shared_ptr device_sync_event() const { return NULLPTR; } + + protected: + bool is_mutable_; + bool is_cpu_; + const uint8_t* data_; + int64_t size_; + int64_t capacity_; + DeviceAllocationType device_type_; + + // null by default, but may be set + std::shared_ptr parent_; + + private: + // private so that subclasses are forced to call SetMemoryManager() + std::shared_ptr memory_manager_; + + protected: + Buffer(); + + void CheckMutable() const; + void CheckCPU() const; + + void SetMemoryManager(std::shared_ptr mm) { + memory_manager_ = std::move(mm); + is_cpu_ = memory_manager_->is_cpu(); + device_type_ = memory_manager_->device()->device_type(); + } +}; + +/// \defgroup buffer-slicing-functions Functions for slicing buffers +/// +/// @{ + +/// \brief Construct a view on a buffer at the given offset and length. +/// +/// This function cannot fail and does not check for errors (except in debug builds) +static inline std::shared_ptr SliceBuffer(const std::shared_ptr& buffer, + const int64_t offset, + const int64_t length) { + return std::make_shared(buffer, offset, length); +} + +/// \brief Construct a view on a buffer at the given offset, up to the buffer's end. +/// +/// This function cannot fail and does not check for errors (except in debug builds) +static inline std::shared_ptr SliceBuffer(const std::shared_ptr& buffer, + const int64_t offset) { + int64_t length = buffer->size() - offset; + return SliceBuffer(buffer, offset, length); +} + +/// \brief Input-checking version of SliceBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +ARROW_EXPORT +Result> SliceBufferSafe(const std::shared_ptr& buffer, + int64_t offset); +/// \brief Input-checking version of SliceBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size. +ARROW_EXPORT +Result> SliceBufferSafe(const std::shared_ptr& buffer, + int64_t offset, int64_t length); + +/// \brief Like SliceBuffer, but construct a mutable buffer slice. +/// +/// If the parent buffer is not mutable, behavior is undefined (it may abort +/// in debug builds). 
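The slicing helpers (`SliceBuffer` and friends) are exposed in Python as `Buffer.slice`, which is likewise zero-copy; a quick sketch:

```python
import pyarrow as pa

buf = pa.py_buffer(b"0123456789")

# slice() references the parent's memory rather than copying it,
# mirroring SliceBuffer(parent, offset, length).
middle = buf.slice(2, 4)
print(middle.to_pybytes())            # b'2345'
print(middle.parent is not None)      # True: the slice keeps its parent alive

# Omitting the length slices through to the end of the buffer.
tail = buf.slice(7)
print(tail.to_pybytes())              # b'789'
```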
+ARROW_EXPORT +std::shared_ptr SliceMutableBuffer(const std::shared_ptr& buffer, + const int64_t offset, const int64_t length); + +/// \brief Like SliceBuffer, but construct a mutable buffer slice. +/// +/// If the parent buffer is not mutable, behavior is undefined (it may abort +/// in debug builds). +static inline std::shared_ptr SliceMutableBuffer( + const std::shared_ptr& buffer, const int64_t offset) { + int64_t length = buffer->size() - offset; + return SliceMutableBuffer(buffer, offset, length); +} + +/// \brief Input-checking version of SliceMutableBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +ARROW_EXPORT +Result> SliceMutableBufferSafe( + const std::shared_ptr& buffer, int64_t offset); +/// \brief Input-checking version of SliceMutableBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size. +ARROW_EXPORT +Result> SliceMutableBufferSafe( + const std::shared_ptr& buffer, int64_t offset, int64_t length); + +/// @} + +/// \class MutableBuffer +/// \brief A Buffer whose contents can be mutated. May or may not own its data. +class ARROW_EXPORT MutableBuffer : public Buffer { + public: + MutableBuffer(uint8_t* data, const int64_t size) : Buffer(data, size) { + is_mutable_ = true; + } + + MutableBuffer(uint8_t* data, const int64_t size, std::shared_ptr mm) + : Buffer(data, size, std::move(mm)) { + is_mutable_ = true; + } + + MutableBuffer(const std::shared_ptr& parent, const int64_t offset, + const int64_t size); + + /// \brief Create buffer referencing typed memory with some length + /// \param[in] data the typed memory as C array + /// \param[in] length the number of values in the array + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(T* data, SizeType length) { + return std::make_shared(reinterpret_cast(data), + static_cast(sizeof(T) * length)); + } + + protected: + MutableBuffer() : Buffer(NULLPTR, 0) {} +}; + +/// \class ResizableBuffer +/// \brief A mutable buffer that can be resized +class ARROW_EXPORT ResizableBuffer : public MutableBuffer { + public: + /// Change buffer reported size to indicated size, allocating memory if + /// necessary. This will ensure that the capacity of the buffer is a multiple + /// of 64 bytes as defined in Layout.md. + /// Consider using ZeroPadding afterwards, to conform to the Arrow layout + /// specification. + /// + /// @param new_size The new size for the buffer. + /// @param shrink_to_fit Whether to shrink the capacity if new size < current size + virtual Status Resize(const int64_t new_size, bool shrink_to_fit) = 0; + Status Resize(const int64_t new_size) { + return Resize(new_size, /*shrink_to_fit=*/true); + } + + /// Ensure that buffer has enough memory allocated to fit the indicated + /// capacity (and meets the 64 byte padding requirement in Layout.md). + /// It does not change buffer's reported size and doesn't zero the padding. 
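`ResizableBuffer` is reachable from Python by asking `allocate_buffer` for a resizable buffer; a minimal sketch:

```python
import pyarrow as pa

# resizable=True yields a ResizableBuffer whose resize() wraps
# ResizableBuffer::Resize above.
buf = pa.allocate_buffer(10, resizable=True)
print(buf.size)     # 10

buf.resize(100)     # grow, reallocating if necessary
print(buf.size)     # 100

buf.resize(4)       # shrink the reported size
print(buf.size)     # 4
```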
+ virtual Status Reserve(const int64_t new_capacity) = 0; + + template + Status TypedResize(const int64_t new_nb_elements, bool shrink_to_fit = true) { + return Resize(sizeof(T) * new_nb_elements, shrink_to_fit); + } + + template + Status TypedReserve(const int64_t new_nb_elements) { + return Reserve(sizeof(T) * new_nb_elements); + } + + protected: + ResizableBuffer(uint8_t* data, int64_t size) : MutableBuffer(data, size) {} + ResizableBuffer(uint8_t* data, int64_t size, std::shared_ptr mm) + : MutableBuffer(data, size, std::move(mm)) {} +}; + +/// \defgroup buffer-allocation-functions Functions for allocating buffers +/// +/// @{ + +/// \brief Allocate a fixed size mutable buffer from a memory pool, zero its padding. +/// +/// \param[in] size size of buffer to allocate +/// \param[in] pool a memory pool +ARROW_EXPORT +Result> AllocateBuffer(const int64_t size, + MemoryPool* pool = NULLPTR); +ARROW_EXPORT +Result> AllocateBuffer(const int64_t size, int64_t alignment, + MemoryPool* pool = NULLPTR); + +/// \brief Allocate a resizeable buffer from a memory pool, zero its padding. +/// +/// \param[in] size size of buffer to allocate +/// \param[in] pool a memory pool +ARROW_EXPORT +Result> AllocateResizableBuffer( + const int64_t size, MemoryPool* pool = NULLPTR); +ARROW_EXPORT +Result> AllocateResizableBuffer( + const int64_t size, const int64_t alignment, MemoryPool* pool = NULLPTR); + +/// \brief Allocate a bitmap buffer from a memory pool +/// no guarantee on values is provided. +/// +/// \param[in] length size in bits of bitmap to allocate +/// \param[in] pool memory pool to allocate memory from +ARROW_EXPORT +Result> AllocateBitmap(int64_t length, + MemoryPool* pool = NULLPTR); + +/// \brief Allocate a zero-initialized bitmap buffer from a memory pool +/// +/// \param[in] length size in bits of bitmap to allocate +/// \param[in] pool memory pool to allocate memory from +ARROW_EXPORT +Result> AllocateEmptyBitmap(int64_t length, + MemoryPool* pool = NULLPTR); + +ARROW_EXPORT +Result> AllocateEmptyBitmap(int64_t length, int64_t alignment, + MemoryPool* pool = NULLPTR); + +/// \brief Concatenate multiple buffers into a single buffer +/// +/// \param[in] buffers to be concatenated +/// \param[in] pool memory pool to allocate the new buffer from +ARROW_EXPORT +Result> ConcatenateBuffers(const BufferVector& buffers, + MemoryPool* pool = NULLPTR); + +/// @} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a84c98b6b24917faf53a821c5c3e5f62471bb9aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h @@ -0,0 +1,484 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_generate.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Buffer builder classes + +/// \class BufferBuilder +/// \brief A class for incrementally building a contiguous chunk of in-memory +/// data +class ARROW_EXPORT BufferBuilder { + public: + explicit BufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : pool_(pool), + data_(/*ensure never null to make ubsan happy and avoid check penalties below*/ + util::MakeNonNull()), + capacity_(0), + size_(0), + alignment_(alignment) {} + + /// \brief Constructs new Builder that will start using + /// the provided buffer until Finish/Reset are called. + /// The buffer is not resized. + explicit BufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : buffer_(std::move(buffer)), + pool_(pool), + data_(buffer_->mutable_data()), + capacity_(buffer_->capacity()), + size_(buffer_->size()), + alignment_(alignment) {} + + /// \brief Resize the buffer to the nearest multiple of 64 bytes + /// + /// \param new_capacity the new capacity of the of the builder. Will be + /// rounded up to a multiple of 64 bytes for padding + /// \param shrink_to_fit if new capacity is smaller than the existing, + /// reallocate internal buffer. Set to false to avoid reallocations when + /// shrinking the builder. + /// \return Status + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + if (buffer_ == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(buffer_, + AllocateResizableBuffer(new_capacity, alignment_, pool_)); + } else { + ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit)); + } + capacity_ = buffer_->capacity(); + data_ = buffer_->mutable_data(); + return Status::OK(); + } + + /// \brief Ensure that builder can accommodate the additional number of bytes + /// without the need to perform allocations + /// + /// \param[in] additional_bytes number of additional bytes to make space for + /// \return Status + Status Reserve(const int64_t additional_bytes) { + auto min_capacity = size_ + additional_bytes; + if (min_capacity <= capacity_) { + return Status::OK(); + } + return Resize(GrowByFactor(capacity_, min_capacity), false); + } + + /// \brief Return a capacity expanded by the desired growth factor + static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) { + // Doubling capacity except for large Reserve requests. 2x growth strategy + // (versus 1.5x) seems to have slightly better performance when using + // jemalloc, but significantly better performance when using the system + // allocator. See ARROW-6450 for further discussion + return std::max(new_capacity, current_capacity * 2); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. 
+ Status Append(const void* data, const int64_t length) { + if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) { + ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false)); + } + UnsafeAppend(data, length); + return Status::OK(); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(std::string_view v) { return Append(v.data(), v.size()); } + + /// \brief Append copies of a value to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(const int64_t num_copies, uint8_t value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + // Advance pointer and zero out memory + Status Advance(const int64_t length) { return Append(length, 0); } + + // Advance pointer, but don't allocate or zero memory + void UnsafeAdvance(const int64_t length) { size_ += length; } + + // Unsafe methods don't check existing size + void UnsafeAppend(const void* data, const int64_t length) { + memcpy(data_ + size_, data, static_cast(length)); + size_ += length; + } + + void UnsafeAppend(std::string_view v) { + UnsafeAppend(v.data(), static_cast(v.size())); + } + + void UnsafeAppend(const int64_t num_copies, uint8_t value) { + memset(data_ + size_, value, static_cast(num_copies)); + size_ += num_copies; + } + + /// \brief Return result of builder as a Buffer object. + /// + /// The builder is reset and can be reused afterwards. + /// + /// \param[out] out the finalized Buffer object + /// \param shrink_to_fit if the buffer size is smaller than its capacity, + /// reallocate to fit more tightly in memory. Set to false to avoid + /// a reallocation, at the expense of potentially more memory consumption. + /// \return Status + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit)); + if (size_ != 0) buffer_->ZeroPadding(); + *out = buffer_; + if (*out == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_)); + } + Reset(); + return Status::OK(); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using BufferBuilder + /// mostly for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + size_ = final_length; + return Finish(shrink_to_fit); + } + + void Reset() { + buffer_ = NULLPTR; + capacity_ = size_ = 0; + } + + /// \brief Set size to a smaller value without modifying builder + /// contents. 
For reusable BufferBuilder classes + /// \param[in] position must be non-negative and less than or equal + /// to the current length() + void Rewind(int64_t position) { size_ = position; } + + int64_t capacity() const { return capacity_; } + int64_t length() const { return size_; } + const uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return data_; } + template + const T* data_as() const { + return reinterpret_cast(data_); + } + template + T* mutable_data_as() { + return reinterpret_cast(data_); + } + + private: + std::shared_ptr buffer_; + MemoryPool* pool_; + uint8_t* data_; + int64_t capacity_; + int64_t size_; + int64_t alignment_; +}; + +template +class TypedBufferBuilder; + +/// \brief A BufferBuilder for building a buffer of arithmetic elements +template +class TypedBufferBuilder< + T, typename std::enable_if::value || + std::is_standard_layout::value>::type> { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool()) + : bytes_builder_(std::move(buffer), pool) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(T value) { + return bytes_builder_.Append(reinterpret_cast(&value), sizeof(T)); + } + + Status Append(const T* values, int64_t num_elements) { + return bytes_builder_.Append(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + Status Append(const int64_t num_copies, T value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies + length())); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(T value) { + bytes_builder_.UnsafeAppend(reinterpret_cast(&value), sizeof(T)); + } + + void UnsafeAppend(const T* values, int64_t num_elements) { + bytes_builder_.UnsafeAppend(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + template + void UnsafeAppend(Iter values_begin, Iter values_end) { + auto num_elements = static_cast(std::distance(values_begin, values_end)); + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_elements * sizeof(T)); + std::copy(values_begin, values_end, data); + } + + void UnsafeAppend(const int64_t num_copies, T value) { + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_copies * sizeof(T)); + std::fill(data, data + num_copies, value); + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit); + } + + Status Reserve(const int64_t additional_elements) { + return bytes_builder_.Reserve(additional_elements * sizeof(T)); + } + + Status Advance(const int64_t length) { + return bytes_builder_.Advance(length * sizeof(T)); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). 
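+  // Illustrative usage sketch (not part of the upstream header): building a
+  // buffer of int32 values.  All calls shown are declared in this class.
+  //
+  //   Status BuildInt32Example(std::shared_ptr<Buffer>* out) {
+  //     TypedBufferBuilder<int32_t> builder;
+  //     ARROW_RETURN_NOT_OK(builder.Append(1));
+  //     const int32_t more[] = {2, 3, 4};
+  //     ARROW_RETURN_NOT_OK(builder.Append(more, /*num_elements=*/3));
+  //     return builder.Finish(out);  // length() was 4 elements, i.e. 16 bytes
+  //   }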
+ Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit); + } + + void Reset() { bytes_builder_.Reset(); } + + int64_t length() const { return bytes_builder_.length() / sizeof(T); } + int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); } + const T* data() const { return reinterpret_cast(bytes_builder_.data()); } + T* mutable_data() { return reinterpret_cast(bytes_builder_.mutable_data()); } + + private: + BufferBuilder bytes_builder_; +}; + +/// \brief A BufferBuilder for building a buffer containing a bitmap +template <> +class TypedBufferBuilder { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(bool value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const uint8_t* valid_bytes, int64_t num_elements) { + ARROW_RETURN_NOT_OK(Reserve(num_elements)); + UnsafeAppend(valid_bytes, num_elements); + return Status::OK(); + } + + Status Append(const int64_t num_copies, bool value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(bool value) { + bit_util::SetBitTo(mutable_data(), bit_length_, value); + if (!value) { + ++false_count_; + } + ++bit_length_; + } + + /// \brief Append bits from an array of bytes (one value per byte) + void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) { + if (num_elements == 0) return; + int64_t i = 0; + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = bytes[i++]; + false_count_ += !value; + return value; + }); + bit_length_ += num_elements; + } + + /// \brief Append bits from a packed bitmap + void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) { + if (num_elements == 0) return; + internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_); + false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements); + bit_length_ += num_elements; + } + + void UnsafeAppend(const int64_t num_copies, bool value) { + bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value); + false_count_ += num_copies * !value; + bit_length_ += num_copies; + } + + template + void UnsafeAppend(const int64_t num_elements, Generator&& gen) { + if (num_elements == 0) return; + + if (count_falses) { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = gen(); + false_count_ += !value; + return value; + }); + } else { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, + std::forward(gen)); + } + bit_length_ += num_elements; + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + const int64_t old_byte_capacity = bytes_builder_.capacity(); + ARROW_RETURN_NOT_OK( + bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit)); + // Resize() may have chosen a larger capacity (e.g. for padding), + // so ask it again before calling memset(). 
+ const int64_t new_byte_capacity = bytes_builder_.capacity(); + if (new_byte_capacity > old_byte_capacity) { + // The additional buffer space is 0-initialized for convenience, + // so that other methods can simply bump the length. + memset(mutable_data() + old_byte_capacity, 0, + static_cast(new_byte_capacity - old_byte_capacity)); + } + return Status::OK(); + } + + Status Reserve(const int64_t additional_elements) { + return Resize( + BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements), + false); + } + + Status Advance(const int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(length)); + bit_length_ += length; + false_count_ += length; + return Status::OK(); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + // set bytes_builder_.size_ == byte size of data + bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) - + bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + const auto final_byte_length = bit_util::BytesForBits(final_length); + bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit); + } + + void Reset() { + bytes_builder_.Reset(); + bit_length_ = false_count_ = 0; + } + + int64_t length() const { return bit_length_; } + int64_t capacity() const { return bytes_builder_.capacity() * 8; } + const uint8_t* data() const { return bytes_builder_.data(); } + uint8_t* mutable_data() { return bytes_builder_.mutable_data(); } + int64_t false_count() const { return false_count_; } + + private: + BufferBuilder bytes_builder_; + int64_t bit_length_ = 0; + int64_t false_count_ = 0; +}; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h new file mode 100644 index 0000000000000000000000000000000000000000..f0aa14c1e0612d1872a5959998651a12668f449f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/builder_adaptive.h"   // IWYU pragma: keep
+#include "arrow/array/builder_base.h"       // IWYU pragma: keep
+#include "arrow/array/builder_binary.h"     // IWYU pragma: keep
+#include "arrow/array/builder_decimal.h"    // IWYU pragma: keep
+#include "arrow/array/builder_dict.h"       // IWYU pragma: keep
+#include "arrow/array/builder_nested.h"     // IWYU pragma: keep
+#include "arrow/array/builder_primitive.h"  // IWYU pragma: keep
+#include "arrow/array/builder_run_end.h"    // IWYU pragma: keep
+#include "arrow/array/builder_time.h"       // IWYU pragma: keep
+#include "arrow/array/builder_union.h"      // IWYU pragma: keep
+#include "arrow/status.h"
+#include "arrow/util/visibility.h"
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5dad1a17b18ee145a3840badd9f9317c9325c72
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h
@@ -0,0 +1,164 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+
+namespace arrow::internal {
+
+struct ChunkLocation {
+  /// \brief Index of the chunk in the array of chunks
+  ///
+  /// The value is always in the range `[0, chunks.size()]`. `chunks.size()` is used
+  /// to represent out-of-bounds locations.
+  int64_t chunk_index = 0;
+
+  /// \brief Index of the value in the chunk
+  ///
+  /// The value is undefined if chunk_index >= chunks.size()
+  int64_t index_in_chunk = 0;
+};
+
+/// \brief A utility that incrementally resolves logical indices into
+/// physical indices in a chunked array.
+struct ARROW_EXPORT ChunkResolver {
+ private:
+  /// \brief Array containing `chunks.size() + 1` offsets.
+  ///
+  /// `offsets_[i]` is the starting logical index of chunk `i`. `offsets_[0]` is always 0
+  /// and `offsets_[chunks.size()]` is the logical length of the chunked array.
+  std::vector<int64_t> offsets_;
+
+  /// \brief Cache of the index of the last resolved chunk.
+  ///
+  /// \invariant `cached_chunk_ in [0, chunks.size()]`
+  mutable std::atomic<int64_t> cached_chunk_;
+
+ public:
+  explicit ChunkResolver(const ArrayVector& chunks) noexcept;
+  explicit ChunkResolver(const std::vector<const Array*>& chunks) noexcept;
+  explicit ChunkResolver(const RecordBatchVector& batches) noexcept;
+
+  ChunkResolver(ChunkResolver&& other) noexcept;
+  ChunkResolver& operator=(ChunkResolver&& other) noexcept;
+
+  ChunkResolver(const ChunkResolver& other) noexcept;
+  ChunkResolver& operator=(const ChunkResolver& other) noexcept;
+
+  /// \brief Resolve a logical index to a ChunkLocation.
+ /// + /// The returned ChunkLocation contains the chunk index and the within-chunk index + /// equivalent to the logical index. + /// + /// \pre index >= 0 + /// \post location.chunk_index in [0, chunks.size()] + /// \param index The logical index to resolve + /// \return ChunkLocation with a valid chunk_index if index is within + /// bounds, or with chunk_index == chunks.size() if logical index is + /// `>= chunked_array.length()`. + inline ChunkLocation Resolve(int64_t index) const { + const auto cached_chunk = cached_chunk_.load(std::memory_order_relaxed); + const auto chunk_index = + ResolveChunkIndex(index, cached_chunk); + return {chunk_index, index - offsets_[chunk_index]}; + } + + /// \brief Resolve a logical index to a ChunkLocation. + /// + /// The returned ChunkLocation contains the chunk index and the within-chunk index + /// equivalent to the logical index. + /// + /// \pre index >= 0 + /// \post location.chunk_index in [0, chunks.size()] + /// \param index The logical index to resolve + /// \param hint ChunkLocation{} or the last ChunkLocation returned by + /// this ChunkResolver. + /// \return ChunkLocation with a valid chunk_index if index is within + /// bounds, or with chunk_index == chunks.size() if logical index is + /// `>= chunked_array.length()`. + inline ChunkLocation ResolveWithChunkIndexHint(int64_t index, + ChunkLocation hint) const { + assert(hint.chunk_index < static_cast(offsets_.size())); + const auto chunk_index = + ResolveChunkIndex(index, hint.chunk_index); + return {chunk_index, index - offsets_[chunk_index]}; + } + + private: + template + inline int64_t ResolveChunkIndex(int64_t index, int64_t cached_chunk) const { + // It is common for algorithms sequentially processing arrays to make consecutive + // accesses at a relatively small distance from each other, hence often falling in the + // same chunk. + // + // This is guaranteed when merging (assuming each side of the merge uses its + // own resolver), and is the most common case in recursive invocations of + // partitioning. + const auto num_offsets = static_cast(offsets_.size()); + const int64_t* offsets = offsets_.data(); + if (ARROW_PREDICT_TRUE(index >= offsets[cached_chunk]) && + (cached_chunk + 1 == num_offsets || index < offsets[cached_chunk + 1])) { + return cached_chunk; + } + // lo < hi is guaranteed by `num_offsets = chunks.size() + 1` + const auto chunk_index = Bisect(index, offsets, /*lo=*/0, /*hi=*/num_offsets); + if constexpr (StoreCachedChunk) { + assert(chunk_index < static_cast(offsets_.size())); + cached_chunk_.store(chunk_index, std::memory_order_relaxed); + } + return chunk_index; + } + + /// \brief Find the index of the chunk that contains the logical index. + /// + /// Any non-negative index is accepted. When `hi=num_offsets`, the largest + /// possible return value is `num_offsets-1` which is equal to + /// `chunks.size()`. The is returned when the logical index is out-of-bounds. + /// + /// \pre index >= 0 + /// \pre lo < hi + /// \pre lo >= 0 && hi <= offsets_.size() + static inline int64_t Bisect(int64_t index, const int64_t* offsets, int64_t lo, + int64_t hi) { + // Similar to std::upper_bound(), but slightly different as our offsets + // array always starts with 0. + auto n = hi - lo; + // First iteration does not need to check for n > 1 + // (lo < hi is guaranteed by the precondition). 
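+    // Worked example (illustrative, not part of the upstream header): with
+    // offsets = {0, 10, 20, 40} (three chunks) and hi = num_offsets = 4,
+    // resolving index = 25 proceeds as
+    //   n = 4: m = 2, offsets[2] = 20 <= 25  ->  lo = 2, n = 2
+    //   n = 2: m = 1, offsets[3] = 40 >  25  ->  n = 1
+    // and returns lo = 2; Resolve() then yields index_in_chunk = 25 - 20 = 5.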
+ assert(n > 1 && "lo < hi is a precondition of Bisect"); + do { + const int64_t m = n >> 1; + const int64_t mid = lo + m; + if (index >= offsets[mid]) { + lo = mid; + n -= m; + } else { + n = m; + } + } while (n > 1); + return lo; + } +}; + +} // namespace arrow::internal diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h new file mode 100644 index 0000000000000000000000000000000000000000..5d300861d85c2c5d66d6689bfd010b275d50c745 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h @@ -0,0 +1,275 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/chunk_resolver.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class MemoryPool; +namespace stl { +template +class ChunkedArrayIterator; +} // namespace stl + +/// \class ChunkedArray +/// \brief A data structure managing a list of primitive Arrow arrays logically +/// as one large array +/// +/// Data chunking is treated throughout this project largely as an +/// implementation detail for performance and memory use optimization. +/// ChunkedArray allows Array objects to be collected and interpreted +/// as a single logical array without requiring an expensive concatenation +/// step. +/// +/// In some cases, data produced by a function may exceed the capacity of an +/// Array (like BinaryArray or StringArray) and so returning multiple Arrays is +/// the only possibility. In these cases, we recommend returning a ChunkedArray +/// instead of vector of Arrays or some alternative. +/// +/// When data is processed in parallel, it may not be practical or possible to +/// create large contiguous memory allocations and write output into them. With +/// some data types, like binary and string types, it is not possible at all to +/// produce non-chunked array outputs without requiring a concatenation step at +/// the end of processing. +/// +/// Application developers may tune chunk sizes based on analysis of +/// performance profiles but many developer-users will not need to be +/// especially concerned with the chunking details. +/// +/// Preserving the chunk layout/sizes in processing steps is generally not +/// considered to be a contract in APIs. A function may decide to alter the +/// chunking of its result. Similarly, APIs accepting multiple ChunkedArray +/// inputs should not expect the chunk layout to be the same in each input. 
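+// Illustrative usage sketch (not part of the upstream header), assuming two
+// already-built arrays `a1` and `a2` of the same type, each held in a
+// std::shared_ptr<Array>:
+//
+//   ChunkedArray chunked(ArrayVector{a1, a2});
+//   // chunked.length() == a1->length() + a2->length()
+//   std::shared_ptr<ChunkedArray> tail = chunked.Slice(/*offset=*/3);
+//   // Slicing is zero-copy; no concatenation of the underlying chunks occurs.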
+class ARROW_EXPORT ChunkedArray { + public: + ChunkedArray(ChunkedArray&&) = default; + ChunkedArray& operator=(ChunkedArray&&) = default; + + /// \brief Construct a chunked array from a single Array + explicit ChunkedArray(std::shared_ptr chunk) + : ChunkedArray(ArrayVector{std::move(chunk)}) {} + + /// \brief Construct a chunked array from a vector of arrays and an optional data type + /// + /// The vector elements must have the same data type. + /// If the data type is passed explicitly, the vector may be empty. + /// If the data type is omitted, the vector must be non-empty. + explicit ChunkedArray(ArrayVector chunks, std::shared_ptr type = NULLPTR); + + // \brief Constructor with basic input validation. + static Result> Make( + ArrayVector chunks, std::shared_ptr type = NULLPTR); + + /// \brief Create an empty ChunkedArray of a given type + /// + /// The output ChunkedArray will have one chunk with an empty + /// array of the given type. + /// + /// \param[in] type the data type of the empty ChunkedArray + /// \param[in] pool the memory pool to allocate memory from + /// \return the resulting ChunkedArray + static Result> MakeEmpty( + std::shared_ptr type, MemoryPool* pool = default_memory_pool()); + + /// \return the total length of the chunked array; computed on construction + int64_t length() const { return length_; } + + /// \return the total number of nulls among all chunks + int64_t null_count() const { return null_count_; } + + /// \return the total number of chunks in the chunked array + int num_chunks() const { return static_cast(chunks_.size()); } + + /// \return chunk a particular chunk from the chunked array + const std::shared_ptr& chunk(int i) const { return chunks_[i]; } + + /// \return an ArrayVector of chunks + const ArrayVector& chunks() const { return chunks_; } + + /// \brief Construct a zero-copy slice of the chunked array with the + /// indicated offset and length + /// + /// \param[in] offset the position of the first element in the constructed + /// slice + /// \param[in] length the length of the slice. If there are not enough + /// elements in the chunked array, the length will be adjusted accordingly + /// + /// \return a new object wrapped in std::shared_ptr + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Slice from offset until end of the chunked array + std::shared_ptr Slice(int64_t offset) const; + + /// \brief Flatten this chunked array as a vector of chunked arrays, one + /// for each struct field + /// + /// \param[in] pool The pool for buffer allocations, if any + Result>> Flatten( + MemoryPool* pool = default_memory_pool()) const; + + /// Construct a zero-copy view of this chunked array with the given + /// type. Calls Array::View on each constituent chunk. Always succeeds if + /// there are zero chunks + Result> View(const std::shared_ptr& type) const; + + /// \brief Return the type of the chunked array + const std::shared_ptr& type() const { return type_; } + + /// \brief Return a Scalar containing the value of this array at index + Result> GetScalar(int64_t index) const; + + /// \brief Determine if two chunked arrays are equal. + /// + /// Two chunked arrays can be equal only if they have equal datatypes. + /// However, they may be equal even if they have different chunkings. + bool Equals(const ChunkedArray& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + /// \brief Determine if two chunked arrays are equal. 
+ bool Equals(const std::shared_ptr& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + /// \brief Determine if two chunked arrays approximately equal + bool ApproxEquals(const ChunkedArray& other, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// \return PrettyPrint representation suitable for debugging + std::string ToString() const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the chunk array's internal data. + /// + /// This is O(k*m) where k is the number of array descendents, + /// and m is the number of chunks. + /// + /// \return Status + Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the chunk array's internal data. + /// + /// This is O(k*n) where k is the number of array descendents, + /// and n is the length in elements. + /// + /// \return Status + Status ValidateFull() const; + + protected: + ArrayVector chunks_; + std::shared_ptr type_; + int64_t length_; + int64_t null_count_; + + private: + template + friend class ::arrow::stl::ChunkedArrayIterator; + internal::ChunkResolver chunk_resolver_; + ARROW_DISALLOW_COPY_AND_ASSIGN(ChunkedArray); +}; + +namespace internal { + +/// \brief EXPERIMENTAL: Utility for incremental iteration over contiguous +/// pieces of potentially differently-chunked ChunkedArray objects +class ARROW_EXPORT MultipleChunkIterator { + public: + MultipleChunkIterator(const ChunkedArray& left, const ChunkedArray& right) + : left_(left), + right_(right), + pos_(0), + length_(left.length()), + chunk_idx_left_(0), + chunk_idx_right_(0), + chunk_pos_left_(0), + chunk_pos_right_(0) {} + + bool Next(std::shared_ptr* next_left, std::shared_ptr* next_right); + + int64_t position() const { return pos_; } + + private: + const ChunkedArray& left_; + const ChunkedArray& right_; + + // The amount of the entire ChunkedArray consumed + int64_t pos_; + + // Length of the chunked array(s) + int64_t length_; + + // Current left chunk + int chunk_idx_left_; + + // Current right chunk + int chunk_idx_right_; + + // Offset into the current left chunk + int64_t chunk_pos_left_; + + // Offset into the current right chunk + int64_t chunk_pos_right_; +}; + +/// \brief Evaluate binary function on two ChunkedArray objects having possibly +/// different chunk layouts. The passed binary function / functor should have +/// the following signature. +/// +/// Status(const Array&, const Array&, int64_t) +/// +/// The third argument is the absolute position relative to the start of each +/// ChunkedArray. The function is executed against each contiguous pair of +/// array segments, slicing if necessary. 
+/// +/// For example, if two arrays have chunk sizes +/// +/// left: [10, 10, 20] +/// right: [15, 10, 15] +/// +/// Then the following invocations take place (pseudocode) +/// +/// func(left.chunk[0][0:10], right.chunk[0][0:10], 0) +/// func(left.chunk[1][0:5], right.chunk[0][10:15], 10) +/// func(left.chunk[1][5:10], right.chunk[1][0:5], 15) +/// func(left.chunk[2][0:5], right.chunk[1][5:10], 20) +/// func(left.chunk[2][5:20], right.chunk[2][:], 25) +template +Status ApplyBinaryChunked(const ChunkedArray& left, const ChunkedArray& right, + Action&& action) { + MultipleChunkIterator iterator(left, right); + std::shared_ptr left_piece, right_piece; + while (iterator.Next(&left_piece, &right_piece)) { + ARROW_RETURN_NOT_OK(action(*left_piece, *right_piece, iterator.position())); + } + return Status::OK(); +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h new file mode 100644 index 0000000000000000000000000000000000000000..6dbacfa86af592c1e2aecf22aea2322ce5bc5090 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for comparing Arrow data structures + +#pragma once + +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class Tensor; +class SparseTensor; +struct Scalar; + +static constexpr double kDefaultAbsoluteTolerance = 1E-5; + +/// A container of options for equality comparisons +class EqualOptions { + public: + /// Whether or not NaNs are considered equal. + bool nans_equal() const { return nans_equal_; } + + /// Return a new EqualOptions object with the "nans_equal" property changed. + EqualOptions nans_equal(bool v) const { + auto res = EqualOptions(*this); + res.nans_equal_ = v; + return res; + } + + /// Whether or not zeros with differing signs are considered equal. + bool signed_zeros_equal() const { return signed_zeros_equal_; } + + /// Return a new EqualOptions object with the "signed_zeros_equal" property changed. + EqualOptions signed_zeros_equal(bool v) const { + auto res = EqualOptions(*this); + res.signed_zeros_equal_ = v; + return res; + } + + /// The absolute tolerance for approximate comparisons of floating-point values. + double atol() const { return atol_; } + + /// Return a new EqualOptions object with the "atol" property changed. + EqualOptions atol(double v) const { + auto res = EqualOptions(*this); + res.atol_ = v; + return res; + } + + /// The ostream to which a diff will be formatted if arrays disagree. 
+ /// If this is null (the default) no diff will be formatted. + std::ostream* diff_sink() const { return diff_sink_; } + + /// Return a new EqualOptions object with the "diff_sink" property changed. + /// This option will be ignored if diff formatting of the types of compared arrays is + /// not supported. + EqualOptions diff_sink(std::ostream* diff_sink) const { + auto res = EqualOptions(*this); + res.diff_sink_ = diff_sink; + return res; + } + + static EqualOptions Defaults() { return {}; } + + protected: + double atol_ = kDefaultAbsoluteTolerance; + bool nans_equal_ = false; + bool signed_zeros_equal_ = true; + + std::ostream* diff_sink_ = NULLPTR; +}; + +/// Returns true if the arrays are exactly equal +ARROW_EXPORT bool ArrayEquals(const Array& left, const Array& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if the arrays are approximately equal. For non-floating point +/// types, this is equivalent to ArrayEquals(left, right) +ARROW_EXPORT bool ArrayApproxEquals(const Array& left, const Array& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if indicated equal-length segment of arrays are exactly equal +ARROW_EXPORT bool ArrayRangeEquals(const Array& left, const Array& right, + int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if indicated equal-length segment of arrays are approximately equal +ARROW_EXPORT bool ArrayRangeApproxEquals(const Array& left, const Array& right, + int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()); + +ARROW_EXPORT bool TensorEquals(const Tensor& left, const Tensor& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// EXPERIMENTAL: Returns true if the given sparse tensors are exactly equal +ARROW_EXPORT bool SparseTensorEquals(const SparseTensor& left, const SparseTensor& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if the type metadata are exactly equal +/// \param[in] left a DataType +/// \param[in] right a DataType +/// \param[in] check_metadata whether to compare KeyValueMetadata for child +/// fields +ARROW_EXPORT bool TypeEquals(const DataType& left, const DataType& right, + bool check_metadata = true); + +/// Returns true if scalars are equal +/// \param[in] left a Scalar +/// \param[in] right a Scalar +/// \param[in] options comparison options +ARROW_EXPORT bool ScalarEquals(const Scalar& left, const Scalar& right, + const EqualOptions& options = EqualOptions::Defaults()); + +/// Returns true if scalars are approximately equal +/// \param[in] left a Scalar +/// \param[in] right a Scalar +/// \param[in] options comparison options +ARROW_EXPORT bool ScalarApproxEquals( + const Scalar& left, const Scalar& right, + const EqualOptions& options = EqualOptions::Defaults()); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..bad34f4a37881e82b3b0787f2d2c9c7c8d4a0461 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h @@ -0,0 +1,1717 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Eager evaluation convenience APIs for invoking common functions, including +// necessary memory allocations + +#pragma once + +#include +#include +#include + +#include "arrow/compute/function_options.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-concrete-options +/// +/// @{ + +class ARROW_EXPORT ArithmeticOptions : public FunctionOptions { + public: + explicit ArithmeticOptions(bool check_overflow = false); + static constexpr char const kTypeName[] = "ArithmeticOptions"; + bool check_overflow; +}; + +class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions { + public: + explicit ElementWiseAggregateOptions(bool skip_nulls = true); + static constexpr char const kTypeName[] = "ElementWiseAggregateOptions"; + static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; } + bool skip_nulls; +}; + +/// Rounding and tie-breaking modes for round compute functions. +/// Additional details and examples are provided in compute.rst. 
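+// Illustrative tie-breaking examples (not part of the upstream header):
+// rounding 2.5 and -2.5 to zero digits under the "half" modes below gives
+//   HALF_DOWN              ->  2  and -3
+//   HALF_UP                ->  3  and -2
+//   HALF_TOWARDS_ZERO      ->  2  and -2
+//   HALF_TOWARDS_INFINITY  ->  3  and -3
+//   HALF_TO_EVEN           ->  2  and -2
+//   HALF_TO_ODD            ->  3  and -3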
+enum class RoundMode : int8_t { + /// Round to nearest integer less than or equal in magnitude (aka "floor") + DOWN, + /// Round to nearest integer greater than or equal in magnitude (aka "ceil") + UP, + /// Get the integral part without fractional digits (aka "trunc") + TOWARDS_ZERO, + /// Round negative values with DOWN rule + /// and positive values with UP rule (aka "away from zero") + TOWARDS_INFINITY, + /// Round ties with DOWN rule (also called "round half towards negative infinity") + HALF_DOWN, + /// Round ties with UP rule (also called "round half towards positive infinity") + HALF_UP, + /// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity") + HALF_TOWARDS_ZERO, + /// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero") + HALF_TOWARDS_INFINITY, + /// Round ties to nearest even integer + HALF_TO_EVEN, + /// Round ties to nearest odd integer + HALF_TO_ODD, +}; + +class ARROW_EXPORT RoundOptions : public FunctionOptions { + public: + explicit RoundOptions(int64_t ndigits = 0, + RoundMode round_mode = RoundMode::HALF_TO_EVEN); + static constexpr char const kTypeName[] = "RoundOptions"; + static RoundOptions Defaults() { return RoundOptions(); } + /// Rounding precision (number of digits to round to) + int64_t ndigits; + /// Rounding and tie-breaking mode + RoundMode round_mode; +}; + +class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions { + public: + explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN); + static constexpr char const kTypeName[] = "RoundBinaryOptions"; + static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); } + /// Rounding and tie-breaking mode + RoundMode round_mode; +}; + +enum class CalendarUnit : int8_t { + NANOSECOND, + MICROSECOND, + MILLISECOND, + SECOND, + MINUTE, + HOUR, + DAY, + WEEK, + MONTH, + QUARTER, + YEAR +}; + +class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions { + public: + explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY, + bool week_starts_monday = true, + bool ceil_is_strictly_greater = false, + bool calendar_based_origin = false); + static constexpr char const kTypeName[] = "RoundTemporalOptions"; + static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); } + + /// Number of units to round to + int multiple; + /// The unit used for rounding of time + CalendarUnit unit; + /// What day does the week start with (Monday=true, Sunday=false) + bool week_starts_monday; + /// Enable this flag to return a rounded value that is strictly greater than the input. + /// For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield 1970-01-01T03:00:00 + /// if set to true and 1970-01-01T00:00:00 if set to false. + /// This applies for ceiling only. + bool ceil_is_strictly_greater; + /// By default time is rounded to a multiple of units since 1970-01-01T00:00:00. + /// By setting calendar_based_origin to true, time will be rounded to a number + /// of units since the last greater calendar unit. + /// For example: rounding to a multiple of days since the beginning of the month or + /// to hours since the beginning of the day. + /// Exceptions: week and quarter are not used as greater units, therefore days will + /// will be rounded to the beginning of the month not week. Greater unit of week + /// is year. + /// Note that ceiling and rounding might change sorting order of an array near greater + /// unit change. 
For example rounding YYYY-mm-dd 23:00:00 to 5 hours will ceil and + /// round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the other hand + /// YYYY-mm-dd+1 00:00:00 will ceil, round and floor to YYYY-mm-dd+1 00:00:00. This + /// can break the order of an already ordered array. + bool calendar_based_origin; +}; + +class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions { + public: + explicit RoundToMultipleOptions(double multiple = 1.0, + RoundMode round_mode = RoundMode::HALF_TO_EVEN); + explicit RoundToMultipleOptions(std::shared_ptr multiple, + RoundMode round_mode = RoundMode::HALF_TO_EVEN); + static constexpr char const kTypeName[] = "RoundToMultipleOptions"; + static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); } + /// Rounding scale (multiple to round to). + /// + /// Should be a positive numeric scalar of a type compatible with the + /// argument to be rounded. The cast kernel is used to convert the rounding + /// multiple to match the result type. + std::shared_ptr multiple; + /// Rounding and tie-breaking mode + RoundMode round_mode; +}; + +/// Options for var_args_join. +class ARROW_EXPORT JoinOptions : public FunctionOptions { + public: + /// How to handle null values. (A null separator always results in a null output.) + enum NullHandlingBehavior { + /// A null in any input results in a null in the output. + EMIT_NULL, + /// Nulls in inputs are skipped. + SKIP, + /// Nulls in inputs are replaced with the replacement string. + REPLACE, + }; + explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL, + std::string null_replacement = ""); + static constexpr char const kTypeName[] = "JoinOptions"; + static JoinOptions Defaults() { return JoinOptions(); } + NullHandlingBehavior null_handling; + std::string null_replacement; +}; + +class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions { + public: + explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false); + MatchSubstringOptions(); + static constexpr char const kTypeName[] = "MatchSubstringOptions"; + + /// The exact substring (or regex, depending on kernel) to look for inside input values. + std::string pattern; + /// Whether to perform a case-insensitive match. + bool ignore_case; +}; + +class ARROW_EXPORT SplitOptions : public FunctionOptions { + public: + explicit SplitOptions(int64_t max_splits = -1, bool reverse = false); + static constexpr char const kTypeName[] = "SplitOptions"; + + /// Maximum number of splits allowed, or unlimited when -1 + int64_t max_splits; + /// Start splitting from the end of the string (only relevant when max_splits != -1) + bool reverse; +}; + +class ARROW_EXPORT SplitPatternOptions : public FunctionOptions { + public: + explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1, + bool reverse = false); + SplitPatternOptions(); + static constexpr char const kTypeName[] = "SplitPatternOptions"; + + /// The exact substring to split on. 
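+  // Illustrative example (not part of the upstream header): splitting "a--b--c"
+  // on the pattern "--" with max_splits = 1 yields ["a", "b--c"]; with
+  // max_splits = 1 and reverse = true it yields ["a--b", "c"].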
+ std::string pattern; + /// Maximum number of splits allowed, or unlimited when -1 + int64_t max_splits; + /// Start splitting from the end of the string (only relevant when max_splits != -1) + bool reverse; +}; + +class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions { + public: + explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement); + ReplaceSliceOptions(); + static constexpr char const kTypeName[] = "ReplaceSliceOptions"; + + /// Index to start slicing at + int64_t start; + /// Index to stop slicing at + int64_t stop; + /// String to replace the slice with + std::string replacement; +}; + +class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions { + public: + explicit ReplaceSubstringOptions(std::string pattern, std::string replacement, + int64_t max_replacements = -1); + ReplaceSubstringOptions(); + static constexpr char const kTypeName[] = "ReplaceSubstringOptions"; + + /// Pattern to match, literal, or regular expression depending on which kernel is used + std::string pattern; + /// String to replace the pattern with + std::string replacement; + /// Max number of substrings to replace (-1 means unbounded) + int64_t max_replacements; +}; + +class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions { + public: + explicit ExtractRegexOptions(std::string pattern); + ExtractRegexOptions(); + static constexpr char const kTypeName[] = "ExtractRegexOptions"; + + /// Regular expression with named capture fields + std::string pattern; +}; + +/// Options for IsIn and IndexIn functions +class ARROW_EXPORT SetLookupOptions : public FunctionOptions { + public: + /// How to handle null values. + enum NullMatchingBehavior { + /// MATCH, any null in `value_set` is successfully matched in + /// the input. + MATCH, + /// SKIP, any null in `value_set` is ignored and nulls in the input + /// produce null (IndexIn) or false (IsIn) values in the output. + SKIP, + /// EMIT_NULL, any null in `value_set` is ignored and nulls in the + /// input produce null (IndexIn and IsIn) values in the output. + EMIT_NULL, + /// INCONCLUSIVE, null values are regarded as unknown values, which is + /// sql-compatible. nulls in the input produce null (IndexIn and IsIn) + /// values in the output. Besides, if `value_set` contains a null, + /// non-null unmatched values in the input also produce null values + /// (IndexIn and IsIn) in the output. + INCONCLUSIVE + }; + + explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH); + SetLookupOptions(); + + // DEPRECATED(will be removed after removing of skip_nulls) + explicit SetLookupOptions(Datum value_set, bool skip_nulls); + + static constexpr char const kTypeName[] = "SetLookupOptions"; + + /// The set of values to look up input values into. + Datum value_set; + + NullMatchingBehavior null_matching_behavior; + + // DEPRECATED(will be removed after removing of skip_nulls) + NullMatchingBehavior GetNullMatchingBehavior() const; + + // DEPRECATED(use null_matching_behavior instead) + /// Whether nulls in `value_set` count for lookup. + /// + /// If true, any null in `value_set` is ignored and nulls in the input + /// produce null (IndexIn) or false (IsIn) values in the output. + /// If false, any null in `value_set` is successfully matched in + /// the input. 
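+  // Illustrative example (not part of the upstream header): for an is_in call
+  // with input [1, 2, null] and value_set [1, null], the null matching
+  // behaviors give
+  //   MATCH        -> [true, false, true]
+  //   SKIP         -> [true, false, false]
+  //   EMIT_NULL    -> [true, false, null]
+  //   INCONCLUSIVE -> [true, null,  null]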
+ std::optional skip_nulls; +}; + +/// Options for struct_field function +class ARROW_EXPORT StructFieldOptions : public FunctionOptions { + public: + explicit StructFieldOptions(std::vector indices); + explicit StructFieldOptions(std::initializer_list); + explicit StructFieldOptions(FieldRef field_ref); + StructFieldOptions(); + static constexpr char const kTypeName[] = "StructFieldOptions"; + + /// The FieldRef specifying what to extract from struct or union. + FieldRef field_ref; +}; + +class ARROW_EXPORT StrptimeOptions : public FunctionOptions { + public: + explicit StrptimeOptions(std::string format, TimeUnit::type unit, + bool error_is_null = false); + StrptimeOptions(); + static constexpr char const kTypeName[] = "StrptimeOptions"; + + /// The desired format string. + std::string format; + /// The desired time resolution + TimeUnit::type unit; + /// Return null on parsing errors if true or raise if false + bool error_is_null; +}; + +class ARROW_EXPORT StrftimeOptions : public FunctionOptions { + public: + explicit StrftimeOptions(std::string format, std::string locale = "C"); + StrftimeOptions(); + + static constexpr char const kTypeName[] = "StrftimeOptions"; + + static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S"; + + /// The desired format string. + std::string format; + /// The desired output locale string. + std::string locale; +}; + +class ARROW_EXPORT PadOptions : public FunctionOptions { + public: + explicit PadOptions(int64_t width, std::string padding = " "); + PadOptions(); + static constexpr char const kTypeName[] = "PadOptions"; + + /// The desired string length. + int64_t width; + /// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII). + std::string padding; +}; + +class ARROW_EXPORT TrimOptions : public FunctionOptions { + public: + explicit TrimOptions(std::string characters); + TrimOptions(); + static constexpr char const kTypeName[] = "TrimOptions"; + + /// The individual characters to be trimmed from the string. + std::string characters; +}; + +class ARROW_EXPORT SliceOptions : public FunctionOptions { + public: + explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits::max(), + int64_t step = 1); + SliceOptions(); + static constexpr char const kTypeName[] = "SliceOptions"; + int64_t start, stop, step; +}; + +class ARROW_EXPORT ListSliceOptions : public FunctionOptions { + public: + explicit ListSliceOptions(int64_t start, std::optional stop = std::nullopt, + int64_t step = 1, + std::optional return_fixed_size_list = std::nullopt); + ListSliceOptions(); + static constexpr char const kTypeName[] = "ListSliceOptions"; + /// The start of list slicing. + int64_t start; + /// Optional stop of list slicing. If not set, then slice to end. (NotImplemented) + std::optional stop; + /// Slicing step + int64_t step; + // Whether to return a FixedSizeListArray. If true _and_ stop is after + // a list element's length, nulls will be appended to create the requested slice size. + // Default of `nullopt` will return whatever type it got in. 
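+  // Illustrative example (not part of the upstream header): with start = 1,
+  // stop = 3 and step = 1, an input list [1, 2, 3, 4] is sliced to [2, 3];
+  // return_fixed_size_list only matters when stop reaches past a list's
+  // length, as described above.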
+ std::optional return_fixed_size_list; +}; + +class ARROW_EXPORT NullOptions : public FunctionOptions { + public: + explicit NullOptions(bool nan_is_null = false); + static constexpr char const kTypeName[] = "NullOptions"; + static NullOptions Defaults() { return NullOptions{}; } + + bool nan_is_null; +}; + +enum CompareOperator : int8_t { + EQUAL, + NOT_EQUAL, + GREATER, + GREATER_EQUAL, + LESS, + LESS_EQUAL, +}; + +struct ARROW_EXPORT CompareOptions { + explicit CompareOptions(CompareOperator op) : op(op) {} + CompareOptions() : CompareOptions(CompareOperator::EQUAL) {} + enum CompareOperator op; +}; + +class ARROW_EXPORT MakeStructOptions : public FunctionOptions { + public: + MakeStructOptions(std::vector n, std::vector r, + std::vector> m); + explicit MakeStructOptions(std::vector n); + MakeStructOptions(); + static constexpr char const kTypeName[] = "MakeStructOptions"; + + /// Names for wrapped columns + std::vector field_names; + + /// Nullability bits for wrapped columns + std::vector field_nullability; + + /// Metadata attached to wrapped columns + std::vector> field_metadata; +}; + +struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions { + public: + explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1); + static constexpr char const kTypeName[] = "DayOfWeekOptions"; + static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); } + + /// Number days from 0 if true and from 1 if false + bool count_from_zero; + /// What day does the week start with (Monday=1, Sunday=7). + /// The numbering is unaffected by the count_from_zero parameter. + uint32_t week_start; +}; + +/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent +/// times. +struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions { + public: + /// \brief How to interpret ambiguous local times that can be interpreted as + /// multiple instants (normally two) due to DST shifts. + /// + /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations. + /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations. + enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST }; + + /// \brief How to handle local times that do not exist due to DST shifts. + /// + /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant + /// in the given timestamp precision (for example, for a nanoseconds precision + /// timestamp, this is one nanosecond before the DST shift instant). + /// NONEXISTENT_LATEST emits the DST shift instant. 
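+  // Illustrative example (not part of the upstream header), assuming the
+  // "America/New_York" timezone: the local time 2021-03-14 02:30:00 does not
+  // exist because clocks jump from 02:00 to 03:00.  NONEXISTENT_EARLIEST maps
+  // it to just before the shift instant, NONEXISTENT_LATEST maps it to the
+  // shift instant itself, and NONEXISTENT_RAISE returns an error.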
+ enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST }; + + explicit AssumeTimezoneOptions(std::string timezone, + Ambiguous ambiguous = AMBIGUOUS_RAISE, + Nonexistent nonexistent = NONEXISTENT_RAISE); + AssumeTimezoneOptions(); + static constexpr char const kTypeName[] = "AssumeTimezoneOptions"; + + /// Timezone to convert timestamps from + std::string timezone; + + /// How to interpret ambiguous local times (due to DST shifts) + Ambiguous ambiguous; + /// How to interpret nonexistent local times (due to DST shifts) + Nonexistent nonexistent; +}; + +struct ARROW_EXPORT WeekOptions : public FunctionOptions { + public: + explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false, + bool first_week_is_fully_in_year = false); + static constexpr char const kTypeName[] = "WeekOptions"; + static WeekOptions Defaults() { return WeekOptions{}; } + static WeekOptions ISODefaults() { + return WeekOptions{/*week_starts_monday*/ true, + /*count_from_zero=*/false, + /*first_week_is_fully_in_year=*/false}; + } + static WeekOptions USDefaults() { + return WeekOptions{/*week_starts_monday*/ false, + /*count_from_zero=*/false, + /*first_week_is_fully_in_year=*/false}; + } + + /// What day does the week start with (Monday=true, Sunday=false) + bool week_starts_monday; + /// Dates from current year that fall into last ISO week of the previous year return + /// 0 if true and 52 or 53 if false. + bool count_from_zero; + /// Must the first week be fully in January (true), or is a week that begins on + /// December 29, 30, or 31 considered to be the first week of the new year (false)? + bool first_week_is_fully_in_year; +}; + +struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions { + public: + enum Form { NFC, NFKC, NFD, NFKD }; + + explicit Utf8NormalizeOptions(Form form = NFC); + static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); } + static constexpr char const kTypeName[] = "Utf8NormalizeOptions"; + + /// The Unicode normalization form to apply + Form form; +}; + +class ARROW_EXPORT RandomOptions : public FunctionOptions { + public: + enum Initializer { SystemRandom, Seed }; + + static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; } + static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; } + + RandomOptions(Initializer initializer, uint64_t seed); + RandomOptions(); + static constexpr char const kTypeName[] = "RandomOptions"; + static RandomOptions Defaults() { return RandomOptions(); } + + /// The type of initialization for random number generation - system or provided seed. + Initializer initializer; + /// The seed value used to initialize the random number generation. + uint64_t seed; +}; + +/// Options for map_lookup function +class ARROW_EXPORT MapLookupOptions : public FunctionOptions { + public: + enum Occurrence { + /// Return the first matching value + FIRST, + /// Return the last matching value + LAST, + /// Return all matching values + ALL + }; + + explicit MapLookupOptions(std::shared_ptr query_key, Occurrence occurrence); + MapLookupOptions(); + + constexpr static char const kTypeName[] = "MapLookupOptions"; + + /// The key to lookup in the map + std::shared_ptr query_key; + + /// Whether to return the first, last, or all matching values + Occurrence occurrence; +}; + +/// @} + +/// \brief Get the absolute value of a value. +/// +/// If argument is null the result will be null. 
+/// +/// \param[in] arg the value transformed +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise absolute value +ARROW_EXPORT +Result AbsoluteValue(const Datum& arg, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Add two values together. Array values must be the same length. If +/// either addend is null the result will be null. +/// +/// \param[in] left the first addend +/// \param[in] right the second addend +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise sum +ARROW_EXPORT +Result Add(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Subtract two values. Array values must be the same length. If the +/// minuend or subtrahend is null the result will be null. +/// +/// \param[in] left the value subtracted from (minuend) +/// \param[in] right the value by which the minuend is reduced (subtrahend) +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise difference +ARROW_EXPORT +Result Subtract(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Multiply two values. Array values must be the same length. If either +/// factor is null the result will be null. +/// +/// \param[in] left the first factor +/// \param[in] right the second factor +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise product +ARROW_EXPORT +Result Multiply(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Divide two values. Array values must be the same length. If either +/// argument is null the result will be null. For integer types, if there is +/// a zero divisor, an error will be raised. +/// +/// \param[in] left the dividend +/// \param[in] right the divisor +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise quotient +ARROW_EXPORT +Result Divide(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Negate values. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value negated +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise negation +ARROW_EXPORT +Result Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Raise the values of base array to the power of the exponent array values. +/// Array values must be the same length. If either base or exponent is null the result +/// will be null. 
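+///
+/// A minimal illustrative sketch (`base` and `exponent` are placeholder
+/// arrays/Datums): the same ArithmeticOptions used by Add/Subtract/Multiply/
+/// Divide selects checked vs. unchecked overflow behavior here as well.
+///
+/// \code
+/// // Default: integer overflow is not detected.
+/// auto unchecked = arrow::compute::Power(base, exponent);
+/// // check_overflow=true: overflow yields an error Status instead.
+/// auto checked = arrow::compute::Power(
+///     base, exponent, arrow::compute::ArithmeticOptions(/*check_overflow=*/true));
+/// \endcode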
+/// +/// \param[in] left the base +/// \param[in] right the exponent +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise base value raised to the power of exponent +ARROW_EXPORT +Result Power(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Raise Euler's number to the power of specified exponent, element-wise. +/// If the exponent value is null the result will be null. +/// +/// \param[in] arg the exponent +/// \param[in] ctx the function execution context, optional +/// \return the element-wise Euler's number raised to the power of exponent +ARROW_EXPORT +Result Exp(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Left shift the left array by the right array. Array values must be the +/// same length. If either operand is null, the result will be null. +/// +/// \param[in] left the value to shift +/// \param[in] right the value to shift by +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise left value shifted left by the right value +ARROW_EXPORT +Result ShiftLeft(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Right shift the left array by the right array. Array values must be the +/// same length. If either operand is null, the result will be null. Performs a +/// logical shift for unsigned values, and an arithmetic shift for signed values. +/// +/// \param[in] left the value to shift +/// \param[in] right the value to shift by +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise left value shifted right by the right value +ARROW_EXPORT +Result ShiftRight(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the sine of the array values. +/// \param[in] arg The values to compute the sine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise sine of the values +ARROW_EXPORT +Result Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cosine of the array values. +/// \param[in] arg The values to compute the cosine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise cosine of the values +ARROW_EXPORT +Result Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse sine (arcsine) of the array values. +/// \param[in] arg The values to compute the inverse sine for. 
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse sine of the values +ARROW_EXPORT +Result Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse cosine (arccosine) of the array values. +/// \param[in] arg The values to compute the inverse cosine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse cosine of the values +ARROW_EXPORT +Result Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the tangent of the array values. +/// \param[in] arg The values to compute the tangent for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise tangent of the values +ARROW_EXPORT +Result Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse tangent (arctangent) of the array values. +/// \param[in] arg The values to compute the inverse tangent for. +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse tangent of the values +ARROW_EXPORT +Result Atan(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse tangent (arctangent) of y/x, using the +/// argument signs to determine the correct quadrant. +/// \param[in] y The y-values to compute the inverse tangent for. +/// \param[in] x The x-values to compute the inverse tangent for. +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse tangent of the values +ARROW_EXPORT +Result Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR); + +/// \brief Get the natural log of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise natural log +ARROW_EXPORT +Result Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log base 10 of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log base 10 +ARROW_EXPORT +Result Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log base 2 of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log base 2 +ARROW_EXPORT +Result Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the natural log of (1 + value). +/// +/// If argument is null the result will be null. 
+/// This function may be more accurate than Log(1 + value) for values close to zero. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise natural log +ARROW_EXPORT +Result Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log of a value to the given base. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] base The given base. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log to the given base +ARROW_EXPORT +Result Logb(const Datum& arg, const Datum& base, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the square-root of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the square-root for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise square-root +ARROW_EXPORT +Result Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Round to the nearest integer less than or equal in magnitude to the +/// argument. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] ctx the function execution context, optional +/// \return the rounded value +ARROW_EXPORT +Result Floor(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Round to the nearest integer greater than or equal in magnitude to the +/// argument. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] ctx the function execution context, optional +/// \return the rounded value +ARROW_EXPORT +Result Ceil(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Get the integral part without fractional digits. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to truncate +/// \param[in] ctx the function execution context, optional +/// \return the truncated value +ARROW_EXPORT +Result Trunc(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Find the element-wise maximum of any number of arrays or scalars. +/// Array values must be the same length. +/// +/// \param[in] args arrays or scalars to operate on. +/// \param[in] options options for handling nulls, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise maximum +ARROW_EXPORT +Result MaxElementWise( + const std::vector& args, + ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Find the element-wise minimum of any number of arrays or scalars. +/// Array values must be the same length. +/// +/// \param[in] args arrays or scalars to operate on. 
+/// \param[in] options options for handling nulls, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise minimum +ARROW_EXPORT +Result MinElementWise( + const std::vector& args, + ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the sign of a value. Array values can be of arbitrary length. If argument +/// is null the result will be null. +/// +/// \param[in] arg the value to extract sign from +/// \param[in] ctx the function execution context, optional +/// \return the element-wise sign function +ARROW_EXPORT +Result Sign(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given precision. +/// +/// If arg is null the result will be null. +/// +/// \param[in] arg the value to be rounded +/// \param[in] options rounding options (rounding mode and number of digits), optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given precision. +/// +/// If arg1 is null the result will be null. +/// If arg2 is null then the result will be null. If arg2 is negative, then the rounding +/// place will be shifted to the left (thus -1 would correspond to rounding to the nearest +/// ten). If positive, the rounding place will shift to the right (and +1 would +/// correspond to rounding to the nearest tenth). +/// +/// \param[in] arg1 the value to be rounded +/// \param[in] arg2 the number of significant digits to round to +/// \param[in] options rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result RoundBinary(const Datum& arg1, const Datum& arg2, + RoundBinaryOptions options = RoundBinaryOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given multiple. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] options rounding options (rounding mode and multiple), optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result RoundToMultiple( + const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Ceil a temporal value to a given frequency +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the temporal value to ceil +/// \param[in] options temporal rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result CeilTemporal( + const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Floor a temporal value to a given frequency +/// +/// If argument is null the result will be null. 
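+///
+/// A minimal illustrative sketch (`timestamps` is a placeholder timestamp
+/// array; the (multiple, unit) constructor and CalendarUnit::MINUTE shown
+/// here are assumed from RoundTemporalOptions, defined earlier in this
+/// header):
+///
+/// \code
+/// // Floor each timestamp to the start of its 15-minute window.
+/// arrow::compute::RoundTemporalOptions options(
+///     /*multiple=*/15, arrow::compute::CalendarUnit::MINUTE);
+/// auto floored = arrow::compute::FloorTemporal(timestamps, options);
+/// \endcode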
+/// +/// \param[in] arg the temporal value to floor +/// \param[in] options temporal rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result FloorTemporal( + const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Round a temporal value to a given frequency +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the temporal value to round +/// \param[in] options temporal rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RoundTemporal( + const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Invert the values of a boolean datum +/// \param[in] value datum to invert +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Invert(const Datum& value, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND of two boolean datums which always propagates nulls +/// (null and false is null). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND of two boolean datums with a Kleene truth table +/// (null and false is false). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result KleeneAnd(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Element-wise OR of two boolean datums which always propagates nulls +/// (null and true is null). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise OR of two boolean datums with a Kleene truth table +/// (null or true is true). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result KleeneOr(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise XOR of two boolean datums +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND NOT of two boolean datums which always propagates nulls +/// (null and not true is null). 
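+///
+/// A minimal illustrative sketch (`a` and `b` are placeholder boolean arrays)
+/// contrasting the plain and Kleene null semantics of the kernels above:
+///
+/// \code
+/// // Plain AND: any null operand produces a null output slot.
+/// auto strict_and = arrow::compute::And(a, b);
+/// // Kleene AND: (false AND null) is false; only (true AND null) stays null.
+/// auto kleene_and = arrow::compute::KleeneAnd(a, b);
+/// \endcode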
+/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table +/// (false and not null is false, null and not true is false). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result KleeneAndNot(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief IsIn returns true for each element of `values` that is contained in +/// `value_set` +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. +/// +/// \param[in] values array-like input to look up in value_set +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IsIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IndexIn examines each slot in the values against a value_set array. +/// If the value is not found in value_set, null will be output. +/// If found, the index of occurrence within value_set (ignoring duplicates) +/// will be output. +/// +/// For example given values = [99, 42, 3, null] and +/// value_set = [3, 3, 99], the output will be = [2, null, 0, null] +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. 
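+///
+/// A minimal illustrative sketch (`values` and `value_set` are placeholder
+/// arrays; the (value_set, skip_nulls) constructor of SetLookupOptions,
+/// defined earlier in this header, is assumed):
+///
+/// \code
+/// arrow::compute::SetLookupOptions options(value_set, /*skip_nulls=*/true);
+/// auto membership = arrow::compute::IsIn(values, options);    // boolean mask
+/// auto positions = arrow::compute::IndexIn(values, options);  // indices, null if absent
+/// \endcode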
+/// +/// \param[in] values array-like input +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IndexIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IndexIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IsValid returns true for each element of `values` that is not null, +/// false otherwise +/// +/// \param[in] values input to examine for validity +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsValid(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsNull returns true for each element of `values` that is null, +/// false otherwise +/// +/// \param[in] values input to examine for nullity +/// \param[in] options NullOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief IsNan returns true for each element of `values` that is NaN, +/// false otherwise +/// +/// \param[in] values input to look for NaN +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNan(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IfElse returns elements chosen from `left` or `right` +/// depending on `cond`. `null` values in `cond` will be promoted to the result +/// +/// \param[in] cond `Boolean` condition Scalar/ Array +/// \param[in] left Scalar/ Array +/// \param[in] right Scalar/ Array +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IfElse(const Datum& cond, const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for +/// each row, select the first value for which the corresponding condition is +/// true, or (if given) select the 'else' value, else emit null. Note that a +/// null condition is the same as false. +/// +/// \param[in] cond Conditions (Boolean) +/// \param[in] cases Values (any type), along with an optional 'else' value. 
+/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result CaseWhen(const Datum& cond, const std::vector& cases, + ExecContext* ctx = NULLPTR); + +/// \brief Year returns year for each element of `values` +/// +/// \param[in] values input to extract year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Year(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsLeapYear returns if a year is a leap year for each element of `values` +/// +/// \param[in] values input to extract leap year indicator from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Month returns month for each element of `values`. +/// Month is encoded as January=1, December=12 +/// +/// \param[in] values input to extract month from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Month(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Day returns day number for each element of `values` +/// +/// \param[in] values input to extract day from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Day(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief YearMonthDay returns a struct containing the Year, Month and Day value for +/// each element of `values`. +/// +/// \param[in] values input to extract (year, month, day) struct from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief DayOfWeek returns number of the day of the week value for each element of +/// `values`. +/// +/// By default week starts on Monday denoted by 0 and ends on Sunday denoted +/// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be +/// set using DayOfWeekOptions +/// +/// \param[in] values input to extract number of the day of the week from +/// \param[in] options for setting start of the week and day numbering +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfWeek(const Datum& values, + DayOfWeekOptions options = DayOfWeekOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief DayOfYear returns number of day of the year for each element of `values`. +/// January 1st maps to day number 1, February 1st to 32, etc. +/// +/// \param[in] values input to extract number of day of the year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief ISOYear returns ISO year number for each element of `values`. +/// First week of an ISO year has the majority (4 or more) of its days in January. 
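+///
+/// A minimal illustrative sketch (`timestamps` is a placeholder timestamp
+/// array): around the new year the ISO year can differ from the calendar
+/// year, since January days that precede the first ISO week still belong to
+/// the previous ISO year.
+///
+/// \code
+/// auto calendar_year = arrow::compute::Year(timestamps);
+/// auto iso_year = arrow::compute::ISOYear(timestamps);
+/// \endcode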
+/// +/// \param[in] values input to extract ISO year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result ISOYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief USYear returns US epidemiological year number for each element of `values`. +/// First week of US epidemiological year has the majority (4 or more) of it's +/// days in January. Last week of US epidemiological year has the year's last +/// Wednesday in it. US epidemiological week starts on Sunday. +/// +/// \param[in] values input to extract US epidemiological year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result USYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief ISOWeek returns ISO week of year number for each element of `values`. +/// First ISO week has the majority (4 or more) of its days in January. +/// ISO week starts on Monday. Year can have 52 or 53 weeks. +/// Week numbering can start with 1. +/// +/// \param[in] values input to extract ISO week of year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief USWeek returns US week of year number for each element of `values`. +/// First US week has the majority (4 or more) of its days in January. +/// US week starts on Sunday. Year can have 52 or 53 weeks. +/// Week numbering starts with 1. +/// +/// \param[in] values input to extract US week of year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 6.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result USWeek(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Week returns week of year number for each element of `values`. +/// First ISO week has the majority (4 or more) of its days in January. +/// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1 +/// depending on DayOfWeekOptions.count_from_zero. +/// +/// \param[in] values input to extract week of year from +/// \param[in] options for setting numbering start +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 6.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result Week(const Datum& values, WeekOptions options = WeekOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for +/// each element of `values`. +/// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7. +/// +/// \param[in] values input to ISO calendar struct from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Quarter returns the quarter of year number for each element of `values` +/// First quarter maps to 1 and fourth quarter maps to 4. 
+/// +/// \param[in] values input to extract quarter of year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result Quarter(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Hour returns hour value for each element of `values` +/// +/// \param[in] values input to extract hour from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Hour(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Minute returns minutes value for each element of `values` +/// +/// \param[in] values input to extract minutes from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Minute(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Second returns seconds value for each element of `values` +/// +/// \param[in] values input to extract seconds from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Second(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Millisecond returns number of milliseconds since the last full second +/// for each element of `values` +/// +/// \param[in] values input to extract milliseconds from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Millisecond(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Microsecond returns number of microseconds since the last full millisecond +/// for each element of `values` +/// +/// \param[in] values input to extract microseconds from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Microsecond(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Nanosecond returns number of nanoseconds since the last full millisecond +/// for each element of `values` +/// +/// \param[in] values input to extract nanoseconds from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Subsecond returns the fraction of second elapsed since last full second +/// as a float for each element of `values` +/// +/// \param[in] values input to extract subsecond from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result Subsecond(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Format timestamps according to a format string +/// +/// Return formatted time strings according to the format string +/// `StrftimeOptions::format` and to the locale specifier `Strftime::locale`. 
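+///
+/// A minimal illustrative sketch (`timestamps` is a placeholder timestamp
+/// array; the single-argument format constructor of StrftimeOptions, defined
+/// earlier in this header, is assumed):
+///
+/// \code
+/// arrow::compute::StrftimeOptions options("%Y-%m-%d %H:%M:%S");
+/// auto formatted = arrow::compute::Strftime(timestamps, options);
+/// \endcode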
+/// +/// \param[in] values input timestamps +/// \param[in] options for setting format string and locale +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 6.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result Strftime(const Datum& values, StrftimeOptions options, + ExecContext* ctx = NULLPTR); + +/// \brief Parse timestamps according to a format string +/// +/// Return parsed timestamps according to the format string +/// `StrptimeOptions::format` at time resolution `Strftime::unit`. Parse errors are +/// raised depending on the `Strftime::error_is_null` setting. +/// +/// \param[in] values input strings +/// \param[in] options for setting format string, unit and error_is_null +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result Strptime(const Datum& values, StrptimeOptions options, + ExecContext* ctx = NULLPTR); + +/// \brief Converts timestamps from local timestamp without a timezone to a timestamp with +/// timezone, interpreting the local timestamp as being in the specified timezone for each +/// element of `values` +/// +/// \param[in] values input to convert +/// \param[in] options for setting source timezone, exception and ambiguous timestamp +/// handling. +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 6.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result AssumeTimezone(const Datum& values, + AssumeTimezoneOptions options, + ExecContext* ctx = NULLPTR); + +/// \brief IsDaylightSavings extracts if currently observing daylight savings for each +/// element of `values` +/// +/// \param[in] values input to extract daylight savings indicator from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result IsDaylightSavings(const Datum& values, + ExecContext* ctx = NULLPTR); + +/// \brief LocalTimestamp converts timestamp to timezone naive local timestamp +/// +/// \param[in] values input to convert to local time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result LocalTimestamp(const Datum& values, + ExecContext* ctx = NULLPTR); + +/// \brief Years Between finds the number of years between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result YearsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Quarters Between finds the number of quarters between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result QuartersBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Months Between finds the number of month between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function 
execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MonthsBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Weeks Between finds the number of weeks between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> WeeksBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Month Day Nano Between finds the number of months, days, and nanoseconds
+/// between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MonthDayNanoBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief DayTime Between finds the number of days and milliseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> DayTimeBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Days Between finds the number of days between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> DaysBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Hours Between finds the number of hours between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> HoursBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Minutes Between finds the number of minutes between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MinutesBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Seconds Between finds the number of seconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> SecondsBetween(const Datum& left, const Datum& right,
+ ExecContext* ctx = NULLPTR);
+
+/// \brief Milliseconds Between finds the number of milliseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+///
\param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MillisecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Microseconds Between finds the number of microseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MicrosecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Nanoseconds Between finds the number of nanoseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result NanosecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given +/// query key in a map. +/// +/// Returns an array of items for FIRST and LAST, and an array of list of items for ALL. +/// +/// \param[in] map to look in +/// \param[in] options to pass a query key and choose which matching keys to return +/// (FIRST, LAST or ALL) +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MapLookup(const Datum& map, MapLookupOptions options, + ExecContext* ctx = NULLPTR); +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..919572f16ee69edaa348f432d36214896b455732 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h @@ -0,0 +1,697 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/compute/function_options.h" +#include "arrow/compute/ordering.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace compute { + +class ExecContext; + +/// \addtogroup compute-concrete-options +/// @{ + +class ARROW_EXPORT FilterOptions : public FunctionOptions { + public: + /// Configure the action taken when a slot of the selection mask is null + enum NullSelectionBehavior { + /// The corresponding filtered value will be removed in the output. + DROP, + /// The corresponding filtered value will be null in the output. + EMIT_NULL, + }; + + explicit FilterOptions(NullSelectionBehavior null_selection = DROP); + static constexpr char const kTypeName[] = "FilterOptions"; + static FilterOptions Defaults() { return FilterOptions(); } + + NullSelectionBehavior null_selection_behavior = DROP; +}; + +class ARROW_EXPORT TakeOptions : public FunctionOptions { + public: + explicit TakeOptions(bool boundscheck = true); + static constexpr char const kTypeName[] = "TakeOptions"; + static TakeOptions BoundsCheck() { return TakeOptions(true); } + static TakeOptions NoBoundsCheck() { return TakeOptions(false); } + static TakeOptions Defaults() { return BoundsCheck(); } + + bool boundscheck = true; +}; + +/// \brief Options for the dictionary encode function +class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions { + public: + /// Configure how null values will be encoded + enum NullEncodingBehavior { + /// The null value will be added to the dictionary with a proper index. + ENCODE, + /// The null value will be masked in the indices array. + MASK + }; + + explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK); + static constexpr char const kTypeName[] = "DictionaryEncodeOptions"; + static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); } + + NullEncodingBehavior null_encoding_behavior = MASK; +}; + +/// \brief Options for the run-end encode function +class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions { + public: + explicit RunEndEncodeOptions(std::shared_ptr run_end_type = int32()); + static constexpr char const kTypeName[] = "RunEndEncodeOptions"; + static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); } + + std::shared_ptr run_end_type; +}; + +class ARROW_EXPORT ArraySortOptions : public FunctionOptions { + public: + explicit ArraySortOptions(SortOrder order = SortOrder::Ascending, + NullPlacement null_placement = NullPlacement::AtEnd); + static constexpr char const kTypeName[] = "ArraySortOptions"; + static ArraySortOptions Defaults() { return ArraySortOptions(); } + + /// Sorting order + SortOrder order; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +class ARROW_EXPORT SortOptions : public FunctionOptions { + public: + explicit SortOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd); + explicit SortOptions(const Ordering& ordering); + static constexpr char const kTypeName[] = "SortOptions"; + static SortOptions Defaults() { return SortOptions(); } + /// Convenience constructor to create an ordering from SortOptions + /// + /// Note: Both classes contain the exact same information. However, + /// sort_options should only be used in a "function options" context while Ordering + /// is used more generally. 
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); } + Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +/// \brief SelectK options +class ARROW_EXPORT SelectKOptions : public FunctionOptions { + public: + explicit SelectKOptions(int64_t k = -1, std::vector sort_keys = {}); + static constexpr char const kTypeName[] = "SelectKOptions"; + static SelectKOptions Defaults() { return SelectKOptions(); } + + static SelectKOptions TopKDefault(int64_t k, std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Descending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Descending)); + } + return SelectKOptions{k, keys}; + } + static SelectKOptions BottomKDefault(int64_t k, + std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Ascending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Ascending)); + } + return SelectKOptions{k, keys}; + } + + /// The number of `k` elements to keep. + int64_t k; + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; +}; + +/// \brief Rank options +class ARROW_EXPORT RankOptions : public FunctionOptions { + public: + /// Configure how ties between equal values are handled + enum Tiebreaker { + /// Ties get the smallest possible rank in sorted order. + Min, + /// Ties get the largest possible rank in sorted order. + Max, + /// Ranks are assigned in order of when ties appear in the input. + /// This ensures the ranks are a stable permutation of the input. + First, + /// The ranks span a dense [1, M] interval where M is the number + /// of distinct values in the input. + Dense + }; + + explicit RankOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First); + /// Convenience constructor for array inputs + explicit RankOptions(SortOrder order, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First) + : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {} + + static constexpr char const kTypeName[] = "RankOptions"; + static RankOptions Defaults() { return RankOptions(); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; + /// Tiebreaker for dealing with equal values in ranks + Tiebreaker tiebreaker; +}; + +/// \brief Partitioning options for NthToIndices +class ARROW_EXPORT PartitionNthOptions : public FunctionOptions { + public: + explicit PartitionNthOptions(int64_t pivot, + NullPlacement null_placement = NullPlacement::AtEnd); + PartitionNthOptions() : PartitionNthOptions(0) {} + static constexpr char const kTypeName[] = "PartitionNthOptions"; + + /// The index into the equivalent sorted array of the partition pivot element. 
+ int64_t pivot; + /// Whether nulls and NaNs are partitioned at the start or at the end + NullPlacement null_placement; +}; + +/// \brief Options for cumulative functions +/// \note Also aliased as CumulativeSumOptions for backward compatibility +class ARROW_EXPORT CumulativeOptions : public FunctionOptions { + public: + explicit CumulativeOptions(bool skip_nulls = false); + explicit CumulativeOptions(double start, bool skip_nulls = false); + explicit CumulativeOptions(std::shared_ptr start, bool skip_nulls = false); + static constexpr char const kTypeName[] = "CumulativeOptions"; + static CumulativeOptions Defaults() { return CumulativeOptions(); } + + /// Optional starting value for cumulative operation computation, default depends on the + /// operation and input type. + /// - sum: 0 + /// - prod: 1 + /// - min: maximum of the input type + /// - max: minimum of the input type + /// - mean: start is ignored because it has no meaning for mean + std::optional> start; + + /// If true, nulls in the input are ignored and produce a corresponding null output. + /// When false, the first null encountered is propagated through the remaining output. + bool skip_nulls = false; +}; +using CumulativeSumOptions = CumulativeOptions; // For backward compatibility + +/// \brief Options for pairwise functions +class ARROW_EXPORT PairwiseOptions : public FunctionOptions { + public: + explicit PairwiseOptions(int64_t periods = 1); + static constexpr char const kTypeName[] = "PairwiseOptions"; + static PairwiseOptions Defaults() { return PairwiseOptions(); } + + /// Periods to shift for applying the binary operation, accepts negative values. + int64_t periods = 1; +}; + +/// @} + +/// \brief Filter with a boolean selection filter +/// +/// The output will be populated with values from the input at positions +/// where the selection filter is not 0. Nulls in the filter will be handled +/// based on options.null_selection_behavior. 
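+///
+/// A minimal illustrative sketch (`values` and `mask` are placeholder arrays)
+/// showing how the two null behaviors are selected:
+///
+/// \code
+/// auto dropped = arrow::compute::Filter(values, mask);  // DROP is the default
+/// auto kept_as_null = arrow::compute::Filter(
+///     values, mask,
+///     arrow::compute::FilterOptions(arrow::compute::FilterOptions::EMIT_NULL));
+/// \endcode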
+/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// filter = [0, 1, 1, 0, null, 1], the output will be +/// (null_selection_behavior == DROP) = ["b", "c", "f"] +/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"] +/// +/// \param[in] values array to filter +/// \param[in] filter indicates which values should be filtered out +/// \param[in] options configures null_selection_behavior +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Filter(const Datum& values, const Datum& filter, + const FilterOptions& options = FilterOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +namespace internal { + +// These internal functions are implemented in kernels/vector_selection.cc + +/// \brief Return the number of selected indices in the boolean filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +int64_t GetFilterOutputSize(const ArraySpan& filter, + FilterOptions::NullSelectionBehavior null_selection); + +/// \brief Compute uint64 selection indices for use with Take given a boolean +/// filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +Result> GetTakeIndices( + const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection, + MemoryPool* memory_pool = default_memory_pool()); + +} // namespace internal + +/// \brief ReplaceWithMask replaces each value in the array corresponding +/// to a true value in the mask with the next element from `replacements`. +/// +/// \param[in] values Array input to replace +/// \param[in] mask Array or Scalar of Boolean mask values +/// \param[in] replacements The replacement values to draw from. There must +/// be as many replacement values as true values in the mask. +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result ReplaceWithMask(const Datum& values, const Datum& mask, + const Datum& replacements, ExecContext* ctx = NULLPTR); + +/// \brief FillNullForward fill null values in forward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in forward direction. +/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "c", "c", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief FillNullBackward fill null values in backward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in backward direction. 
+/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "f", "f", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Take from an array of values at indices in another array +/// +/// The output array will be of the same type as the input values +/// array, with elements taken from the values array at the given +/// indices. If an index is null then the taken element will be null. +/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// indices = [2, 1, null, 3], the output will be +/// = [values[2], values[1], null, values[3]] +/// = ["c", "b", null, null] +/// +/// \param[in] values datum from which to take +/// \param[in] indices which values to take +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Take(const Datum& values, const Datum& indices, + const TakeOptions& options = TakeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Take with Array inputs and output +ARROW_EXPORT +Result> Take(const Array& values, const Array& indices, + const TakeOptions& options = TakeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Drop Null from an array of values +/// +/// The output array will be of the same type as the input values +/// array, with elements taken from the values array without nulls. +/// +/// For example given values = ["a", "b", "c", null, "e", "f"], +/// the output will be = ["a", "b", "c", "e", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result DropNull(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief DropNull with Array inputs and output +ARROW_EXPORT +Result> DropNull(const Array& values, ExecContext* ctx = NULLPTR); + +/// \brief Return indices that partition an array around n-th sorted element. +/// +/// Find index of n-th(0 based) smallest value and perform indirect +/// partition of an array around that element. Output indices[0 ~ n-1] +/// holds values no greater than n-th element, and indices[n+1 ~ end] +/// holds values no less than n-th element. Elements in each partition +/// is not sorted. Nulls will be partitioned to the end of the output. +/// Output is not guaranteed to be stable. +/// +/// \param[in] values array to be partitioned +/// \param[in] n pivot array around sorted n-th element +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would partition an array +ARROW_EXPORT +Result> NthToIndices(const Array& values, int64_t n, + ExecContext* ctx = NULLPTR); + +/// \brief Return indices that partition an array around n-th sorted element. +/// +/// This overload takes a PartitionNthOptions specifying the pivot index +/// and the null handling. +/// +/// \param[in] values array to be partitioned +/// \param[in] options options including pivot index and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would partition an array +ARROW_EXPORT +Result> NthToIndices(const Array& values, + const PartitionNthOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return indices that would select the first `k` elements. 
+/// +/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output +/// array will contain indices such that the item indicated by the k-th index will be in +/// the position it would be if the datum were sorted by `options.sort_keys`. However, +/// indices of null values will not be part of the output. The sort is not guaranteed to +/// be stable. +/// +/// \param[in] datum datum to be partitioned +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return a datum with the same schema as the input +ARROW_EXPORT +Result> SelectKUnstable(const Datum& datum, + const SelectKOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// Perform an indirect sort of array. The output array will contain +/// indices that would sort an array, which would be the same length +/// as input. Nulls will be stably partitioned to the end of the output +/// regardless of order. +/// +/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order +/// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0, +/// 3]. +/// +/// \param[in] array array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. +/// +/// \param[in] array array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// Perform an indirect sort of chunked array. The output array will +/// contain indices that would sort a chunked array, which would be +/// the same length as input. Nulls will be stably partitioned to the +/// end of the output regardless of order. +/// +/// For example given chunked_array = [[null, 1], [3.3], [null, 2, +/// 5.3]] and order = SortOrder::DESCENDING, the output will be [5, 2, +/// 4, 1, 0, 3]. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an input in the +/// specified order. Input is one of array, chunked array record batch +/// or table. 
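+///
+/// A minimal sketch of the single-array overloads declared above, assuming
+/// `array` is a const reference to an arrow::Array of a sortable type:
+///
+/// \code
+///   // indices that would sort `array` in descending order
+///   auto indices = arrow::compute::SortIndices(
+///       array, arrow::compute::SortOrder::Descending);
+/// \endcode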
+/// +/// Perform an indirect sort of input. The output array will contain +/// indices that would sort an input, which would be the same length +/// as input. Nulls will be stably partitioned to the start or to the end +/// of the output depending on SortOrder::null_placement. +/// +/// For example given input (table) = { +/// "column1": [[null, 1], [ 3, null, 2, 1]], +/// "column2": [[ 5], [3, null, null, 5, 5]], +/// } and options = { +/// {"column1", SortOrder::Ascending}, +/// {"column2", SortOrder::Descending}, +/// }, the output will be [5, 1, 4, 2, 0, 3]. +/// +/// \param[in] datum array, chunked array, record batch or table to sort +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort a table +ARROW_EXPORT +Result> SortIndices(const Datum& datum, const SortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Compute unique elements from an array-like object +/// +/// Note if a null occurs in the input it will NOT be included in the output. +/// +/// \param[in] datum array-like input +/// \param[in] ctx the function execution context, optional +/// \return result as Array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Unique(const Datum& datum, ExecContext* ctx = NULLPTR); + +// Constants for accessing the output of ValueCounts +ARROW_EXPORT extern const char kValuesFieldName[]; +ARROW_EXPORT extern const char kCountsFieldName[]; +ARROW_EXPORT extern const int32_t kValuesFieldIndex; +ARROW_EXPORT extern const int32_t kCountsFieldIndex; + +/// \brief Return counts of unique elements from an array-like object. +/// +/// Note that the counts do not include counts for nulls in the array. These can be +/// obtained separately from metadata. +/// +/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values +/// which can lead to unexpected results if the input Array has these values. +/// +/// \param[in] value array-like input +/// \param[in] ctx the function execution context, optional +/// \return counts An array of structs. +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> ValueCounts(const Datum& value, + ExecContext* ctx = NULLPTR); + +/// \brief Dictionary-encode values in an array-like object +/// +/// Any nulls encountered in the dictionary will be handled according to the +/// specified null encoding behavior. +/// +/// For example, given values ["a", "b", null, "a", null] the output will be +/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null] +/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"] +/// +/// If the input is already dictionary encoded this function is a no-op unless +/// it needs to modify the null_encoding (TODO) +/// +/// \param[in] data array-like input +/// \param[in] ctx the function execution context, optional +/// \param[in] options configures null encoding behavior +/// \return result with same shape and type as input +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result DictionaryEncode( + const Datum& data, + const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Run-end-encode values in an array-like object +/// +/// The returned run-end encoded type uses the same value type of the input and +/// run-end type defined in the options. 
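+///
+/// A minimal sketch, assuming `values` is an arrow::Datum and that the
+/// run-end type from RunEndEncodeOptions::Defaults() is acceptable:
+///
+/// \code
+///   auto encoded = arrow::compute::RunEndEncode(values).ValueOrDie();
+///   auto decoded = arrow::compute::RunEndDecode(encoded).ValueOrDie();  // round trip
+/// \endcode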
+/// +/// \param[in] value array-like input +/// \param[in] options configures encoding behavior +/// \param[in] ctx the function execution context, optional +/// \return result with same shape but run-end encoded +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndEncode( + const Datum& value, + const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Decode a Run-End Encoded array to a plain array +/// +/// The output data type is the same as the values array type of run-end encoded +/// input. +/// +/// \param[in] value run-end-encoded input +/// \param[in] ctx the function execution context, optional +/// \return plain array resulting from decoding the run-end encoded input +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative sum of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative sum behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeSum( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative product of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative prod behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeProd( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative max of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative max behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMax( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative min of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative min behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMin( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative mean of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative mean behavior, `start` is ignored +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMean( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Return the first order difference of an array. +/// +/// Computes the first order difference of an array, i.e. +/// output[i] = input[i] - input[i - p] if i >= p +/// output[i] = null otherwise +/// where p is the period. For example, with p = 1, +/// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5]. 
+/// With p = 2, +/// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6] +/// p can also be negative, in which case the diff is computed in +/// the opposite direction. +/// \param[in] array array input +/// \param[in] options options, specifying overflow behavior and period +/// \param[in] check_overflow whether to return error on overflow +/// \param[in] ctx the function execution context, optional +/// \return result as array +ARROW_EXPORT +Result> PairwiseDiff(const Array& array, + const PairwiseOptions& options, + bool check_overflow = false, + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..18e56092dda2a5f8f997de5b5cd1c81262e77a8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/compute/function.h" +#include "arrow/compute/function_options.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; + +namespace compute { + +class ExecContext; + +/// \addtogroup compute-concrete-options +/// @{ + +class ARROW_EXPORT CastOptions : public FunctionOptions { + public: + explicit CastOptions(bool safe = true); + + static constexpr char const kTypeName[] = "CastOptions"; + static CastOptions Safe(TypeHolder to_type = {}) { + CastOptions safe(true); + safe.to_type = std::move(to_type); + return safe; + } + + static CastOptions Unsafe(TypeHolder to_type = {}) { + CastOptions unsafe(false); + unsafe.to_type = std::move(to_type); + return unsafe; + } + + // Type being casted to. May be passed separate to eager function + // compute::Cast + TypeHolder to_type; + + bool allow_int_overflow; + bool allow_time_truncate; + bool allow_time_overflow; + bool allow_decimal_truncate; + bool allow_float_truncate; + // Indicate if conversions from Binary/FixedSizeBinary to string must + // validate the utf8 payload. 
+ bool allow_invalid_utf8; + + /// true if the safety options all match CastOptions::Safe + /// + /// Note, if this returns false it does not mean is_unsafe will return true + bool is_safe() const; + /// true if the safety options all match CastOptions::Unsafe + /// + /// Note, if this returns false it does not mean is_safe will return true + bool is_unsafe() const; +}; + +/// @} + +/// \brief Return true if a cast function is defined +ARROW_EXPORT +bool CanCast(const DataType& from_type, const DataType& to_type); + +// ---------------------------------------------------------------------- +// Convenience invocation APIs for a number of kernels + +/// \brief Cast from one array type to another +/// \param[in] value array to cast +/// \param[in] to_type type to cast to +/// \param[in] options casting options +/// \param[in] ctx the function execution context, optional +/// \return the resulting array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Cast(const Array& value, const TypeHolder& to_type, + const CastOptions& options = CastOptions::Safe(), + ExecContext* ctx = NULLPTR); + +/// \brief Cast from one array type to another +/// \param[in] value array to cast +/// \param[in] options casting options. The "to_type" field must be populated +/// \param[in] ctx the function execution context, optional +/// \return the resulting array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const CastOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Cast from one value to another +/// \param[in] value datum to cast +/// \param[in] to_type type to cast to +/// \param[in] options casting options +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const TypeHolder& to_type, + const CastOptions& options = CastOptions::Safe(), + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbefe4a1ab7b7e432e07607f674b5de1c947cd5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h @@ -0,0 +1,489 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
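+
+// A minimal, hedged usage sketch for the one-shot execution API declared in
+// this header, assuming the default registry provides an "add" function and
+// that `a` and `b` are compatible arrow::Datum values:
+//
+//   arrow::Datum sum =
+//       arrow::compute::CallFunction("add", {a, b}).ValueOrDie();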
+ +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +// It seems like 64K might be a good default chunksize to use for execution +// based on the experience of other query processing systems. The current +// default is not to chunk contiguous arrays, though, but this may change in +// the future once parallel execution is implemented +static constexpr int64_t kDefaultExecChunksize = UINT16_MAX; + +/// \brief Context for expression-global variables and options used by +/// function evaluation +class ARROW_EXPORT ExecContext { + public: + // If no function registry passed, the default is used. + explicit ExecContext(MemoryPool* pool = default_memory_pool(), + ::arrow::internal::Executor* executor = NULLPTR, + FunctionRegistry* func_registry = NULLPTR); + + /// \brief The MemoryPool used for allocations, default is + /// default_memory_pool(). + MemoryPool* memory_pool() const { return pool_; } + + const ::arrow::internal::CpuInfo* cpu_info() const; + + /// \brief An Executor which may be used to parallelize execution. + ::arrow::internal::Executor* executor() const { return executor_; } + + /// \brief The FunctionRegistry for looking up functions by name and + /// selecting kernels for execution. Defaults to the library-global function + /// registry provided by GetFunctionRegistry. + FunctionRegistry* func_registry() const { return func_registry_; } + + // \brief Set maximum length unit of work for kernel execution. Larger + // contiguous array inputs will be split into smaller chunks, and, if + // possible and enabled, processed in parallel. The default chunksize is + // INT64_MAX, so contiguous arrays are not split. + void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; } + + // \brief Maximum length for ExecBatch data chunks processed by + // kernels. Contiguous array inputs with longer length will be split into + // smaller chunks. + int64_t exec_chunksize() const { return exec_chunksize_; } + + /// \brief Set whether to use multiple threads for function execution. This + /// is not yet used. + void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; } + + /// \brief If true, then utilize multiple threads where relevant for function + /// execution. This is not yet used. + bool use_threads() const { return use_threads_; } + + // Set the preallocation strategy for kernel execution as it relates to + // chunked execution. For chunked execution, whether via ChunkedArray inputs + // or splitting larger Array arguments into smaller pieces, contiguous + // allocation (if permitted by the kernel) will allocate one large array to + // write output into yielding it to the caller at the end. If this option is + // set to off, then preallocations will be performed independently for each + // chunk of execution + // + // TODO: At some point we might want the limit the size of contiguous + // preallocations. For example, even if the exec_chunksize is 64K or less, we + // might limit contiguous allocations to 1M records, say. 
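+  //
+  // A small illustration of the chunked-execution knobs on this class
+  // (the values below are arbitrary examples, not recommended defaults):
+  //
+  //   ExecContext ctx;
+  //   ctx.set_exec_chunksize(64 * 1024);       // split long arrays into 64K-row chunks
+  //   ctx.set_preallocate_contiguous(false);   // preallocate per chunk instead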
+ void set_preallocate_contiguous(bool preallocate) { + preallocate_contiguous_ = preallocate; + } + + /// \brief If contiguous preallocations should be used when doing chunked + /// execution as specified by exec_chunksize(). See + /// set_preallocate_contiguous() for more information. + bool preallocate_contiguous() const { return preallocate_contiguous_; } + + private: + MemoryPool* pool_; + ::arrow::internal::Executor* executor_; + FunctionRegistry* func_registry_; + int64_t exec_chunksize_ = std::numeric_limits::max(); + bool preallocate_contiguous_ = true; + bool use_threads_ = true; +}; + +// TODO: Consider standardizing on uint16 selection vectors and only use them +// when we can ensure that each value is 64K length or smaller + +/// \brief Container for an array of value selection indices that were +/// materialized from a filter. +/// +/// Columnar query engines (see e.g. [1]) have found that rather than +/// materializing filtered data, the filter can instead be converted to an +/// array of the "on" indices and then "fusing" these indices in operator +/// implementations. This is especially relevant for aggregations but also +/// applies to scalar operations. +/// +/// We are not yet using this so this is mostly a placeholder for now. +/// +/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf +class ARROW_EXPORT SelectionVector { + public: + explicit SelectionVector(std::shared_ptr data); + + explicit SelectionVector(const Array& arr); + + /// \brief Create SelectionVector from boolean mask + static Result> FromMask(const BooleanArray& arr); + + const int32_t* indices() const { return indices_; } + int32_t length() const; + + private: + std::shared_ptr data_; + const int32_t* indices_; +}; + +/// An index to represent that a batch does not belong to an ordered stream +constexpr int64_t kUnsequencedIndex = -1; + +/// \brief A unit of work for kernel execution. It contains a collection of +/// Array and Scalar values and an optional SelectionVector indicating that +/// there is an unmaterialized filter that either must be materialized, or (if +/// the kernel supports it) pushed down into the kernel implementation. +/// +/// ExecBatch is semantically similar to RecordBatch in that in a SQL context +/// it represents a collection of records, but constant "columns" are +/// represented by Scalar values rather than having to be converted into arrays +/// with repeated values. +/// +/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight +/// than is desirable for this class. Microbenchmarks would help determine for +/// sure. See ARROW-8928. + +/// \addtogroup acero-internals +/// @{ + +struct ARROW_EXPORT ExecBatch { + ExecBatch() = default; + ExecBatch(std::vector values, int64_t length) + : values(std::move(values)), length(length) {} + + explicit ExecBatch(const RecordBatch& batch); + + /// \brief Infer the ExecBatch length from values. + static Result InferLength(const std::vector& values); + + /// Creates an ExecBatch with length-validation. + /// + /// If any value is given, then all values must have a common length. If the given + /// length is negative, then the length of the ExecBatch is set to this common length, + /// or to 1 if no values are given. Otherwise, the given length must equal the common + /// length, if any value is given. 
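+  ///
+  /// A minimal sketch, assuming `a` and `b` are equal-length inputs already
+  /// wrapped as Datum:
+  ///
+  /// \code
+  ///   auto maybe_batch = arrow::compute::ExecBatch::Make({a, b});  // Result<ExecBatch>
+  /// \endcode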
+  static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);
+
+  Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
+      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
+
+  /// The values representing positional arguments to be passed to a kernel's
+  /// exec function for processing.
+  std::vector<Datum> values;
+
+  /// A deferred filter represented as an array of indices into the values.
+  ///
+  /// For example, the filter [true, true, false, true] would be represented as
+  /// the selection vector [0, 1, 3]. When the selection vector is set,
+  /// ExecBatch::length is equal to the length of this array.
+  std::shared_ptr<SelectionVector> selection_vector;
+
+  /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
+  Expression guarantee = literal(true);
+
+  /// The semantic length of the ExecBatch. When the values are all scalars,
+  /// the length should be set to 1 for non-aggregate kernels; otherwise the
+  /// length is taken from the array values, except when there is a selection
+  /// vector. When a selection vector is set, the length of the batch is
+  /// the length of the selection. Aggregate kernels can have an ExecBatch
+  /// formed by projecting just the partition columns from a batch, in which
+  /// case it would have scalar rows with length greater than 1.
+  ///
+  /// If the array values are of length 0 then the length is 0 regardless of
+  /// whether any values are Scalar.
+  int64_t length = 0;
+
+  /// \brief Index of this batch in a sorted stream of batches
+  ///
+  /// This index must be strictly monotonic starting at 0 without gaps, or
+  /// it can be set to kUnsequencedIndex if there is no meaningful order.
+  int64_t index = kUnsequencedIndex;
+
+  /// \brief The sum of bytes in each buffer referenced by the batch
+  ///
+  /// Note: Scalars are not counted.
+  /// Note: Some values may reference only part of a buffer, for
+  ///       example, an array with an offset. The actual data
+  ///       visible to this batch will be smaller than the total
+  ///       buffer size in this case.
+  int64_t TotalBufferSize() const;
+
+  /// \brief Return the value at the i-th index
+  template <typename index_type>
+  inline const Datum& operator[](index_type i) const {
+    return values[i];
+  }
+
+  bool Equals(const ExecBatch& other) const;
+
+  /// \brief A convenience for the number of values / arguments.
+  int num_values() const { return static_cast<int>(values.size()); }
+
+  ExecBatch Slice(int64_t offset, int64_t length) const;
+
+  Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;
+
+  /// \brief A convenience for returning the types from the batch.
+ std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + std::string ToString() const; +}; + +inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); } +inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*); + +/// @} + +/// \defgroup compute-internals Utilities for calling functions, useful for those +/// extending the function registry +/// +/// @{ + +struct ExecValue { + ArraySpan array = {}; + const Scalar* scalar = NULLPTR; + + ExecValue(Scalar* scalar) // NOLINT implicit conversion + : scalar(scalar) {} + + ExecValue(ArraySpan array) // NOLINT implicit conversion + : array(std::move(array)) {} + + ExecValue(const ArrayData& array) { // NOLINT implicit conversion + this->array.SetMembers(array); + } + + ExecValue() = default; + ExecValue(const ExecValue& other) = default; + ExecValue& operator=(const ExecValue& other) = default; + ExecValue(ExecValue&& other) = default; + ExecValue& operator=(ExecValue&& other) = default; + + int64_t length() const { return this->is_array() ? this->array.length : 1; } + + bool is_array() const { return this->scalar == NULLPTR; } + bool is_scalar() const { return !this->is_array(); } + + void SetArray(const ArrayData& array) { + this->array.SetMembers(array); + this->scalar = NULLPTR; + } + + void SetScalar(const Scalar* scalar) { this->scalar = scalar; } + + template + const ExactType& scalar_as() const { + return ::arrow::internal::checked_cast(*this->scalar); + } + + /// XXX: here temporarily for compatibility with datum, see + /// e.g. MakeStructExec in scalar_nested.cc + int64_t null_count() const { + if (this->is_array()) { + return this->array.GetNullCount(); + } else { + return this->scalar->is_valid ? 0 : 1; + } + } + + const DataType* type() const { + if (this->is_array()) { + return array.type; + } else { + return scalar->type.get(); + } + } +}; + +struct ARROW_EXPORT ExecResult { + // The default value of the variant is ArraySpan + std::variant> value; + + int64_t length() const { + if (this->is_array_span()) { + return this->array_span()->length; + } else { + return this->array_data()->length; + } + } + + const DataType* type() const { + if (this->is_array_span()) { + return this->array_span()->type; + } else { + return this->array_data()->type.get(); + } + } + + const ArraySpan* array_span() const { return &std::get(this->value); } + ArraySpan* array_span_mutable() { return &std::get(this->value); } + + bool is_array_span() const { return this->value.index() == 0; } + + const std::shared_ptr& array_data() const { + return std::get>(this->value); + } + ArrayData* array_data_mutable() { + return std::get>(this->value).get(); + } + + bool is_array_data() const { return this->value.index() == 1; } +}; + +/// \brief A "lightweight" column batch object which contains no +/// std::shared_ptr objects and does not have any memory ownership +/// semantics. Can represent a view onto an "owning" ExecBatch. 
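+///
+/// A minimal sketch, assuming `batch` is an existing ExecBatch that outlives
+/// the span (the span does not own its data):
+///
+/// \code
+///   arrow::compute::ExecSpan span(batch);   // non-owning view over the batch
+///   int n = span.num_values();              // number of positional arguments
+/// \endcode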
+struct ARROW_EXPORT ExecSpan { + ExecSpan() = default; + ExecSpan(const ExecSpan& other) = default; + ExecSpan& operator=(const ExecSpan& other) = default; + ExecSpan(ExecSpan&& other) = default; + ExecSpan& operator=(ExecSpan&& other) = default; + + explicit ExecSpan(std::vector values, int64_t length) + : length(length), values(std::move(values)) {} + + explicit ExecSpan(const ExecBatch& batch) { + this->length = batch.length; + this->values.resize(batch.values.size()); + for (size_t i = 0; i < batch.values.size(); ++i) { + const Datum& in_value = batch[i]; + ExecValue* out_value = &this->values[i]; + if (in_value.is_array()) { + out_value->SetArray(*in_value.array()); + } else { + out_value->SetScalar(in_value.scalar().get()); + } + } + } + + /// \brief Return the value at the i-th index + template + inline const ExecValue& operator[](index_type i) const { + return values[i]; + } + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + ExecBatch ToExecBatch() const { + ExecBatch result; + result.length = this->length; + for (const ExecValue& value : this->values) { + if (value.is_array()) { + result.values.push_back(value.array.ToArrayData()); + } else { + result.values.push_back(value.scalar->GetSharedPtr()); + } + } + return result; + } + + int64_t length = 0; + std::vector values; +}; + +/// \defgroup compute-call-function One-shot calls to compute functions +/// +/// @{ + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + ExecContext* ctx = NULLPTR); + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + ExecContext* ctx = NULLPTR); + +/// @} + +/// \defgroup compute-function-executor One-shot calls to obtain function executors +/// +/// @{ + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, std::vector in_types, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// \brief One-shot executor provider for all types of functions. 
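+///
+/// A minimal sketch of the Datum-based overload below, assuming the default
+/// registry knows the named function and `a`, `b` are arrow::Datum arguments
+/// (error handling elided):
+///
+/// \code
+///   auto exec = arrow::compute::GetFunctionExecutor("add", {a, b}).ValueOrDie();
+///   (void)exec->Init();                              // default options and context
+///   arrow::Datum out = exec->Execute({a, b}).ValueOrDie();
+/// \endcode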
+/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types (taken from the Datum arguments) +/// and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, const std::vector& args, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h new file mode 100644 index 0000000000000000000000000000000000000000..2b86f642166e2ccb8a49e3842d98120d59cb25e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h @@ -0,0 +1,409 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle. + +#pragma once + +#include +#include +#include + +#include "arrow/compute/kernel.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-functions +/// @{ + +/// \brief Contains the number of required arguments for the function. +/// +/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity. +struct ARROW_EXPORT Arity { + /// \brief A function taking no arguments + static Arity Nullary() { return Arity(0, false); } + + /// \brief A function taking 1 argument + static Arity Unary() { return Arity(1, false); } + + /// \brief A function taking 2 arguments + static Arity Binary() { return Arity(2, false); } + + /// \brief A function taking 3 arguments + static Arity Ternary() { return Arity(3, false); } + + /// \brief A function taking a variable number of arguments + /// + /// \param[in] min_args the minimum number of arguments required when + /// invoking the function + static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); } + + // NOTE: the 0-argument form (default constructor) is required for Cython + explicit Arity(int num_args = 0, bool is_varargs = false) + : num_args(num_args), is_varargs(is_varargs) {} + + /// The number of required arguments (or the minimum number for varargs + /// functions). + int num_args; + + /// If true, then the num_args is the minimum number of required arguments. + bool is_varargs = false; +}; + +struct ARROW_EXPORT FunctionDoc { + /// \brief A one-line summary of the function, using a verb. + /// + /// For example, "Add two numeric arrays or scalars". 
+ std::string summary; + + /// \brief A detailed description of the function, meant to follow the summary. + std::string description; + + /// \brief Symbolic names (identifiers) for the function arguments. + /// + /// Some bindings may use this to generate nicer function signatures. + std::vector arg_names; + + // TODO add argument descriptions? + + /// \brief Name of the options class, if any. + std::string options_class; + + /// \brief Whether options are required for function execution + /// + /// If false, then either the function does not have an options class + /// or there is a usable default options value. + bool options_required; + + FunctionDoc() = default; + + FunctionDoc(std::string summary, std::string description, + std::vector arg_names, std::string options_class = "", + bool options_required = false) + : summary(std::move(summary)), + description(std::move(description)), + arg_names(std::move(arg_names)), + options_class(std::move(options_class)), + options_required(options_required) {} + + static const FunctionDoc& Empty(); +}; + +/// \brief An executor of a function with a preconfigured kernel +class ARROW_EXPORT FunctionExecutor { + public: + virtual ~FunctionExecutor() = default; + /// \brief Initialize or re-initialize the preconfigured kernel + /// + /// This method may be called zero or more times. Depending on how + /// the FunctionExecutor was obtained, it may already have been initialized. + virtual Status Init(const FunctionOptions* options = NULLPTR, + ExecContext* exec_ctx = NULLPTR) = 0; + /// \brief Execute the preconfigured kernel with arguments that must fit it + /// + /// The method requires the arguments be castable to the preconfigured types. + /// + /// \param[in] args Arguments to execute the function on + /// \param[in] length Length of arguments batch or -1 to default it. If the + /// function has no parameters, this determines the batch length, defaulting + /// to 0. Otherwise, if the function is scalar, this must equal the argument + /// batch's inferred length or be -1 to default to it. This is ignored for + /// vector functions. + virtual Result Execute(const std::vector& args, int64_t length = -1) = 0; +}; + +/// \brief Base class for compute functions. Function implementations contain a +/// collection of "kernels" which are implementations of the function for +/// specific argument types. Selecting a viable kernel for executing a function +/// is referred to as "dispatching". +class ARROW_EXPORT Function { + public: + /// \brief The kind of function, which indicates in what contexts it is + /// valid for use. + enum Kind { + /// A function that performs scalar data operations on whole arrays of + /// data. Can generally process Array or Scalar values. The size of the + /// output will be the same as the size (or broadcasted size, in the case + /// of mixing Array and Scalar inputs) of the input. + SCALAR, + + /// A function with array input and output whose behavior depends on the + /// values of the entire arrays passed, rather than the value of each scalar + /// value. + VECTOR, + + /// A function that computes scalar summary statistics from array input. + SCALAR_AGGREGATE, + + /// A function that computes grouped summary statistics from array input + /// and an array of group identifiers. + HASH_AGGREGATE, + + /// A function that dispatches to other functions and does not contain its + /// own kernels. + META + }; + + virtual ~Function() = default; + + /// \brief The name of the kernel. The registry enforces uniqueness of names. 
+ const std::string& name() const { return name_; } + + /// \brief The kind of kernel, which indicates in what contexts it is valid + /// for use. + Function::Kind kind() const { return kind_; } + + /// \brief Contains the number of arguments the function requires, or if the + /// function accepts variable numbers of arguments. + const Arity& arity() const { return arity_; } + + /// \brief Return the function documentation + const FunctionDoc& doc() const { return doc_; } + + /// \brief Returns the number of registered kernels for this function. + virtual int num_kernels() const = 0; + + /// \brief Return a kernel that can execute the function given the exact + /// argument types (without implicit type casts). + /// + /// NB: This function is overridden in CastFunction. + virtual Result DispatchExact(const std::vector& types) const; + + /// \brief Return a best-match kernel that can execute the function given the argument + /// types, after implicit casts are applied. + /// + /// \param[in,out] values Argument types. An element may be modified to + /// indicate that the returned kernel only approximately matches the input + /// value descriptors; callers are responsible for casting inputs to the type + /// required by the kernel. + virtual Result DispatchBest(std::vector* values) const; + + /// \brief Get a function executor with a best-matching kernel + /// + /// The returned executor will by default work with the default FunctionOptions + /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`. + virtual Result> GetBestExecutor( + std::vector inputs) const; + + /// \brief Execute the function eagerly with the passed input arguments with + /// kernel dispatch, batch iteration, and memory allocation details taken + /// care of. + /// + /// If the `options` pointer is null, then `default_options()` will be used. + /// + /// This function can be overridden in subclasses. + virtual Result Execute(const std::vector& args, + const FunctionOptions* options, ExecContext* ctx) const; + + virtual Result Execute(const ExecBatch& batch, const FunctionOptions* options, + ExecContext* ctx) const; + + /// \brief Returns the default options for this function. + /// + /// Whatever option semantics a Function has, implementations must guarantee + /// that default_options() is valid to pass to Execute as options. + const FunctionOptions* default_options() const { return default_options_; } + + virtual Status Validate() const; + + /// \brief Returns the pure property for this function. + /// + /// Impure functions are those that may return different results for the same + /// input arguments. For example, a function that returns a random number is + /// not pure. An expression containing only pure functions can be simplified by + /// pre-evaluating any sub-expressions that have constant arguments. 
+ virtual bool is_pure() const { return true; } + + protected: + Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : name_(std::move(name)), + kind_(kind), + arity_(arity), + doc_(std::move(doc)), + default_options_(default_options) {} + + Status CheckArity(size_t num_args) const; + + std::string name_; + Function::Kind kind_; + Arity arity_; + const FunctionDoc doc_; + const FunctionOptions* default_options_ = NULLPTR; +}; + +namespace detail { + +template +class FunctionImpl : public Function { + public: + /// \brief Return pointers to current-available kernels for inspection + std::vector kernels() const { + std::vector result; + for (const auto& kernel : kernels_) { + result.push_back(&kernel); + } + return result; + } + + int num_kernels() const override { return static_cast(kernels_.size()); } + + protected: + FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : Function(std::move(name), kind, arity, std::move(doc), default_options) {} + + std::vector kernels_; +}; + +/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned. +ARROW_EXPORT +const Kernel* DispatchExactImpl(const Function* func, const std::vector&); + +/// \brief Return an error message if no Kernel is found. +ARROW_EXPORT +Status NoMatchingKernel(const Function* func, const std::vector&); + +} // namespace detail + +/// \brief A function that executes elementwise operations on arrays or +/// scalars, and therefore whose results generally do not depend on the order +/// of the values in the arguments. Accepts and returns arrays that are all of +/// the same size. These functions roughly correspond to the functions used in +/// SQL expressions. +class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl { + public: + using KernelType = ScalarKernel; + + ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR, bool is_pure = true) + : detail::FunctionImpl(std::move(name), Function::SCALAR, arity, + std::move(doc), default_options), + is_pure_(is_pure) {} + + /// \brief Add a kernel with given input/output types, no required state + /// initialization, preallocation for fixed-width types, and default null + /// handling (intersect validity bitmaps of inputs). + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarKernel kernel); + + /// \brief Returns the pure property for this function. + bool is_pure() const override { return is_pure_; } + + private: + const bool is_pure_; +}; + +/// \brief A function that executes general array operations that may yield +/// outputs of different sizes or have results that depend on the whole array +/// contents. These functions roughly correspond to the functions found in +/// non-SQL array languages like APL and its derivatives. 
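+///
+/// A hedged sketch of defining and registering a function of this family;
+/// `MyExec` is a hypothetical, user-supplied ArrayKernelExec and error
+/// handling is elided:
+///
+/// \code
+///   auto func = std::make_shared<VectorFunction>(
+///       "my_vector_function", Arity::Unary(), FunctionDoc::Empty());
+///   (void)func->AddKernel({InputType(int32())}, OutputType(int32()), MyExec);
+///   (void)GetFunctionRegistry()->AddFunction(std::move(func));
+/// \endcode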
+class ARROW_EXPORT VectorFunction : public detail::FunctionImpl { + public: + using KernelType = VectorKernel; + + VectorFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), Function::VECTOR, arity, + std::move(doc), default_options) {} + + /// \brief Add a simple kernel with given input/output types, no required + /// state initialization, no data preallocation, and no preallocation of the + /// validity bitmap. + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(VectorKernel kernel); +}; + +class ARROW_EXPORT ScalarAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = ScalarAggregateKernel; + + ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::SCALAR_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarAggregateKernel kernel); +}; + +class ARROW_EXPORT HashAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = HashAggregateKernel; + + HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::HASH_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(HashAggregateKernel kernel); +}; + +/// \brief A function that dispatches to other functions. Must implement +/// MetaFunction::ExecuteImpl. +/// +/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution +/// of concrete Function types, but must handle other Datum kinds on its own. +class ARROW_EXPORT MetaFunction : public Function { + public: + int num_kernels() const override { return 0; } + + Result Execute(const std::vector& args, const FunctionOptions* options, + ExecContext* ctx) const override; + + Result Execute(const ExecBatch& batch, const FunctionOptions* options, + ExecContext* ctx) const override; + + protected: + virtual Result ExecuteImpl(const std::vector& args, + const FunctionOptions* options, + ExecContext* ctx) const = 0; + + MetaFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : Function(std::move(name), Function::META, arity, std::move(doc), + default_options) {} +}; + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h new file mode 100644 index 0000000000000000000000000000000000000000..88ec2fd2d0679b5c849549179aa652bec9b37b56 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle. + +#pragma once + +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-functions +/// @{ + +/// \brief Extension point for defining options outside libarrow (but +/// still within this project). +class ARROW_EXPORT FunctionOptionsType { + public: + virtual ~FunctionOptionsType() = default; + + virtual const char* type_name() const = 0; + virtual std::string Stringify(const FunctionOptions&) const = 0; + virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0; + virtual Result> Serialize(const FunctionOptions&) const; + virtual Result> Deserialize( + const Buffer& buffer) const; + virtual std::unique_ptr Copy(const FunctionOptions&) const = 0; +}; + +/// \brief Base class for specifying options configuring a function's behavior, +/// such as error handling. +class ARROW_EXPORT FunctionOptions : public util::EqualityComparable { + public: + virtual ~FunctionOptions() = default; + + const FunctionOptionsType* options_type() const { return options_type_; } + const char* type_name() const { return options_type()->type_name(); } + + bool Equals(const FunctionOptions& other) const; + std::string ToString() const; + std::unique_ptr Copy() const; + /// \brief Serialize an options struct to a buffer. + Result> Serialize() const; + /// \brief Deserialize an options struct from a buffer. + /// Note: this will only look for `type_name` in the default FunctionRegistry; + /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then + /// call FunctionOptionsType::Deserialize(). + static Result> Deserialize( + const std::string& type_name, const Buffer& buffer); + + protected: + explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {} + const FunctionOptionsType* options_type_; +}; + +ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h new file mode 100644 index 0000000000000000000000000000000000000000..61caa2b570dd31dc988d34406f9b05c3573333e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/compare.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +enum class SortOrder { + /// Arrange values in increasing order + Ascending, + /// Arrange values in decreasing order + Descending, +}; + +enum class NullPlacement { + /// Place nulls and NaNs before any non-null values. + /// NaNs will come after nulls. + AtStart, + /// Place nulls and NaNs after any non-null values. + /// NaNs will come before nulls. + AtEnd, +}; + +/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices +class ARROW_EXPORT SortKey : public util::EqualityComparable { + public: + explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending) + : target(std::move(target)), order(order) {} + + bool Equals(const SortKey& other) const; + std::string ToString() const; + + /// A FieldRef targeting the sort column. + FieldRef target; + /// How to order by this sort key. + SortOrder order; +}; + +class ARROW_EXPORT Ordering : public util::EqualityComparable { + public: + Ordering(std::vector sort_keys, + NullPlacement null_placement = NullPlacement::AtStart) + : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {} + /// true if data ordered by other is also ordered by this + /// + /// For example, if data is ordered by [a, b, c] then it is also ordered + /// by [a, b] but not by [b, c] or [a, b, c, d]. + /// + /// [a, b].IsSuborderOf([a, b, c]) - true + /// [a, b, c].IsSuborderOf([a, b, c]) - true + /// [b, c].IsSuborderOf([a, b, c]) - false + /// [a, b, c, d].IsSuborderOf([a, b, c]) - false + /// + /// The implicit ordering is not a suborder of any other ordering and + /// no other ordering is a suborder of it. The implicit ordering is not a + /// suborder of itself. + /// + /// The unordered ordering is a suborder of all other orderings but no + /// other ordering is a suborder of it. The unordered ordering is a suborder + /// of itself. + /// + /// The unordered ordering is a suborder of the implicit ordering. + bool IsSuborderOf(const Ordering& other) const; + + bool Equals(const Ordering& other) const; + std::string ToString() const; + + bool is_implicit() const { return is_implicit_; } + bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); } + + const std::vector& sort_keys() const { return sort_keys_; } + NullPlacement null_placement() const { return null_placement_; } + + static const Ordering& Implicit() { + static const Ordering kImplicit(true); + return kImplicit; + } + + static const Ordering& Unordered() { + static const Ordering kUnordered(false); + // It is also possible to get an unordered ordering by passing in an empty vector + // using the normal constructor. This is ok and useful when ordering comes from user + // input. 
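+    // For illustration, Ordering({}) built through the public constructor
+    // describes the same unordered ordering as the instance returned here.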
+ return kUnordered; + } + + private: + explicit Ordering(bool is_implicit) + : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {} + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys_; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement_; + bool is_implicit_ = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h new file mode 100644 index 0000000000000000000000000000000000000000..f31c4c1ba5920626578a4e4170e3cd2d28288545 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +class Function; +class FunctionOptionsType; + +/// \brief A mutable central function registry for built-in functions as well +/// as user-defined functions. Functions are implementations of +/// arrow::compute::Function. +/// +/// Generally, each function contains kernels which are implementations of a +/// function for a specific argument signature. After looking up a function in +/// the registry, one can either execute it eagerly with Function::Execute or +/// use one of the function's dispatch methods to pick a suitable kernel for +/// lower-level function execution. +class ARROW_EXPORT FunctionRegistry { + public: + ~FunctionRegistry(); + + /// \brief Construct a new registry. + /// + /// Most users only need to use the global registry. + static std::unique_ptr Make(); + + /// \brief Construct a new nested registry with the given parent. + /// + /// Most users only need to use the global registry. The returned registry never changes + /// its parent, even when an operation allows overwriting. + static std::unique_ptr Make(FunctionRegistry* parent); + + /// \brief Check whether a new function can be added to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status CanAddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Add a new function to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status AddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Check whether an alias can be added for the given function name. 
+ /// + /// \returns Status::KeyError if the function with the given name is not registered. + Status CanAddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Add an alias for the given function name. + /// + /// \returns Status::KeyError if the function with the given name is not registered. + Status AddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Check whether a new function options type can be added to the registry. + /// + /// \return Status::KeyError if a function options type with the same name is already + /// registered. + Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Add a new function options type to the registry. + /// + /// \returns Status::KeyError if a function options type with the same name is already + /// registered. + Status AddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Retrieve a function by name from the registry. + Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const; + + /// \brief Return a vector of all entry names in the registry. + /// + /// Helpful for displaying a manifest of available functions. + std::vector<std::string> GetFunctionNames() const; + + /// \brief Retrieve a function options type by name from the registry. + Result<const FunctionOptionsType*> GetFunctionOptionsType( + const std::string& name) const; + + /// \brief The number of currently registered functions. + int num_functions() const; + + /// \brief The cast function object registered in AddFunction. + /// + /// Helpful for getting the cast function as needed. + const Function* cast_function() const; + + private: + FunctionRegistry(); + + // Use the PIMPL pattern to avoid having std::unordered_map here + class FunctionRegistryImpl; + std::unique_ptr<FunctionRegistryImpl> impl_; + + explicit FunctionRegistry(FunctionRegistryImpl* impl); +}; + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..89f32ceb0f906e0d50bf063da22f33c3a856fe5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { + +struct Datum; +struct TypeHolder; + +namespace compute { + +class Function; +class ScalarAggregateFunction; +class FunctionExecutor; +class FunctionOptions; +class FunctionRegistry; + +/// \brief Return the process-global function registry.
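///
/// A minimal lookup sketch (illustrative only; assumes the built-in "add"
/// function is registered, as it is in a default Arrow build):
///
///   auto* registry = arrow::compute::GetFunctionRegistry();
///   arrow::Result<std::shared_ptr<arrow::compute::Function>> maybe_add =
///       registry->GetFunction("add");
///   if (maybe_add.ok()) { /* dispatch a kernel or execute the function */ }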
+// Defined in registry.cc +ARROW_EXPORT FunctionRegistry* GetFunctionRegistry(); + +class CastOptions; + +struct ExecBatch; +class ExecContext; +class KernelContext; + +struct Kernel; +struct ScalarKernel; +struct ScalarAggregateKernel; +struct VectorKernel; + +struct KernelState; + +class Expression; + +ARROW_EXPORT ExecContext* default_exec_context(); +ARROW_EXPORT ExecContext* threaded_exec_context(); + +} // namespace compute +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/config.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/config.h new file mode 100644 index 0000000000000000000000000000000000000000..617d6c268b55ea344a3fe7f96141ff0f7e4d3f88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/config.h @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <optional> +#include <string> + +#include "arrow/status.h" +#include "arrow/util/config.h" // IWYU pragma: export +#include "arrow/util/visibility.h" + +namespace arrow { + +struct BuildInfo { + /// The packed version number, e.g. 1002003 (decimal) for Arrow 1.2.3 + int version; + /// The "major" version number, e.g. 1 for Arrow 1.2.3 + int version_major; + /// The "minor" version number, e.g. 2 for Arrow 1.2.3 + int version_minor; + /// The "patch" version number, e.g. 3 for Arrow 1.2.3 + int version_patch; + /// The version string, e.g. "1.2.3" + std::string version_string; + std::string so_version; + std::string full_so_version; + + /// The CMake compiler identifier, e.g. "GNU" + std::string compiler_id; + std::string compiler_version; + std::string compiler_flags; + + /// The git changeset id, if available + std::string git_id; + /// The git changeset description, if available + std::string git_description; + std::string package_kind; + + /// The uppercase build type, e.g. "DEBUG" or "RELEASE" + std::string build_type; +}; + +struct RuntimeInfo { + /// The enabled SIMD level + /// + /// This can be less than `detected_simd_level` if the ARROW_USER_SIMD_LEVEL + /// environment variable is set to another value. + std::string simd_level; + + /// The SIMD level available on the OS and CPU + std::string detected_simd_level; + + /// Whether using the OS-based timezone database + /// This is set at compile-time. + bool using_os_timezone_db; + + /// The path to the timezone database; unset by default. + std::optional<std::string> timezone_db_path; +}; + +/// \brief Get runtime build info. +/// +/// The returned values correspond to the exact loaded version of the Arrow library, +/// rather than the values frozen at application compile-time through the `ARROW_*` +/// preprocessor definitions. +ARROW_EXPORT +const BuildInfo& GetBuildInfo(); + +/// \brief Get runtime info.
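///
/// A minimal usage sketch (illustrative only; printing to std::cerr is an
/// assumption of the example, not part of the API):
///
///   const arrow::BuildInfo& build = arrow::GetBuildInfo();
///   arrow::RuntimeInfo rt = arrow::GetRuntimeInfo();
///   std::cerr << "Arrow " << build.version_string << ", SIMD: " << rt.simd_level << std::endl;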
+/// +ARROW_EXPORT +RuntimeInfo GetRuntimeInfo(); + +struct GlobalOptions { + /// Path to text timezone database. This is only configurable on Windows, + /// which does not have a compatible OS timezone database. + std::optional<std::string> timezone_db_path; +}; + +ARROW_EXPORT +Status Initialize(const GlobalOptions& options) noexcept; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h new file mode 100644 index 0000000000000000000000000000000000000000..31b2d2274c9008b32d63a4130b315a1ecba3e5a3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h @@ -0,0 +1,311 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <cstdint> +#include <memory> +#include <string> +#include <type_traits> +#include <utility> +#include <variant> +#include <vector> + +#include "arrow/array/data.h" +#include "arrow/scalar.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class RecordBatch; +class Table; + +/// \class Datum +/// \brief Variant type for various Arrow C++ data structures +struct ARROW_EXPORT Datum { + /// \brief The kind of datum stored + enum Kind { NONE, SCALAR, ARRAY, CHUNKED_ARRAY, RECORD_BATCH, TABLE }; + + /// \brief A placeholder type to represent an empty datum + struct Empty {}; + + /// \brief Datum variants may have a length. This special value indicates that the + /// current variant does not have a length. + static constexpr int64_t kUnknownLength = -1; + + /// \brief Storage of the actual datum.
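///
/// A minimal construction sketch (illustrative only; `arr` stands for any
/// previously built array and is an assumption of the example):
///
///   std::shared_ptr<arrow::Array> arr = /* any built array */;
///   arrow::Datum datum(arr);   // stored internally as std::shared_ptr<ArrayData>
///   // datum.kind() == arrow::Datum::ARRAY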
+ /// + /// Note: For arrays, ArrayData is stored instead of Array for easier processing + std::variant<Empty, std::shared_ptr<Scalar>, std::shared_ptr<ArrayData>, + std::shared_ptr<ChunkedArray>, std::shared_ptr<RecordBatch>, + std::shared_ptr<Table>> + value; + + /// \brief Empty datum, to be populated elsewhere + Datum() = default; + + Datum(const Datum& other) = default; + Datum& operator=(const Datum& other) = default; + Datum(Datum&& other) = default; + Datum& operator=(Datum&& other) = default; + + /// \brief Construct from a Scalar + Datum(std::shared_ptr<Scalar> value) // NOLINT implicit conversion + : value(std::move(value)) {} + + /// \brief Construct from an ArrayData + Datum(std::shared_ptr<ArrayData> value) // NOLINT implicit conversion + : value(std::move(value)) {} + + /// \brief Construct from an ArrayData + Datum(ArrayData arg) // NOLINT implicit conversion + : value(std::make_shared<ArrayData>(std::move(arg))) {} + + /// \brief Construct from an Array + Datum(const Array& value); // NOLINT implicit conversion + + /// \brief Construct from an Array + Datum(const std::shared_ptr<Array>& value); // NOLINT implicit conversion + + /// \brief Construct from a ChunkedArray + Datum(std::shared_ptr<ChunkedArray> value); // NOLINT implicit conversion + + /// \brief Construct from a RecordBatch + Datum(std::shared_ptr<RecordBatch> value); // NOLINT implicit conversion + + /// \brief Construct from a Table + Datum(std::shared_ptr<Table>
value); // NOLINT implicit conversion + + /// \brief Construct from a ChunkedArray. + /// + /// This can be expensive, prefer the shared_ptr<ChunkedArray> constructor + explicit Datum(const ChunkedArray& value); + + /// \brief Construct from a RecordBatch. + /// + /// This can be expensive, prefer the shared_ptr<RecordBatch> constructor + explicit Datum(const RecordBatch& value); + + /// \brief Construct from a Table. + /// + /// This can be expensive, prefer the shared_ptr<Table>
constructor + explicit Datum(const Table& value); + + /// \brief Cast from concrete subtypes of Array or Scalar to Datum + template <typename T, bool IsArray = std::is_base_of_v<Array, T>, + bool IsScalar = std::is_base_of_v<Scalar, T>, + typename = enable_if_t<IsArray || IsScalar>> + Datum(std::shared_ptr<T> value) // NOLINT implicit conversion + : Datum(std::shared_ptr<typename std::conditional<IsArray, Array, Scalar>::type>( + std::move(value))) {} + + /// \brief Cast from concrete subtypes of Array or Scalar to Datum + template <typename T, typename TV = std::remove_reference_t<T>, + bool IsArray = std::is_base_of_v<Array, TV>, + bool IsScalar = std::is_base_of_v<Scalar, TV>, + typename = enable_if_t<IsArray || IsScalar>> + Datum(T&& value) // NOLINT implicit conversion + : Datum(std::make_shared<TV>(std::forward<T>(value))) {} + + /// \brief Copy from concrete subtypes of Scalar. + /// + /// The concrete scalar type must be copyable (not all of them are). + template <typename T, typename = enable_if_t<std::is_base_of_v<Scalar, T>>> + Datum(const T& value) // NOLINT implicit conversion + : Datum(std::make_shared<T>(value)) {} + + // Convenience constructors + /// \brief Convenience constructor storing a bool scalar. + explicit Datum(bool value); + /// \brief Convenience constructor storing an int8 scalar. + explicit Datum(int8_t value); + /// \brief Convenience constructor storing a uint8 scalar. + explicit Datum(uint8_t value); + /// \brief Convenience constructor storing an int16 scalar. + explicit Datum(int16_t value); + /// \brief Convenience constructor storing a uint16 scalar. + explicit Datum(uint16_t value); + /// \brief Convenience constructor storing an int32 scalar. + explicit Datum(int32_t value); + /// \brief Convenience constructor storing a uint32 scalar. + explicit Datum(uint32_t value); + /// \brief Convenience constructor storing an int64 scalar. + explicit Datum(int64_t value); + /// \brief Convenience constructor storing a uint64 scalar. + explicit Datum(uint64_t value); + /// \brief Convenience constructor storing a float scalar. + explicit Datum(float value); + /// \brief Convenience constructor storing a double scalar. + explicit Datum(double value); + /// \brief Convenience constructor storing a string scalar. + explicit Datum(std::string value); + /// \brief Convenience constructor storing a string scalar. + explicit Datum(const char* value); + + /// \brief Convenience constructor for a DurationScalar from std::chrono::duration + template