diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9da94885ec6b22741b87e4d88bab7d0929b73aa2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.py @@ -0,0 +1,466 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + +""" +PyArrow is the python implementation of Apache Arrow. + +Apache Arrow is a cross-language development platform for in-memory data. +It specifies a standardized language-independent columnar memory format for +flat and hierarchical data, organized for efficient analytic operations on +modern hardware. It also provides computational libraries and zero-copy +streaming messaging and interprocess communication. + +For more information see the official page at https://arrow.apache.org +""" + +import gc as _gc +import importlib as _importlib +import os as _os +import platform as _platform +import sys as _sys +import warnings as _warnings + +try: + from ._generated_version import version as __version__ +except ImportError: + # Package is not installed, parse git tag at runtime + try: + import setuptools_scm + # Code duplicated from setup.py to avoid a dependency on each other + + def parse_git(root, **kwargs): + """ + Parse function for setuptools_scm that ignores tags for non-C++ + subprojects, e.g. apache-arrow-js-XXX tags. + """ + from setuptools_scm.git import parse + kwargs['describe_command'] = \ + "git describe --dirty --tags --long --match 'apache-arrow-[0-9]*.*'" + return parse(root, **kwargs) + __version__ = setuptools_scm.get_version('../', + parse=parse_git) + except ImportError: + __version__ = None + +# ARROW-8684: Disable GC while initializing Cython extension module, +# to workaround Cython bug in https://github.com/cython/cython/issues/3603 +_gc_enabled = _gc.isenabled() +_gc.disable() +import pyarrow.lib as _lib +if _gc_enabled: + _gc.enable() + +from pyarrow.lib import (BuildInfo, RuntimeInfo, set_timezone_db_path, + MonthDayNano, VersionInfo, cpp_build_info, + cpp_version, cpp_version_info, runtime_info, + cpu_count, set_cpu_count, enable_signal_handlers, + io_thread_count, set_io_thread_count) + + +def show_versions(): + """ + Print various version information, to help with error reporting. 
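+
+    Examples
+    --------
+    A minimal call sketch; the printed fields come from ``cpp_build_info``
+    and will differ between builds:
+
+    >>> import pyarrow as pa
+    >>> pa.show_versions()  # doctest: +SKIP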
+ """ + def print_entry(label, value): + print(f"{label: <26}: {value: <8}") + + print("pyarrow version info\n--------------------") + print_entry("Package kind", cpp_build_info.package_kind + if len(cpp_build_info.package_kind) > 0 + else "not indicated") + print_entry("Arrow C++ library version", cpp_build_info.version) + print_entry("Arrow C++ compiler", + f"{cpp_build_info.compiler_id} {cpp_build_info.compiler_version}") + print_entry("Arrow C++ compiler flags", cpp_build_info.compiler_flags) + print_entry("Arrow C++ git revision", cpp_build_info.git_id) + print_entry("Arrow C++ git description", cpp_build_info.git_description) + print_entry("Arrow C++ build type", cpp_build_info.build_type) + + +def _module_is_available(module): + try: + _importlib.import_module(f'pyarrow.{module}') + except ImportError: + return False + else: + return True + + +def _filesystem_is_available(fs): + try: + import pyarrow.fs + except ImportError: + return False + + try: + getattr(pyarrow.fs, fs) + except (ImportError, AttributeError): + return False + else: + return True + + +def show_info(): + """ + Print detailed version and platform information, for error reporting + """ + show_versions() + + def print_entry(label, value): + print(f" {label: <20}: {value: <8}") + + print("\nPlatform:") + print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}") + print_entry("SIMD Level", runtime_info().simd_level) + print_entry("Detected SIMD Level", runtime_info().detected_simd_level) + + pool = default_memory_pool() + print("\nMemory:") + print_entry("Default backend", pool.backend_name) + print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes") + print_entry("Max memory", f"{pool.max_memory()} bytes") + print_entry("Supported Backends", ', '.join(supported_memory_backends())) + + print("\nOptional modules:") + modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json", + "orc", "parquet"] + for module in modules: + status = "Enabled" if _module_is_available(module) else "-" + print(f" {module: <20}: {status: <8}") + + print("\nFilesystems:") + filesystems = ["GcsFileSystem", "HadoopFileSystem", "S3FileSystem"] + for fs in filesystems: + status = "Enabled" if _filesystem_is_available(fs) else "-" + print(f" {fs: <20}: {status: <8}") + + print("\nCompression Codecs:") + codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"] + for codec in codecs: + status = "Enabled" if Codec.is_available(codec) else "-" + print(f" {codec: <20}: {status: <8}") + + +from pyarrow.lib import (null, bool_, + int8, int16, int32, int64, + uint8, uint16, uint32, uint64, + time32, time64, timestamp, date32, date64, duration, + month_day_nano_interval, + float16, float32, float64, + binary, string, utf8, + large_binary, large_string, large_utf8, + decimal128, decimal256, + list_, large_list, map_, struct, + union, sparse_union, dense_union, + dictionary, + run_end_encoded, + fixed_shape_tensor, + field, + type_for_alias, + DataType, DictionaryType, StructType, + ListType, LargeListType, MapType, FixedSizeListType, + UnionType, SparseUnionType, DenseUnionType, + TimestampType, Time32Type, Time64Type, DurationType, + FixedSizeBinaryType, Decimal128Type, Decimal256Type, + BaseExtensionType, ExtensionType, + RunEndEncodedType, FixedShapeTensorType, + PyExtensionType, UnknownExtensionType, + register_extension_type, unregister_extension_type, + DictionaryMemo, + KeyValueMetadata, + Field, + Schema, + schema, + unify_schemas, + Array, Tensor, + array, chunked_array, record_batch, 
nulls, repeat, + SparseCOOTensor, SparseCSRMatrix, SparseCSCMatrix, + SparseCSFTensor, + infer_type, from_numpy_dtype, + NullArray, + NumericArray, IntegerArray, FloatingPointArray, + BooleanArray, + Int8Array, UInt8Array, + Int16Array, UInt16Array, + Int32Array, UInt32Array, + Int64Array, UInt64Array, + HalfFloatArray, FloatArray, DoubleArray, + ListArray, LargeListArray, MapArray, + FixedSizeListArray, UnionArray, + BinaryArray, StringArray, + LargeBinaryArray, LargeStringArray, + FixedSizeBinaryArray, + DictionaryArray, + Date32Array, Date64Array, TimestampArray, + Time32Array, Time64Array, DurationArray, + MonthDayNanoIntervalArray, + Decimal128Array, Decimal256Array, StructArray, ExtensionArray, + RunEndEncodedArray, FixedShapeTensorArray, + scalar, NA, _NULL as NULL, Scalar, + NullScalar, BooleanScalar, + Int8Scalar, Int16Scalar, Int32Scalar, Int64Scalar, + UInt8Scalar, UInt16Scalar, UInt32Scalar, UInt64Scalar, + HalfFloatScalar, FloatScalar, DoubleScalar, + Decimal128Scalar, Decimal256Scalar, + ListScalar, LargeListScalar, FixedSizeListScalar, + Date32Scalar, Date64Scalar, + Time32Scalar, Time64Scalar, + TimestampScalar, DurationScalar, + MonthDayNanoIntervalScalar, + BinaryScalar, LargeBinaryScalar, + StringScalar, LargeStringScalar, + FixedSizeBinaryScalar, DictionaryScalar, + MapScalar, StructScalar, UnionScalar, + RunEndEncodedScalar, ExtensionScalar) + +# Buffers, allocation +from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer, + Codec, compress, decompress, allocate_buffer) + +from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool, + total_allocated_bytes, set_memory_pool, + default_memory_pool, system_memory_pool, + jemalloc_memory_pool, mimalloc_memory_pool, + logging_memory_pool, proxy_memory_pool, + log_memory_allocations, jemalloc_set_decay_ms, + supported_memory_backends) + +# I/O +from pyarrow.lib import (NativeFile, PythonFile, + BufferedInputStream, BufferedOutputStream, CacheOptions, + CompressedInputStream, CompressedOutputStream, + TransformInputStream, transcoding_input_stream, + FixedSizeBufferWriter, + BufferReader, BufferOutputStream, + OSFile, MemoryMappedFile, memory_map, + create_memory_map, MockOutputStream, + input_stream, output_stream) + +from pyarrow._hdfsio import HdfsFile, have_libhdfs + +from pyarrow.lib import (ChunkedArray, RecordBatch, Table, table, + concat_arrays, concat_tables, TableGroupBy, + RecordBatchReader) + +# Exceptions +from pyarrow.lib import (ArrowCancelled, + ArrowCapacityError, + ArrowException, + ArrowKeyError, + ArrowIndexError, + ArrowInvalid, + ArrowIOError, + ArrowMemoryError, + ArrowNotImplementedError, + ArrowTypeError, + ArrowSerializationError) + +import pyarrow.hdfs as hdfs + +from pyarrow.ipc import serialize_pandas, deserialize_pandas +import pyarrow.ipc as ipc + +import pyarrow.types as types + + +# deprecated top-level access + + +from pyarrow.filesystem import FileSystem as _FileSystem +from pyarrow.filesystem import LocalFileSystem as _LocalFileSystem +from pyarrow.hdfs import HadoopFileSystem as _HadoopFileSystem + + +_localfs = _LocalFileSystem._get_instance() + + +_msg = ( + "pyarrow.{0} is deprecated as of 2.0.0, please use pyarrow.fs.{1} instead." +) + +_serialization_msg = ( + "'pyarrow.{0}' is deprecated and will be removed in a future version. " + "Use pickle or the pyarrow IPC functionality instead." 
+) + +_deprecated = { + "localfs": (_localfs, "LocalFileSystem"), + "FileSystem": (_FileSystem, "FileSystem"), + "LocalFileSystem": (_LocalFileSystem, "LocalFileSystem"), + "HadoopFileSystem": (_HadoopFileSystem, "HadoopFileSystem"), +} + + +def __getattr__(name): + if name in _deprecated: + obj, new_name = _deprecated[name] + _warnings.warn(_msg.format(name, new_name), + FutureWarning, stacklevel=2) + return obj + + raise AttributeError( + "module 'pyarrow' has no attribute '{0}'".format(name) + ) + + +# ---------------------------------------------------------------------- +# Deprecations + +from pyarrow.util import _deprecate_api, _deprecate_class + + +# TODO: Deprecate these somehow in the pyarrow namespace +from pyarrow.ipc import (Message, MessageReader, MetadataVersion, + RecordBatchFileReader, RecordBatchFileWriter, + RecordBatchStreamReader, RecordBatchStreamWriter) + +# ---------------------------------------------------------------------- +# Returning absolute path to the pyarrow include directory (if bundled, e.g. in +# wheels) + + +def get_include(): + """ + Return absolute path to directory containing Arrow C++ include + headers. Similar to numpy.get_include + """ + return _os.path.join(_os.path.dirname(__file__), 'include') + + +def _get_pkg_config_executable(): + return _os.environ.get('PKG_CONFIG', 'pkg-config') + + +def _has_pkg_config(pkgname): + import subprocess + try: + return subprocess.call([_get_pkg_config_executable(), + '--exists', pkgname]) == 0 + except FileNotFoundError: + return False + + +def _read_pkg_config_variable(pkgname, cli_args): + import subprocess + cmd = [_get_pkg_config_executable(), pkgname] + cli_args + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode != 0: + raise RuntimeError("pkg-config failed: " + err.decode('utf8')) + return out.rstrip().decode('utf8') + + +def get_libraries(): + """ + Return list of library names to include in the `libraries` argument for C + or Cython extensions using pyarrow + """ + return ['arrow_python', 'arrow'] + + +def create_library_symlinks(): + """ + With Linux and macOS wheels, the bundled shared libraries have an embedded + ABI version like libarrow.so.17 or libarrow.17.dylib and so linking to them + with -larrow won't work unless we create symlinks at locations like + site-packages/pyarrow/libarrow.so. This unfortunate workaround addresses + prior problems we had with shipping two copies of the shared libraries to + permit third party projects like turbodbc to build their C++ extensions + against the pyarrow wheels. + + This function must only be invoked once and only when the shared libraries + are bundled with the Python package, which should only apply to wheel-based + installs. It requires write access to the site-packages/pyarrow directory + and so depending on your system may need to be run with root. 
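+
+    Examples
+    --------
+    A hypothetical invocation, assuming a wheel-based install and write
+    access to the ``site-packages/pyarrow`` directory:
+
+    >>> import pyarrow as pa
+    >>> pa.create_library_symlinks()  # doctest: +SKIP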
+ """ + import glob + if _sys.platform == 'win32': + return + package_cwd = _os.path.dirname(__file__) + + if _sys.platform == 'linux': + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.so.*')) + + def get_symlink_path(hard_path): + return hard_path.rsplit('.', 1)[0] + else: + bundled_libs = glob.glob(_os.path.join(package_cwd, '*.*.dylib')) + + def get_symlink_path(hard_path): + return '.'.join((hard_path.rsplit('.', 2)[0], 'dylib')) + + for lib_hard_path in bundled_libs: + symlink_path = get_symlink_path(lib_hard_path) + if _os.path.exists(symlink_path): + continue + try: + _os.symlink(lib_hard_path, symlink_path) + except PermissionError: + print("Tried creating symlink {}. If you need to link to " + "bundled shared libraries, run " + "pyarrow.create_library_symlinks() as root") + + +def get_library_dirs(): + """ + Return lists of directories likely to contain Arrow C++ libraries for + linking C or Cython extensions using pyarrow + """ + package_cwd = _os.path.dirname(__file__) + library_dirs = [package_cwd] + + def append_library_dir(library_dir): + if library_dir not in library_dirs: + library_dirs.append(library_dir) + + # Search library paths via pkg-config. This is necessary if the user + # installed libarrow and the other shared libraries manually and they + # are not shipped inside the pyarrow package (see also ARROW-2976). + pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config' + for pkgname in ["arrow", "arrow_python"]: + if _has_pkg_config(pkgname): + library_dir = _read_pkg_config_variable(pkgname, + ["--libs-only-L"]) + # pkg-config output could be empty if Arrow is installed + # as a system package. + if library_dir: + if not library_dir.startswith("-L"): + raise ValueError( + "pkg-config --libs-only-L returned unexpected " + "value {!r}".format(library_dir)) + append_library_dir(library_dir[2:]) + + if _sys.platform == 'win32': + # TODO(wesm): Is this necessary, or does setuptools within a conda + # installation add Library\lib to the linker path for MSVC? + python_base_install = _os.path.dirname(_sys.executable) + library_dir = _os.path.join(python_base_install, 'Library', 'lib') + + if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')): + append_library_dir(library_dir) + + # ARROW-4074: Allow for ARROW_HOME to be set to some other directory + if _os.environ.get('ARROW_HOME'): + append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib')) + else: + # Python wheels bundle the Arrow libraries in the pyarrow directory. + append_library_dir(_os.path.dirname(_os.path.abspath(__file__))) + + return library_dirs diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx new file mode 100644 index 0000000000000000000000000000000000000000..11a7174eb3c9de5304b3bcbb5c7a20d79b5e83c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.pyx @@ -0,0 +1,170 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet encryption.""" + +from pyarrow.includes.libarrow_dataset_parquet cimport * +from pyarrow._parquet_encryption cimport * +from pyarrow._dataset_parquet cimport ParquetFragmentScanOptions, ParquetFileWriteOptions + + +cdef class ParquetEncryptionConfig(_Weakrefable): + """ + Core configuration class encapsulating parameters for high-level encryption + within the Parquet framework. + + The ParquetEncryptionConfig class serves as a bridge for passing encryption-related + parameters to the appropriate components within the Parquet library. It maintains references + to objects that define the encryption strategy, Key Management Service (KMS) configuration, + and specific encryption configurations for Parquet data. + + Parameters + ---------- + crypto_factory : pyarrow.parquet.encryption.CryptoFactory + Shared pointer to a `CryptoFactory` object. The `CryptoFactory` is responsible for + creating cryptographic components, such as encryptors and decryptors. + kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig + Shared pointer to a `KmsConnectionConfig` object. This object holds the configuration + parameters necessary for connecting to a Key Management Service (KMS). + encryption_config : pyarrow.parquet.encryption.EncryptionConfiguration + Shared pointer to an `EncryptionConfiguration` object. This object defines specific + encryption settings for Parquet data, including the keys assigned to different columns. + + Raises + ------ + ValueError + Raised if `encryption_config` is None. + """ + cdef: + shared_ptr[CParquetEncryptionConfig] c_config + + # Avoid mistakenly creating attributes + __slots__ = () + + def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config, + EncryptionConfiguration encryption_config): + + cdef shared_ptr[CEncryptionConfiguration] c_encryption_config + + if crypto_factory is None: + raise ValueError("crypto_factory cannot be None") + + if kms_connection_config is None: + raise ValueError("kms_connection_config cannot be None") + + if encryption_config is None: + raise ValueError("encryption_config cannot be None") + + self.c_config.reset(new CParquetEncryptionConfig()) + + c_encryption_config = pyarrow_unwrap_encryptionconfig( + encryption_config) + + self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory) + self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig( + kms_connection_config) + self.c_config.get().encryption_config = c_encryption_config + + @staticmethod + cdef wrap(shared_ptr[CParquetEncryptionConfig] c_config): + cdef ParquetEncryptionConfig python_config = ParquetEncryptionConfig.__new__(ParquetEncryptionConfig) + python_config.c_config = c_config + return python_config + + cdef shared_ptr[CParquetEncryptionConfig] unwrap(self): + return self.c_config + + +cdef class ParquetDecryptionConfig(_Weakrefable): + """ + Core configuration class encapsulating parameters for high-level decryption + within the Parquet framework. 
+ + ParquetDecryptionConfig is designed to pass decryption-related parameters to + the appropriate decryption components within the Parquet library. It holds references to + objects that define the decryption strategy, Key Management Service (KMS) configuration, + and specific decryption configurations for reading encrypted Parquet data. + + Parameters + ---------- + crypto_factory : pyarrow.parquet.encryption.CryptoFactory + Shared pointer to a `CryptoFactory` object, pivotal in creating cryptographic + components for the decryption process. + kms_connection_config : pyarrow.parquet.encryption.KmsConnectionConfig + Shared pointer to a `KmsConnectionConfig` object, containing parameters necessary + for connecting to a Key Management Service (KMS) during decryption. + decryption_config : pyarrow.parquet.encryption.DecryptionConfiguration + Shared pointer to a `DecryptionConfiguration` object, specifying decryption settings + for reading encrypted Parquet data. + + Raises + ------ + ValueError + Raised if `decryption_config` is None. + """ + + cdef: + shared_ptr[CParquetDecryptionConfig] c_config + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, CryptoFactory crypto_factory, KmsConnectionConfig kms_connection_config, + DecryptionConfiguration decryption_config): + + cdef shared_ptr[CDecryptionConfiguration] c_decryption_config + + if decryption_config is None: + raise ValueError( + "decryption_config cannot be None") + + self.c_config.reset(new CParquetDecryptionConfig()) + + c_decryption_config = pyarrow_unwrap_decryptionconfig( + decryption_config) + + self.c_config.get().crypto_factory = pyarrow_unwrap_cryptofactory(crypto_factory) + self.c_config.get().kms_connection_config = pyarrow_unwrap_kmsconnectionconfig( + kms_connection_config) + self.c_config.get().decryption_config = c_decryption_config + + @staticmethod + cdef wrap(shared_ptr[CParquetDecryptionConfig] c_config): + cdef ParquetDecryptionConfig python_config = ParquetDecryptionConfig.__new__(ParquetDecryptionConfig) + python_config.c_config = c_config + return python_config + + cdef shared_ptr[CParquetDecryptionConfig] unwrap(self): + return self.c_config + + +def set_encryption_config( + ParquetFileWriteOptions opts not None, + ParquetEncryptionConfig config not None +): + cdef shared_ptr[CParquetEncryptionConfig] c_config = config.unwrap() + opts.parquet_options.parquet_encryption_config = c_config + + +def set_decryption_config( + ParquetFragmentScanOptions opts not None, + ParquetDecryptionConfig config not None +): + cdef shared_ptr[CParquetDecryptionConfig] c_config = config.unwrap() + opts.parquet_options.parquet_decryption_config = c_config diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..395f48814433138ca54a2465a1aea7ec84ef7b6e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pyx @@ -0,0 +1,1631 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cpython.datetime cimport datetime, PyDateTime_DateTime +from cython cimport binding + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_python cimport PyDateTime_to_TimePoint +from pyarrow.lib import _detect_compression, frombytes, tobytes +from pyarrow.lib cimport * +from pyarrow.util import _stringify_path + +from abc import ABC, abstractmethod +from datetime import datetime, timezone +import os +import pathlib +import sys + + +cdef _init_ca_paths(): + cdef CFileSystemGlobalOptions options + + import ssl + paths = ssl.get_default_verify_paths() + if paths.cafile: + options.tls_ca_file_path = os.fsencode(paths.cafile) + if paths.capath: + options.tls_ca_dir_path = os.fsencode(paths.capath) + check_status(CFileSystemsInitialize(options)) + + +if sys.platform == 'linux': + # ARROW-9261: On Linux, we may need to fixup the paths to TLS CA certs + # (especially in manylinux packages) since the values hardcoded at + # compile-time in libcurl may be wrong. + _init_ca_paths() + + +cdef inline c_string _path_as_bytes(path) except *: + # handle only abstract paths, not bound to any filesystem like pathlib is, + # so we only accept plain strings + if not isinstance(path, (bytes, str)): + raise TypeError('Path must be a string') + # tobytes always uses utf-8, which is more or less ok, at least on Windows + # since the C++ side then decodes from utf-8. On Unix, os.fsencode may be + # better. + return tobytes(path) + + +cdef object _wrap_file_type(CFileType ty): + return FileType( ty) + + +cdef CFileType _unwrap_file_type(FileType ty) except *: + if ty == FileType.Unknown: + return CFileType_Unknown + elif ty == FileType.NotFound: + return CFileType_NotFound + elif ty == FileType.File: + return CFileType_File + elif ty == FileType.Directory: + return CFileType_Directory + assert 0 + + +def _file_type_to_string(ty): + # Python 3.11 changed str(IntEnum) to return the string representation + # of the integer value: https://github.com/python/cpython/issues/94763 + return f"{ty.__class__.__name__}.{ty._name_}" + + +cdef class FileInfo(_Weakrefable): + """ + FileSystem entry info. + + Parameters + ---------- + path : str + The full path to the filesystem entry. + type : FileType + The type of the filesystem entry. + mtime : datetime or float, default None + If given, the modification time of the filesystem entry. + If a float is given, it is the number of seconds since the + Unix epoch. + mtime_ns : int, default None + If given, the modification time of the filesystem entry, + in nanoseconds since the Unix epoch. + `mtime` and `mtime_ns` are mutually exclusive. + size : int, default None + If given, the filesystem entry size in bytes. This should only + be given if `type` is `FileType.File`. + + Examples + -------- + Generate a file: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> path_fs = local_path + '/pyarrow-fs-example.dat' + >>> with local.open_output_stream(path_fs) as stream: + ... 
stream.write(b'data') + 4 + + Get FileInfo object using ``get_file_info()``: + + >>> file_info = local.get_file_info(path_fs) + >>> file_info + + + Inspect FileInfo attributes: + + >>> file_info.type + + + >>> file_info.is_file + True + + >>> file_info.path + '/.../pyarrow-fs-example.dat' + + >>> file_info.base_name + 'pyarrow-fs-example.dat' + + >>> file_info.size + 4 + + >>> file_info.extension + 'dat' + + >>> file_info.mtime # doctest: +SKIP + datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc) + + >>> file_info.mtime_ns # doctest: +SKIP + 1656489370873922073 + """ + + def __init__(self, path, FileType type=FileType.Unknown, *, + mtime=None, mtime_ns=None, size=None): + self.info.set_path(tobytes(path)) + self.info.set_type(_unwrap_file_type(type)) + if mtime is not None: + if mtime_ns is not None: + raise TypeError("Only one of mtime and mtime_ns " + "can be given") + if isinstance(mtime, datetime): + self.info.set_mtime(PyDateTime_to_TimePoint( + mtime)) + else: + self.info.set_mtime(TimePoint_from_s(mtime)) + elif mtime_ns is not None: + self.info.set_mtime(TimePoint_from_ns(mtime_ns)) + if size is not None: + self.info.set_size(size) + + @staticmethod + cdef wrap(CFileInfo info): + cdef FileInfo self = FileInfo.__new__(FileInfo) + self.info = move(info) + return self + + cdef inline CFileInfo unwrap(self) nogil: + return self.info + + @staticmethod + cdef CFileInfo unwrap_safe(obj): + if not isinstance(obj, FileInfo): + raise TypeError("Expected FileInfo instance, got {0}" + .format(type(obj))) + return ( obj).unwrap() + + def __repr__(self): + def getvalue(attr): + try: + return getattr(self, attr) + except ValueError: + return '' + + s = (f'>> file_info = local.get_file_info(path) + >>> file_info.path + '/.../pyarrow-fs-example.dat' + """ + return frombytes(self.info.path()) + + @property + def base_name(self): + """ + The file base name. + + Component after the last directory separator. + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.base_name + 'pyarrow-fs-example.dat' + """ + return frombytes(self.info.base_name()) + + @property + def size(self): + """ + The size in bytes, if available. + + Only regular files are guaranteed to have a size. + + Returns + ------- + size : int or None + """ + cdef int64_t size + size = self.info.size() + return (size if size != -1 else None) + + @property + def extension(self): + """ + The file extension. + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.extension + 'dat' + """ + return frombytes(self.info.extension()) + + @property + def mtime(self): + """ + The time of last modification, if available. + + Returns + ------- + mtime : datetime.datetime or None + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.mtime # doctest: +SKIP + datetime.datetime(2022, 6, 29, 7, 56, 10, 873922, tzinfo=datetime.timezone.utc) + """ + cdef int64_t nanoseconds + nanoseconds = TimePoint_to_ns(self.info.mtime()) + return (datetime.fromtimestamp(nanoseconds / 1.0e9, timezone.utc) + if nanoseconds != -1 else None) + + @property + def mtime_ns(self): + """ + The time of last modification, if available, expressed in nanoseconds + since the Unix epoch. 
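+
+        This is the same timestamp as ``mtime``, but reported as an integer
+        count of nanoseconds rather than a ``datetime``.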
+ + Returns + ------- + mtime_ns : int or None + + Examples + -------- + >>> file_info = local.get_file_info(path) + >>> file_info.mtime_ns # doctest: +SKIP + 1656489370873922073 + """ + cdef int64_t nanoseconds + nanoseconds = TimePoint_to_ns(self.info.mtime()) + return (nanoseconds if nanoseconds != -1 else None) + + +cdef class FileSelector(_Weakrefable): + """ + File and directory selector. + + It contains a set of options that describes how to search for files and + directories. + + Parameters + ---------- + base_dir : str + The directory in which to select files. Relative paths also work, use + '.' for the current directory and '..' for the parent. + allow_not_found : bool, default False + The behavior if `base_dir` doesn't exist in the filesystem. + If false, an error is returned. + If true, an empty selection is returned. + recursive : bool, default False + Whether to recurse into subdirectories. + + Examples + -------- + List the contents of a directory and subdirectories: + + >>> selector_1 = fs.FileSelector(local_path, recursive=True) + >>> local.get_file_info(selector_1) # doctest: +SKIP + [, + , + ] + + List only the contents of the base directory: + + >>> selector_2 = fs.FileSelector(local_path) + >>> local.get_file_info(selector_2) # doctest: +SKIP + [, + ] + + Return empty selection if the directory doesn't exist: + + >>> selector_not_found = fs.FileSelector(local_path + '/missing', + ... recursive=True, + ... allow_not_found=True) + >>> local.get_file_info(selector_not_found) + [] + """ + + def __init__(self, base_dir, bint allow_not_found=False, + bint recursive=False): + self.base_dir = base_dir + self.recursive = recursive + self.allow_not_found = allow_not_found + + @staticmethod + cdef FileSelector wrap(CFileSelector wrapped): + cdef FileSelector self = FileSelector.__new__(FileSelector) + self.selector = move(wrapped) + return self + + cdef inline CFileSelector unwrap(self) nogil: + return self.selector + + @property + def base_dir(self): + return frombytes(self.selector.base_dir) + + @base_dir.setter + def base_dir(self, base_dir): + self.selector.base_dir = _path_as_bytes(base_dir) + + @property + def allow_not_found(self): + return self.selector.allow_not_found + + @allow_not_found.setter + def allow_not_found(self, bint allow_not_found): + self.selector.allow_not_found = allow_not_found + + @property + def recursive(self): + return self.selector.recursive + + @recursive.setter + def recursive(self, bint recursive): + self.selector.recursive = recursive + + def __repr__(self): + return ("".format(self)) + + +cdef class FileSystem(_Weakrefable): + """ + Abstract file system API. + """ + + def __init__(self): + raise TypeError("FileSystem is an abstract class, instantiate one of " + "the subclasses instead: LocalFileSystem or " + "SubTreeFileSystem") + + @staticmethod + def from_uri(uri): + """ + Create a new FileSystem from URI or Path. + + Recognized URI schemes are "file", "mock", "s3fs", "gs", "gcs", "hdfs" and "viewfs". + In addition, the argument can be a pathlib.Path object, or a string + describing an absolute local path. + + Parameters + ---------- + uri : string + URI-based path, for example: file:///some/local/path. + + Returns + ------- + tuple of (FileSystem, str path) + With (filesystem, path) tuple where path is the abstract path + inside the FileSystem instance. 
+ + Examples + -------- + Create a new FileSystem subclass from a URI: + + >>> uri = 'file:///{}/pyarrow-fs-example.dat'.format(local_path) + >>> local_new, path_new = fs.FileSystem.from_uri(uri) + >>> local_new + >> path_new + '/.../pyarrow-fs-example.dat' + + Or from a s3 bucket: + + >>> fs.FileSystem.from_uri("s3://usgs-landsat/collection02/") + (, 'usgs-landsat/collection02') + """ + cdef: + c_string c_path + c_string c_uri + CResult[shared_ptr[CFileSystem]] result + + if isinstance(uri, pathlib.Path): + # Make absolute + uri = uri.resolve().absolute() + c_uri = tobytes(_stringify_path(uri)) + with nogil: + result = CFileSystemFromUriOrPath(c_uri, &c_path) + return FileSystem.wrap(GetResultValue(result)), frombytes(c_path) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + self.wrapped = wrapped + self.fs = wrapped.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileSystem]& sp): + cdef FileSystem self + + typ = frombytes(sp.get().type_name()) + if typ == 'local': + self = LocalFileSystem.__new__(LocalFileSystem) + elif typ == 'mock': + self = _MockFileSystem.__new__(_MockFileSystem) + elif typ == 'subtree': + self = SubTreeFileSystem.__new__(SubTreeFileSystem) + elif typ == 's3': + from pyarrow._s3fs import S3FileSystem + self = S3FileSystem.__new__(S3FileSystem) + elif typ == 'gcs': + from pyarrow._gcsfs import GcsFileSystem + self = GcsFileSystem.__new__(GcsFileSystem) + elif typ == 'hdfs': + from pyarrow._hdfs import HadoopFileSystem + self = HadoopFileSystem.__new__(HadoopFileSystem) + elif typ.startswith('py::'): + self = PyFileSystem.__new__(PyFileSystem) + else: + raise TypeError('Cannot wrap FileSystem pointer') + + self.init(sp) + return self + + cdef inline shared_ptr[CFileSystem] unwrap(self) nogil: + return self.wrapped + + def equals(self, FileSystem other not None): + """ + Parameters + ---------- + other : pyarrow.fs.FileSystem + + Returns + ------- + bool + """ + return self.fs.Equals(other.unwrap()) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + @property + def type_name(self): + """ + The filesystem's type name. + """ + return frombytes(self.fs.type_name()) + + def get_file_info(self, paths_or_selector): + """ + Get info for the given files. + + Any symlink is automatically dereferenced, recursively. A non-existing + or unreachable file returns a FileStat object and has a FileType of + value NotFound. An exception indicates a truly exceptional condition + (low-level I/O error, etc.). + + Parameters + ---------- + paths_or_selector : FileSelector, path-like or list of path-likes + Either a selector object, a path-like object or a list of + path-like objects. The selector's base directory will not be + part of the results, even if it exists. If it doesn't exist, + use `allow_not_found`. + + Returns + ------- + FileInfo or list of FileInfo + Single FileInfo object is returned for a single path, otherwise + a list of FileInfo objects is returned. 
+ + Examples + -------- + >>> local + + >>> local.get_file_info("/{}/pyarrow-fs-example.dat".format(local_path)) + + """ + cdef: + CFileInfo info + c_string path + vector[CFileInfo] infos + vector[c_string] paths + CFileSelector selector + + if isinstance(paths_or_selector, FileSelector): + with nogil: + selector = (paths_or_selector).selector + infos = GetResultValue(self.fs.GetFileInfo(selector)) + elif isinstance(paths_or_selector, (list, tuple)): + paths = [_path_as_bytes(s) for s in paths_or_selector] + with nogil: + infos = GetResultValue(self.fs.GetFileInfo(paths)) + elif isinstance(paths_or_selector, (bytes, str)): + path =_path_as_bytes(paths_or_selector) + with nogil: + info = GetResultValue(self.fs.GetFileInfo(path)) + return FileInfo.wrap(info) + else: + raise TypeError('Must pass either path(s) or a FileSelector') + + return [FileInfo.wrap(info) for info in infos] + + def create_dir(self, path, *, bint recursive=True): + """ + Create a directory and subdirectories. + + This function succeeds if the directory already exists. + + Parameters + ---------- + path : str + The path of the new directory. + recursive : bool, default True + Create nested directories as well. + """ + cdef c_string directory = _path_as_bytes(path) + with nogil: + check_status(self.fs.CreateDir(directory, recursive=recursive)) + + def delete_dir(self, path): + """ + Delete a directory and its contents, recursively. + + Parameters + ---------- + path : str + The path of the directory to be deleted. + """ + cdef c_string directory = _path_as_bytes(path) + with nogil: + check_status(self.fs.DeleteDir(directory)) + + def delete_dir_contents(self, path, *, + bint accept_root_dir=False, + bint missing_dir_ok=False): + """ + Delete a directory's contents, recursively. + + Like delete_dir, but doesn't delete the directory itself. + + Parameters + ---------- + path : str + The path of the directory to be deleted. + accept_root_dir : boolean, default False + Allow deleting the root directory's contents + (if path is empty or "/") + missing_dir_ok : boolean, default False + If False then an error is raised if path does + not exist + """ + cdef c_string directory = _path_as_bytes(path) + if accept_root_dir and directory.strip(b"/") == b"": + with nogil: + check_status(self.fs.DeleteRootDirContents()) + else: + with nogil: + check_status(self.fs.DeleteDirContents(directory, + missing_dir_ok)) + + def move(self, src, dest): + """ + Move / rename a file or directory. + + If the destination exists: + - if it is a non-empty directory, an error is returned + - otherwise, if it has the same type as the source, it is replaced + - otherwise, behavior is unspecified (implementation-dependent). + + Parameters + ---------- + src : str + The path of the file or the directory to be moved. + dest : str + The destination path where the file or directory is moved to. + + Examples + -------- + Create a new folder with a file: + + >>> local.create_dir('/tmp/other_dir') + >>> local.copy_file(path,'/tmp/move_example.dat') + + Move the file: + + >>> local.move('/tmp/move_example.dat', + ... 
'/tmp/other_dir/move_example_2.dat') + + Inspect the file info: + + >>> local.get_file_info('/tmp/other_dir/move_example_2.dat') + + >>> local.get_file_info('/tmp/move_example.dat') + + + Delete the folder: + >>> local.delete_dir('/tmp/other_dir') + """ + cdef: + c_string source = _path_as_bytes(src) + c_string destination = _path_as_bytes(dest) + with nogil: + check_status(self.fs.Move(source, destination)) + + def copy_file(self, src, dest): + """ + Copy a file. + + If the destination exists and is a directory, an error is returned. + Otherwise, it is replaced. + + Parameters + ---------- + src : str + The path of the file to be copied from. + dest : str + The destination path where the file is copied to. + + Examples + -------- + >>> local.copy_file(path, + ... local_path + '/pyarrow-fs-example_copy.dat') + + Inspect the file info: + + >>> local.get_file_info(local_path + '/pyarrow-fs-example_copy.dat') + + >>> local.get_file_info(path) + + """ + cdef: + c_string source = _path_as_bytes(src) + c_string destination = _path_as_bytes(dest) + with nogil: + check_status(self.fs.CopyFile(source, destination)) + + def delete_file(self, path): + """ + Delete a file. + + Parameters + ---------- + path : str + The path of the file to be deleted. + """ + cdef c_string file = _path_as_bytes(path) + with nogil: + check_status(self.fs.DeleteFile(file)) + + def _wrap_input_stream(self, stream, path, compression, buffer_size): + if buffer_size is not None and buffer_size != 0: + stream = BufferedInputStream(stream, buffer_size) + if compression == 'detect': + compression = _detect_compression(path) + if compression is not None: + stream = CompressedInputStream(stream, compression) + return stream + + def _wrap_output_stream(self, stream, path, compression, buffer_size): + if buffer_size is not None and buffer_size != 0: + stream = BufferedOutputStream(stream, buffer_size) + if compression == 'detect': + compression = _detect_compression(path) + if compression is not None: + stream = CompressedOutputStream(stream, compression) + return stream + + def open_input_file(self, path): + """ + Open an input file for random access reading. + + Parameters + ---------- + path : str + The source to open for reading. + + Returns + ------- + stream : NativeFile + + Examples + -------- + Print the data from the file with `open_input_file()`: + + >>> with local.open_input_file(path) as f: + ... print(f.readall()) + b'data' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[CRandomAccessFile] in_handle + + with nogil: + in_handle = GetResultValue(self.fs.OpenInputFile(pathstr)) + + stream.set_random_access_file(in_handle) + stream.is_readable = True + return stream + + def open_input_stream(self, path, compression='detect', buffer_size=None): + """ + Open an input stream for sequential reading. + + Parameters + ---------- + path : str + The source to open for reading. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly decompression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary read buffer. 
+ + Returns + ------- + stream : NativeFile + + Examples + -------- + Print the data from the file with `open_input_stream()`: + + >>> with local.open_input_stream(path) as f: + ... print(f.readall()) + b'data' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[CInputStream] in_handle + + with nogil: + in_handle = GetResultValue(self.fs.OpenInputStream(pathstr)) + + stream.set_input_stream(in_handle) + stream.is_readable = True + + return self._wrap_input_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def open_output_stream(self, path, compression='detect', + buffer_size=None, metadata=None): + """ + Open an output stream for sequential writing. + + If the target already exists, existing data is truncated. + + Parameters + ---------- + path : str + The source to open for writing. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly compression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary write buffer. + metadata : dict optional, default None + If not None, a mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + Unsupported metadata keys will be ignored. + + Returns + ------- + stream : NativeFile + + Examples + -------- + >>> local = fs.LocalFileSystem() + >>> with local.open_output_stream(path) as stream: + ... stream.write(b'data') + 4 + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[COutputStream] out_handle + shared_ptr[const CKeyValueMetadata] c_metadata + + if metadata is not None: + c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata)) + + with nogil: + out_handle = GetResultValue( + self.fs.OpenOutputStream(pathstr, c_metadata)) + + stream.set_output_stream(out_handle) + stream.is_writable = True + + return self._wrap_output_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def open_append_stream(self, path, compression='detect', + buffer_size=None, metadata=None): + """ + Open an output stream for appending. + + If the target doesn't exist, a new empty file is created. + + .. note:: + Some filesystem implementations do not support efficient + appending to an existing file, in which case this method will + raise NotImplementedError. + Consider writing to multiple files (using e.g. the dataset layer) + instead. + + Parameters + ---------- + path : str + The source to open for writing. + compression : str optional, default 'detect' + The compression algorithm to use for on-the-fly compression. + If "detect" and source is a file path, then compression will be + chosen based on the file extension. + If None, no compression will be applied. Otherwise, a well-known + algorithm name must be supplied (e.g. "gzip"). + buffer_size : int optional, default None + If None or 0, no buffering will happen. Otherwise the size of the + temporary write buffer. + metadata : dict optional, default None + If not None, a mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + Unsupported metadata keys will be ignored. 
+ + Returns + ------- + stream : NativeFile + + Examples + -------- + Append new data to a FileSystem subclass with nonempty file: + + >>> with local.open_append_stream(path) as f: + ... f.write(b'+newly added') + 12 + + Print out the content fo the file: + + >>> with local.open_input_file(path) as f: + ... print(f.readall()) + b'data+newly added' + """ + cdef: + c_string pathstr = _path_as_bytes(path) + NativeFile stream = NativeFile() + shared_ptr[COutputStream] out_handle + shared_ptr[const CKeyValueMetadata] c_metadata + + if metadata is not None: + c_metadata = pyarrow_unwrap_metadata(KeyValueMetadata(metadata)) + + with nogil: + out_handle = GetResultValue( + self.fs.OpenAppendStream(pathstr, c_metadata)) + + stream.set_output_stream(out_handle) + stream.is_writable = True + + return self._wrap_output_stream( + stream, path=path, compression=compression, buffer_size=buffer_size + ) + + def normalize_path(self, path): + """ + Normalize filesystem path. + + Parameters + ---------- + path : str + The path to normalize + + Returns + ------- + normalized_path : str + The normalized path + """ + cdef: + c_string c_path = _path_as_bytes(path) + c_string c_path_normalized + + c_path_normalized = GetResultValue(self.fs.NormalizePath(c_path)) + return frombytes(c_path_normalized) + + +cdef class LocalFileSystem(FileSystem): + """ + A FileSystem implementation accessing files on the local machine. + + Details such as symlinks are abstracted away (symlinks are always followed, + except when deleting an entry). + + Parameters + ---------- + use_mmap : bool, default False + Whether open_input_stream and open_input_file should return + a mmap'ed file or a regular file. + + Examples + -------- + Create a FileSystem object with LocalFileSystem constructor: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> local + + + and write data on to the file: + + >>> with local.open_output_stream('/tmp/local_fs.dat') as stream: + ... stream.write(b'data') + 4 + >>> with local.open_input_stream('/tmp/local_fs.dat') as stream: + ... print(stream.readall()) + b'data' + + Create a FileSystem object inferred from a URI of the saved file: + + >>> local_new, path = fs.LocalFileSystem().from_uri('/tmp/local_fs.dat') + >>> local_new + >> path + '/tmp/local_fs.dat' + + Check if FileSystems `local` and `local_new` are equal: + + >>> local.equals(local_new) + True + + Compare two different FileSystems: + + >>> local2 = fs.LocalFileSystem(use_mmap=True) + >>> local.equals(local2) + False + + Copy a file and print out the data: + + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/local_fs-copy.dat') + >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as stream: + ... print(stream.readall()) + ... + b'data' + + Open an output stream for appending, add text and print the new data: + + >>> with local.open_append_stream('/tmp/local_fs-copy.dat') as f: + ... f.write(b'+newly added') + 12 + + >>> with local.open_input_stream('/tmp/local_fs-copy.dat') as f: + ... 
print(f.readall()) + b'data+newly added' + + Create a directory, copy a file into it and then delete the whole directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder') + + >>> local.delete_dir('/tmp/new_folder') + >>> local.get_file_info('/tmp/new_folder') + + + Create a directory, copy a file into it and then delete + the content of the directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + >>> local.delete_dir_contents('/tmp/new_folder') + >>> local.get_file_info('/tmp/new_folder') + + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + + Create a directory, copy a file into it and then delete + the file from the directory: + + >>> local.create_dir('/tmp/new_folder') + >>> local.copy_file('/tmp/local_fs.dat', '/tmp/new_folder/local_fs.dat') + >>> local.delete_file('/tmp/new_folder/local_fs.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs.dat') + + >>> local.get_file_info('/tmp/new_folder') + + + Move the file: + + >>> local.move('/tmp/local_fs-copy.dat', '/tmp/new_folder/local_fs-copy.dat') + >>> local.get_file_info('/tmp/new_folder/local_fs-copy.dat') + + >>> local.get_file_info('/tmp/local_fs-copy.dat') + + + To finish delete the file left: + >>> local.delete_file('/tmp/local_fs.dat') + """ + + def __init__(self, *, use_mmap=False): + cdef: + CLocalFileSystemOptions opts + shared_ptr[CLocalFileSystem] fs + + opts = CLocalFileSystemOptions.Defaults() + opts.use_mmap = use_mmap + + fs = make_shared[CLocalFileSystem](opts) + self.init( fs) + + cdef init(self, const shared_ptr[CFileSystem]& c_fs): + FileSystem.init(self, c_fs) + self.localfs = c_fs.get() + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return LocalFileSystem(**kwargs) + + def __reduce__(self): + cdef CLocalFileSystemOptions opts = self.localfs.options() + return LocalFileSystem._reconstruct, (dict( + use_mmap=opts.use_mmap),) + + +cdef class SubTreeFileSystem(FileSystem): + """ + Delegates to another implementation after prepending a fixed base path. + + This is useful to expose a logical view of a subtree of a filesystem, + for example a directory in a LocalFileSystem. + + Note, that this makes no security guarantee. For example, symlinks may + allow to "escape" the subtree and access other parts of the underlying + filesystem. + + Parameters + ---------- + base_path : str + The root of the subtree. + base_fs : FileSystem + FileSystem object the operations delegated to. + + Examples + -------- + Create a LocalFileSystem instance: + + >>> from pyarrow import fs + >>> local = fs.LocalFileSystem() + >>> with local.open_output_stream('/tmp/local_fs.dat') as stream: + ... stream.write(b'data') + 4 + + Create a directory and a SubTreeFileSystem instance: + + >>> local.create_dir('/tmp/sub_tree') + >>> subtree = fs.SubTreeFileSystem('/tmp/sub_tree', local) + + Write data into the existing file: + + >>> with subtree.open_append_stream('sub_tree_fs.dat') as f: + ... 
f.write(b'+newly added') + 12 + + Print out the attributes: + + >>> subtree.base_fs + + >>> subtree.base_path + '/tmp/sub_tree/' + + Get info for the given directory or given file: + + >>> subtree.get_file_info('') + + >>> subtree.get_file_info('sub_tree_fs.dat') + + + Delete the file and directory: + + >>> subtree.delete_file('sub_tree_fs.dat') + >>> local.delete_dir('/tmp/sub_tree') + >>> local.delete_file('/tmp/local_fs.dat') + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. + """ + + def __init__(self, base_path, FileSystem base_fs): + cdef: + c_string pathstr + shared_ptr[CSubTreeFileSystem] wrapped + + pathstr = _path_as_bytes(base_path) + wrapped = make_shared[CSubTreeFileSystem](pathstr, base_fs.wrapped) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.subtreefs = wrapped.get() + + def __repr__(self): + return ("SubTreeFileSystem(base_path={}, base_fs={}" + .format(self.base_path, self.base_fs)) + + def __reduce__(self): + return SubTreeFileSystem, ( + frombytes(self.subtreefs.base_path()), + FileSystem.wrap(self.subtreefs.base_fs()) + ) + + @property + def base_path(self): + return frombytes(self.subtreefs.base_path()) + + @property + def base_fs(self): + return FileSystem.wrap(self.subtreefs.base_fs()) + + +cdef class _MockFileSystem(FileSystem): + + def __init__(self, datetime current_time=None): + cdef shared_ptr[CMockFileSystem] wrapped + + current_time = current_time or datetime.now() + wrapped = make_shared[CMockFileSystem]( + PyDateTime_to_TimePoint( current_time) + ) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.mockfs = wrapped.get() + + +cdef class PyFileSystem(FileSystem): + """ + A FileSystem with behavior implemented in Python. + + Parameters + ---------- + handler : FileSystemHandler + The handler object implementing custom filesystem behavior. 
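+        For example, a :class:`FSSpecHandler` wrapping an fsspec filesystem,
+        as in the example below.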
+ + Examples + -------- + Create an fsspec-based filesystem object for GitHub: + + >>> from fsspec.implementations import github + >>> gfs = github.GithubFileSystem('apache', 'arrow') # doctest: +SKIP + + Get a PyArrow FileSystem object: + + >>> from pyarrow.fs import PyFileSystem, FSSpecHandler + >>> pa_fs = PyFileSystem(FSSpecHandler(gfs)) # doctest: +SKIP + + Use :func:`~pyarrow.fs.FileSystem` functionality ``get_file_info()``: + + >>> pa_fs.get_file_info('README.md') # doctest: +SKIP + + """ + + def __init__(self, handler): + cdef: + CPyFileSystemVtable vtable + shared_ptr[CPyFileSystem] wrapped + + if not isinstance(handler, FileSystemHandler): + raise TypeError("Expected a FileSystemHandler instance, got {0}" + .format(type(handler))) + + vtable.get_type_name = _cb_get_type_name + vtable.equals = _cb_equals + vtable.get_file_info = _cb_get_file_info + vtable.get_file_info_vector = _cb_get_file_info_vector + vtable.get_file_info_selector = _cb_get_file_info_selector + vtable.create_dir = _cb_create_dir + vtable.delete_dir = _cb_delete_dir + vtable.delete_dir_contents = _cb_delete_dir_contents + vtable.delete_root_dir_contents = _cb_delete_root_dir_contents + vtable.delete_file = _cb_delete_file + vtable.move = _cb_move + vtable.copy_file = _cb_copy_file + vtable.open_input_stream = _cb_open_input_stream + vtable.open_input_file = _cb_open_input_file + vtable.open_output_stream = _cb_open_output_stream + vtable.open_append_stream = _cb_open_append_stream + vtable.normalize_path = _cb_normalize_path + + wrapped = CPyFileSystem.Make(handler, move(vtable)) + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.pyfs = wrapped.get() + + @property + def handler(self): + """ + The filesystem's underlying handler. + + Returns + ------- + handler : FileSystemHandler + """ + return self.pyfs.handler() + + def __reduce__(self): + return PyFileSystem, (self.handler,) + + +class FileSystemHandler(ABC): + """ + An abstract class exposing methods to implement PyFileSystem's behavior. + """ + + @abstractmethod + def get_type_name(self): + """ + Implement PyFileSystem.type_name. + """ + + @abstractmethod + def get_file_info(self, paths): + """ + Implement PyFileSystem.get_file_info(paths). + + Parameters + ---------- + paths : list of str + paths for which we want to retrieve the info. + """ + + @abstractmethod + def get_file_info_selector(self, selector): + """ + Implement PyFileSystem.get_file_info(selector). + + Parameters + ---------- + selector : FileSelector + selector for which we want to retrieve the info. + """ + + @abstractmethod + def create_dir(self, path, recursive): + """ + Implement PyFileSystem.create_dir(...). + + Parameters + ---------- + path : str + path of the directory. + recursive : bool + if the parent directories should be created too. + """ + + @abstractmethod + def delete_dir(self, path): + """ + Implement PyFileSystem.delete_dir(...). + + Parameters + ---------- + path : str + path of the directory. + """ + + @abstractmethod + def delete_dir_contents(self, path, missing_dir_ok=False): + """ + Implement PyFileSystem.delete_dir_contents(...). + + Parameters + ---------- + path : str + path of the directory. + missing_dir_ok : bool + if False an error should be raised if path does not exist + """ + + @abstractmethod + def delete_root_dir_contents(self): + """ + Implement PyFileSystem.delete_dir_contents("/", accept_root_dir=True). 
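+
+        As seen in ``FileSystem.delete_dir_contents`` above, this handler
+        method is only invoked when the caller passes an empty or "/" path
+        together with ``accept_root_dir=True``.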
+ """ + + @abstractmethod + def delete_file(self, path): + """ + Implement PyFileSystem.delete_file(...). + + Parameters + ---------- + path : str + path of the file. + """ + + @abstractmethod + def move(self, src, dest): + """ + Implement PyFileSystem.move(...). + + Parameters + ---------- + src : str + path of what should be moved. + dest : str + path of where it should be moved to. + """ + + @abstractmethod + def copy_file(self, src, dest): + """ + Implement PyFileSystem.copy_file(...). + + Parameters + ---------- + src : str + path of what should be copied. + dest : str + path of where it should be copied to. + """ + + @abstractmethod + def open_input_stream(self, path): + """ + Implement PyFileSystem.open_input_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + """ + + @abstractmethod + def open_input_file(self, path): + """ + Implement PyFileSystem.open_input_file(...). + + Parameters + ---------- + path : str + path of what should be opened. + """ + + @abstractmethod + def open_output_stream(self, path, metadata): + """ + Implement PyFileSystem.open_output_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + metadata : mapping + Mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + """ + + @abstractmethod + def open_append_stream(self, path, metadata): + """ + Implement PyFileSystem.open_append_stream(...). + + Parameters + ---------- + path : str + path of what should be opened. + metadata : mapping + Mapping of string keys to string values. + Some filesystems support storing metadata along the file + (such as "Content-Type"). + """ + + @abstractmethod + def normalize_path(self, path): + """ + Implement PyFileSystem.normalize_path(...). + + Parameters + ---------- + path : str + path of what should be normalized. 
+ """ + +# Callback definitions for CPyFileSystemVtable + + +cdef void _cb_get_type_name(handler, c_string* out) except *: + out[0] = tobytes("py::" + handler.get_type_name()) + +cdef c_bool _cb_equals(handler, const CFileSystem& c_other) except False: + if c_other.type_name().startswith(b"py::"): + return ( c_other).handler() == handler + + return False + +cdef void _cb_get_file_info(handler, const c_string& path, + CFileInfo* out) except *: + infos = handler.get_file_info([frombytes(path)]) + if not isinstance(infos, list) or len(infos) != 1: + raise TypeError("get_file_info should have returned a 1-element list") + out[0] = FileInfo.unwrap_safe(infos[0]) + +cdef void _cb_get_file_info_vector(handler, const vector[c_string]& paths, + vector[CFileInfo]* out) except *: + py_paths = [frombytes(paths[i]) for i in range(len(paths))] + infos = handler.get_file_info(py_paths) + if not isinstance(infos, list): + raise TypeError("get_file_info should have returned a list") + out[0].clear() + out[0].reserve(len(infos)) + for info in infos: + out[0].push_back(FileInfo.unwrap_safe(info)) + +cdef void _cb_get_file_info_selector(handler, const CFileSelector& selector, + vector[CFileInfo]* out) except *: + infos = handler.get_file_info_selector(FileSelector.wrap(selector)) + if not isinstance(infos, list): + raise TypeError("get_file_info_selector should have returned a list") + out[0].clear() + out[0].reserve(len(infos)) + for info in infos: + out[0].push_back(FileInfo.unwrap_safe(info)) + +cdef void _cb_create_dir(handler, const c_string& path, + c_bool recursive) except *: + handler.create_dir(frombytes(path), recursive) + +cdef void _cb_delete_dir(handler, const c_string& path) except *: + handler.delete_dir(frombytes(path)) + +cdef void _cb_delete_dir_contents(handler, const c_string& path, + c_bool missing_dir_ok) except *: + handler.delete_dir_contents(frombytes(path), missing_dir_ok) + +cdef void _cb_delete_root_dir_contents(handler) except *: + handler.delete_root_dir_contents() + +cdef void _cb_delete_file(handler, const c_string& path) except *: + handler.delete_file(frombytes(path)) + +cdef void _cb_move(handler, const c_string& src, + const c_string& dest) except *: + handler.move(frombytes(src), frombytes(dest)) + +cdef void _cb_copy_file(handler, const c_string& src, + const c_string& dest) except *: + handler.copy_file(frombytes(src), frombytes(dest)) + +cdef void _cb_open_input_stream(handler, const c_string& path, + shared_ptr[CInputStream]* out) except *: + stream = handler.open_input_stream(frombytes(path)) + if not isinstance(stream, NativeFile): + raise TypeError("open_input_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_input_stream() + +cdef void _cb_open_input_file(handler, const c_string& path, + shared_ptr[CRandomAccessFile]* out) except *: + stream = handler.open_input_file(frombytes(path)) + if not isinstance(stream, NativeFile): + raise TypeError("open_input_file should have returned " + "a PyArrow file") + out[0] = ( stream).get_random_access_file() + +cdef void _cb_open_output_stream( + handler, const c_string& path, + const shared_ptr[const CKeyValueMetadata]& metadata, + shared_ptr[COutputStream]* out) except *: + stream = handler.open_output_stream( + frombytes(path), pyarrow_wrap_metadata(metadata)) + if not isinstance(stream, NativeFile): + raise TypeError("open_output_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_output_stream() + +cdef void _cb_open_append_stream( + handler, const c_string& path, + const 
shared_ptr[const CKeyValueMetadata]& metadata, + shared_ptr[COutputStream]* out) except *: + stream = handler.open_append_stream( + frombytes(path), pyarrow_wrap_metadata(metadata)) + if not isinstance(stream, NativeFile): + raise TypeError("open_append_stream should have returned " + "a PyArrow file") + out[0] = ( stream).get_output_stream() + +cdef void _cb_normalize_path(handler, const c_string& path, + c_string* out) except *: + out[0] = tobytes(handler.normalize_path(frombytes(path))) + + +def _copy_files(FileSystem source_fs, str source_path, + FileSystem destination_fs, str destination_path, + int64_t chunk_size, c_bool use_threads): + # low-level helper exposed through pyarrow/fs.py::copy_files + cdef: + CFileLocator c_source + vector[CFileLocator] c_sources + CFileLocator c_destination + vector[CFileLocator] c_destinations + + c_source.filesystem = source_fs.unwrap() + c_source.path = tobytes(source_path) + c_sources.push_back(c_source) + + c_destination.filesystem = destination_fs.unwrap() + c_destination.path = tobytes(destination_path) + c_destinations.push_back(c_destination) + + with nogil: + check_status(CCopyFiles( + c_sources, c_destinations, + c_default_io_context(), chunk_size, use_threads, + )) + + +def _copy_files_selector(FileSystem source_fs, FileSelector source_sel, + FileSystem destination_fs, str destination_base_dir, + int64_t chunk_size, c_bool use_threads): + # low-level helper exposed through pyarrow/fs.py::copy_files + cdef c_string c_destination_base_dir = tobytes(destination_base_dir) + + with nogil: + check_status(CCopyFilesWithSelector( + source_fs.unwrap(), source_sel.unwrap(), + destination_fs.unwrap(), c_destination_base_dir, + c_default_io_context(), chunk_size, use_threads, + )) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d36dad67abbaa575d8963273c884dd9e8f047b13 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.pyx @@ -0,0 +1,310 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (_Weakrefable, MemoryPool, + maybe_unbox_memory_pool, + get_input_stream, pyarrow_wrap_table, + pyarrow_wrap_schema, pyarrow_unwrap_schema) + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading JSON files. + + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. 
+ This will determine multi-threading granularity as well as + the size of individual chunks in the Table. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, use_threads=None, block_size=None): + self.options = CJSONReadOptions.Defaults() + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return self.options.use_threads + + @use_threads.setter + def use_threads(self, value): + self.options.use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + + This will determine multi-threading granularity as well as the size of + individual chunks in the Table. + """ + return self.options.block_size + + @block_size.setter + def block_size(self, value): + self.options.block_size = value + + def __reduce__(self): + return ReadOptions, ( + self.use_threads, + self.block_size + ) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.json.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size + ) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + @staticmethod + cdef ReadOptions wrap(CJSONReadOptions options): + out = ReadOptions() + out.options = options # shallow copy + return out + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing JSON files. + + Parameters + ---------- + explicit_schema : Schema, optional (default None) + Optional explicit schema (no type inference, ignores other fields). + newlines_in_values : bool, optional (default False) + Whether objects may be printed across multiple lines (for example + pretty printed). If false, input must end with an empty line. + unexpected_field_behavior : str, default "infer" + How JSON fields outside of explicit_schema (if given) are treated. + + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + """ + + __slots__ = () + + def __init__(self, explicit_schema=None, newlines_in_values=None, + unexpected_field_behavior=None): + self.options = CJSONParseOptions.Defaults() + if explicit_schema is not None: + self.explicit_schema = explicit_schema + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if unexpected_field_behavior is not None: + self.unexpected_field_behavior = unexpected_field_behavior + + def __reduce__(self): + return ParseOptions, ( + self.explicit_schema, + self.newlines_in_values, + self.unexpected_field_behavior + ) + + @property + def explicit_schema(self): + """ + Optional explicit schema (no type inference, ignores other fields) + """ + if self.options.explicit_schema.get() == NULL: + return None + else: + return pyarrow_wrap_schema(self.options.explicit_schema) + + @explicit_schema.setter + def explicit_schema(self, value): + self.options.explicit_schema = pyarrow_unwrap_schema(value) + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in JSON values. + Setting this to True reduces the performance of multi-threaded + JSON reading. 
+ """ + return self.options.newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + self.options.newlines_in_values = value + + @property + def unexpected_field_behavior(self): + """ + How JSON fields outside of explicit_schema (if given) are treated. + + Possible behaviors: + + - "ignore": unexpected JSON fields are ignored + - "error": error out on unexpected JSON fields + - "infer": unexpected JSON fields are type-inferred and included in + the output + + Set to "infer" by default. + """ + v = self.options.unexpected_field_behavior + if v == CUnexpectedFieldBehavior_Ignore: + return "ignore" + elif v == CUnexpectedFieldBehavior_Error: + return "error" + elif v == CUnexpectedFieldBehavior_InferType: + return "infer" + else: + raise ValueError('Unexpected value for unexpected_field_behavior') + + @unexpected_field_behavior.setter + def unexpected_field_behavior(self, value): + cdef CUnexpectedFieldBehavior v + + if value == "ignore": + v = CUnexpectedFieldBehavior_Ignore + elif value == "error": + v = CUnexpectedFieldBehavior_Error + elif value == "infer": + v = CUnexpectedFieldBehavior_InferType + else: + raise ValueError( + "Unexpected value `{}` for `unexpected_field_behavior`, pass " + "either `ignore`, `error` or `infer`.".format(value) + ) + + self.options.unexpected_field_behavior = v + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.json.ParseOptions + + Returns + ------- + bool + """ + return ( + self.explicit_schema == other.explicit_schema and + self.newlines_in_values == other.newlines_in_values and + self.unexpected_field_behavior == other.unexpected_field_behavior + ) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + @staticmethod + cdef ParseOptions wrap(CJSONParseOptions options): + out = ParseOptions() + out.options = options # shallow copy + return out + + +cdef _get_reader(input_file, shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + +cdef _get_read_options(ReadOptions read_options, CJSONReadOptions* out): + if read_options is None: + out[0] = CJSONReadOptions.Defaults() + else: + out[0] = read_options.options + +cdef _get_parse_options(ParseOptions parse_options, CJSONParseOptions* out): + if parse_options is None: + out[0] = CJSONParseOptions.Defaults() + else: + out[0] = parse_options.options + + +def read_json(input_file, read_options=None, parse_options=None, + MemoryPool memory_pool=None): + """ + Read a Table from a stream of JSON data. + + Parameters + ---------- + input_file : str, path or file-like object + The location of JSON data. Currently only the line-delimited JSON + format is supported. + read_options : pyarrow.json.ReadOptions, optional + Options for the JSON reader (see ReadOptions constructor for defaults). + parse_options : pyarrow.json.ParseOptions, optional + Options for the JSON parser + (see ParseOptions constructor for defaults). + memory_pool : MemoryPool, optional + Pool to allocate Table memory from. + + Returns + ------- + :class:`pyarrow.Table` + Contents of the JSON file as a in-memory table. 
+ """ + cdef: + shared_ptr[CInputStream] stream + CJSONReadOptions c_read_options + CJSONParseOptions c_parse_options + shared_ptr[CJSONReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + + reader = GetResultValue( + CJSONReader.Make(maybe_unbox_memory_pool(memory_pool), + stream, c_read_options, c_parse_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pyx new file mode 100644 index 0000000000000000000000000000000000000000..1dd6848122c2d4d5d2a40faf70bbb4647329f9d8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pyx @@ -0,0 +1,445 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ + +from cython.operator cimport dereference as deref +from libcpp.vector cimport vector as std_vector +from libcpp.utility cimport move +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (check_status, _Weakrefable, + MemoryPool, maybe_unbox_memory_pool, + pyarrow_wrap_schema, + pyarrow_wrap_batch, + Table, + pyarrow_wrap_table, + pyarrow_wrap_metadata, + pyarrow_unwrap_table, + get_reader, + get_writer) +from pyarrow.lib import frombytes, tobytes +from pyarrow.util import _stringify_path + + +cdef compression_type_from_enum(CCompressionType compression_type): + compression_map = { + CCompressionType_UNCOMPRESSED: 'UNCOMPRESSED', + CCompressionType_GZIP: 'ZLIB', + CCompressionType_SNAPPY: 'SNAPPY', + CCompressionType_LZ4: 'LZ4', + CCompressionType_ZSTD: 'ZSTD', + } + if compression_type in compression_map: + return compression_map[compression_type] + raise ValueError('Unsupported compression') + + +cdef CCompressionType compression_type_from_name(name) except *: + if not isinstance(name, str): + raise TypeError('compression must be a string') + name = name.upper() + if name == 'ZLIB': + return CCompressionType_GZIP + elif name == 'SNAPPY': + return CCompressionType_SNAPPY + elif name == 'LZ4': + return CCompressionType_LZ4 + elif name == 'ZSTD': + return CCompressionType_ZSTD + elif name == 'UNCOMPRESSED': + return CCompressionType_UNCOMPRESSED + raise ValueError(f'Unknown CompressionKind: {name}') + + +cdef compression_strategy_from_enum( + CompressionStrategy compression_strategy +): + compression_strategy_map = { + _CompressionStrategy_SPEED: 'SPEED', + _CompressionStrategy_COMPRESSION: 'COMPRESSION', + } + if compression_strategy in compression_strategy_map: + return compression_strategy_map[compression_strategy] + raise 
ValueError('Unsupported compression strategy') + + +cdef CompressionStrategy compression_strategy_from_name(name) except *: + if not isinstance(name, str): + raise TypeError('compression strategy must be a string') + name = name.upper() + if name == 'COMPRESSION': + return _CompressionStrategy_COMPRESSION + elif name == 'SPEED': + return _CompressionStrategy_SPEED + raise ValueError(f'Unknown CompressionStrategy: {name}') + + +cdef file_version_from_class(FileVersion file_version): + return frombytes(file_version.ToString()) + + +cdef writer_id_from_enum(WriterId writer_id): + writer_id_map = { + _WriterId_ORC_JAVA_WRITER: 'ORC_JAVA', + _WriterId_ORC_CPP_WRITER: 'ORC_CPP', + _WriterId_PRESTO_WRITER: 'PRESTO', + _WriterId_SCRITCHLEY_GO: 'SCRITCHLEY_GO', + _WriterId_TRINO_WRITER: 'TRINO', + } + if writer_id in writer_id_map: + return writer_id_map[writer_id] + raise ValueError('Unsupported writer ID') + + +cdef writer_version_from_enum(WriterVersion writer_version): + writer_version_map = { + _WriterVersion_ORIGINAL: 'ORIGINAL', + _WriterVersion_HIVE_8732: 'HIVE_8732', + _WriterVersion_HIVE_4243: 'HIVE_4243', + _WriterVersion_HIVE_12055: 'HIVE_12055', + _WriterVersion_HIVE_13083: 'HIVE_13083', + _WriterVersion_ORC_101: 'ORC_101', + _WriterVersion_ORC_135: 'ORC_135', + _WriterVersion_ORC_517: 'ORC_517', + _WriterVersion_ORC_203: 'ORC_203', + _WriterVersion_ORC_14: 'ORC_14', + } + if writer_version in writer_version_map: + return writer_version_map[writer_version] + raise ValueError('Unsupported writer version') + + +cdef shared_ptr[WriteOptions] _create_write_options( + file_version=None, + batch_size=None, + stripe_size=None, + compression=None, + compression_block_size=None, + compression_strategy=None, + row_index_stride=None, + padding_tolerance=None, + dictionary_key_size_threshold=None, + bloom_filter_columns=None, + bloom_filter_fpp=None +) except *: + """General writer options""" + cdef: + shared_ptr[WriteOptions] options + options = make_shared[WriteOptions]() + # batch_size + if batch_size is not None: + if isinstance(batch_size, int) and batch_size > 0: + deref(options).batch_size = batch_size + else: + raise ValueError(f"Invalid ORC writer batch size: {batch_size}") + # file_version + if file_version is not None: + if file_version == "0.12": + deref(options).file_version = FileVersion(0, 12) + elif file_version == "0.11": + deref(options).file_version = FileVersion(0, 11) + else: + raise ValueError(f"Unsupported ORC file version: {file_version}") + # stripe_size + if stripe_size is not None: + if isinstance(stripe_size, int) and stripe_size > 0: + deref(options).stripe_size = stripe_size + else: + raise ValueError(f"Invalid ORC stripe size: {stripe_size}") + # compression + if compression is not None: + if isinstance(compression, str): + deref(options).compression = compression_type_from_name( + compression) + else: + raise TypeError("Unsupported ORC compression type: " + f"{compression}") + # compression_block_size + if compression_block_size is not None: + if (isinstance(compression_block_size, int) and + compression_block_size > 0): + deref(options).compression_block_size = compression_block_size + else: + raise ValueError("Invalid ORC compression block size: " + f"{compression_block_size}") + # compression_strategy + if compression_strategy is not None: + if isinstance(compression, str): + deref(options).compression_strategy = \ + compression_strategy_from_name(compression_strategy) + else: + raise TypeError("Unsupported ORC compression strategy: " + f"{compression_strategy}") 
+ # row_index_stride + if row_index_stride is not None: + if isinstance(row_index_stride, int) and row_index_stride > 0: + deref(options).row_index_stride = row_index_stride + else: + raise ValueError("Invalid ORC row index stride: " + f"{row_index_stride}") + # padding_tolerance + if padding_tolerance is not None: + try: + padding_tolerance = float(padding_tolerance) + deref(options).padding_tolerance = padding_tolerance + except Exception: + raise ValueError("Invalid ORC padding tolerance: " + f"{padding_tolerance}") + # dictionary_key_size_threshold + if dictionary_key_size_threshold is not None: + try: + dictionary_key_size_threshold = float( + dictionary_key_size_threshold) + assert 0 <= dictionary_key_size_threshold <= 1 + deref(options).dictionary_key_size_threshold = \ + dictionary_key_size_threshold + except Exception: + raise ValueError("Invalid ORC dictionary key size threshold: " + f"{dictionary_key_size_threshold}") + # bloom_filter_columns + if bloom_filter_columns is not None: + try: + bloom_filter_columns = list(bloom_filter_columns) + for col in bloom_filter_columns: + assert isinstance(col, int) and col >= 0 + deref(options).bloom_filter_columns = bloom_filter_columns + except Exception: + raise ValueError("Invalid ORC BloomFilter columns: " + f"{bloom_filter_columns}") + # Max false positive rate of the Bloom Filter + if bloom_filter_fpp is not None: + try: + bloom_filter_fpp = float(bloom_filter_fpp) + assert 0 <= bloom_filter_fpp <= 1 + deref(options).bloom_filter_fpp = bloom_filter_fpp + except Exception: + raise ValueError("Invalid ORC BloomFilter false positive rate: " + f"{bloom_filter_fpp}") + return options + + +cdef class ORCReader(_Weakrefable): + cdef: + object source + CMemoryPool* allocator + unique_ptr[ORCFileReader] reader + + def __cinit__(self, MemoryPool memory_pool=None): + self.allocator = maybe_unbox_memory_pool(memory_pool) + + def open(self, object source, c_bool use_memory_map=True): + cdef: + shared_ptr[CRandomAccessFile] rd_handle + + self.source = source + + get_reader(source, use_memory_map, &rd_handle) + with nogil: + self.reader = move(GetResultValue( + ORCFileReader.Open(rd_handle, self.allocator) + )) + + def metadata(self): + """ + The arrow metadata for this file. + + Returns + ------- + metadata : pyarrow.KeyValueMetadata + """ + cdef: + shared_ptr[const CKeyValueMetadata] sp_arrow_metadata + + with nogil: + sp_arrow_metadata = GetResultValue( + deref(self.reader).ReadMetadata() + ) + + return pyarrow_wrap_metadata(sp_arrow_metadata) + + def schema(self): + """ + The arrow schema for this file. 
+ + Returns + ------- + schema : pyarrow.Schema + """ + cdef: + shared_ptr[CSchema] sp_arrow_schema + + with nogil: + sp_arrow_schema = GetResultValue(deref(self.reader).ReadSchema()) + + return pyarrow_wrap_schema(sp_arrow_schema) + + def nrows(self): + return deref(self.reader).NumberOfRows() + + def nstripes(self): + return deref(self.reader).NumberOfStripes() + + def file_version(self): + return file_version_from_class(deref(self.reader).GetFileVersion()) + + def software_version(self): + return frombytes(deref(self.reader).GetSoftwareVersion()) + + def compression(self): + return compression_type_from_enum( + GetResultValue(deref(self.reader).GetCompression())) + + def compression_size(self): + return deref(self.reader).GetCompressionSize() + + def row_index_stride(self): + return deref(self.reader).GetRowIndexStride() + + def writer(self): + writer_name = writer_id_from_enum(deref(self.reader).GetWriterId()) + if writer_name == 'UNKNOWN': + return deref(self.reader).GetWriterIdValue() + else: + return writer_name + + def writer_version(self): + return writer_version_from_enum(deref(self.reader).GetWriterVersion()) + + def nstripe_statistics(self): + return deref(self.reader).GetNumberOfStripeStatistics() + + def content_length(self): + return deref(self.reader).GetContentLength() + + def stripe_statistics_length(self): + return deref(self.reader).GetStripeStatisticsLength() + + def file_footer_length(self): + return deref(self.reader).GetFileFooterLength() + + def file_postscript_length(self): + return deref(self.reader).GetFilePostscriptLength() + + def file_length(self): + return deref(self.reader).GetFileLength() + + def serialized_file_tail(self): + return deref(self.reader).GetSerializedFileTail() + + def read_stripe(self, n, columns=None): + cdef: + shared_ptr[CRecordBatch] sp_record_batch + int64_t stripe + std_vector[c_string] c_names + + stripe = n + + if columns is None: + with nogil: + sp_record_batch = GetResultValue( + deref(self.reader).ReadStripe(stripe) + ) + else: + c_names = [tobytes(name) for name in columns] + with nogil: + sp_record_batch = GetResultValue( + deref(self.reader).ReadStripe(stripe, c_names) + ) + + return pyarrow_wrap_batch(sp_record_batch) + + def read(self, columns=None): + cdef: + shared_ptr[CTable] sp_table + std_vector[c_string] c_names + + if columns is None: + with nogil: + sp_table = GetResultValue(deref(self.reader).Read()) + else: + c_names = [tobytes(name) for name in columns] + with nogil: + sp_table = GetResultValue(deref(self.reader).Read(c_names)) + + return pyarrow_wrap_table(sp_table) + + +cdef class ORCWriter(_Weakrefable): + cdef: + unique_ptr[ORCFileWriter] writer + shared_ptr[COutputStream] sink + c_bool own_sink + + def open(self, object where, *, + file_version=None, + batch_size=None, + stripe_size=None, + compression=None, + compression_block_size=None, + compression_strategy=None, + row_index_stride=None, + padding_tolerance=None, + dictionary_key_size_threshold=None, + bloom_filter_columns=None, + bloom_filter_fpp=None): + cdef: + shared_ptr[WriteOptions] write_options + c_string c_where + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &self.sink) + self.own_sink = False + else: + c_where = tobytes(where) + with nogil: + self.sink = GetResultValue(FileOutputStream.Open(c_where)) + self.own_sink = True + + write_options = _create_write_options( + file_version=file_version, + batch_size=batch_size, + stripe_size=stripe_size, + compression=compression, + 
compression_block_size=compression_block_size, + compression_strategy=compression_strategy, + row_index_stride=row_index_stride, + padding_tolerance=padding_tolerance, + dictionary_key_size_threshold=dictionary_key_size_threshold, + bloom_filter_columns=bloom_filter_columns, + bloom_filter_fpp=bloom_filter_fpp + ) + + with nogil: + self.writer = move(GetResultValue( + ORCFileWriter.Open(self.sink.get(), + deref(write_options)))) + + def write(self, Table table): + cdef: + shared_ptr[CTable] sp_table + sp_table = pyarrow_unwrap_table(table) + with nogil: + check_status(deref(self.writer).Write(deref(sp_table))) + + def close(self): + with nogil: + check_status(deref(self.writer).Close()) + if self.own_sink: + check_status(deref(self.sink).Close()) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pyx new file mode 100644 index 0000000000000000000000000000000000000000..0b685245655a2d5d354ffb4b05dff1e9e52a8582 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pyx @@ -0,0 +1,2195 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ + +from collections.abc import Sequence +from textwrap import indent +import warnings + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (_Weakrefable, Buffer, Schema, + check_status, + MemoryPool, maybe_unbox_memory_pool, + Table, NativeFile, + pyarrow_wrap_chunked_array, + pyarrow_wrap_schema, + pyarrow_unwrap_schema, + pyarrow_wrap_table, + pyarrow_wrap_batch, + pyarrow_wrap_scalar, + NativeFile, get_reader, get_writer, + string_to_timeunit) + +from pyarrow.lib import (ArrowException, NativeFile, BufferOutputStream, + _stringify_path, + tobytes, frombytes) + +cimport cpython as cp + +_DEFAULT_ROW_GROUP_SIZE = 1024*1024 +_MAX_ROW_GROUP_SIZE = 64*1024*1024 + +cdef class Statistics(_Weakrefable): + """Statistics for a single column in a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + return """{} + has_min_max: {} + min: {} + max: {} + null_count: {} + distinct_count: {} + num_values: {} + physical_type: {} + logical_type: {} + converted_type (legacy): {}""".format(object.__repr__(self), + self.has_min_max, + self.min, + self.max, + self.null_count, + self.distinct_count, + self.num_values, + self.physical_type, + str(self.logical_type), + self.converted_type) + + def to_dict(self): + """ + Get dictionary representation of statistics. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
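+
+        Examples
+        --------
+        Illustrative use, starting from the metadata of an existing Parquet
+        file (the file name is hypothetical):
+
+        >>> import pyarrow.parquet as pq
+        >>> metadata = pq.read_metadata('example.parquet')  # doctest: +SKIP
+        >>> metadata.row_group(0).column(0).statistics.to_dict()  # doctest: +SKIP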
+ """ + d = dict( + has_min_max=self.has_min_max, + min=self.min, + max=self.max, + null_count=self.null_count, + distinct_count=self.distinct_count, + num_values=self.num_values, + physical_type=self.physical_type + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, Statistics other): + """ + Return whether the two column statistics objects are equal. + + Parameters + ---------- + other : Statistics + Statistics to compare against. + + Returns + ------- + are_equal : bool + """ + return self.statistics.get().Equals(deref(other.statistics.get())) + + @property + def has_min_max(self): + """Whether min and max are present (bool).""" + return self.statistics.get().HasMinMax() + + @property + def has_null_count(self): + """Whether null count is present (bool).""" + return self.statistics.get().HasNullCount() + + @property + def has_distinct_count(self): + """Whether distinct count is preset (bool).""" + return self.statistics.get().HasDistinctCount() + + @property + def min_raw(self): + """Min value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_min(self.statistics.get()) + else: + return None + + @property + def max_raw(self): + """Max value as physical type (bool, int, float, or bytes).""" + if self.has_min_max: + return _cast_statistic_raw_max(self.statistics.get()) + else: + return None + + @property + def min(self): + """ + Min value as logical type. + + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. + """ + if self.has_min_max: + min_scalar, _ = _cast_statistics(self.statistics.get()) + return min_scalar.as_py() + else: + return None + + @property + def max(self): + """ + Max value as logical type. + + Returned as the Python equivalent of logical type, such as datetime.date + for dates and decimal.Decimal for decimals. 
+ """ + if self.has_min_max: + _, max_scalar = _cast_statistics(self.statistics.get()) + return max_scalar.as_py() + else: + return None + + @property + def null_count(self): + """Number of null values in chunk (int).""" + if self.has_null_count: + return self.statistics.get().null_count() + else: + return None + + @property + def distinct_count(self): + """Distinct number of values in chunk (int).""" + if self.has_distinct_count: + return self.statistics.get().distinct_count() + else: + return None + + @property + def num_values(self): + """Number of non-null values (int).""" + return self.statistics.get().num_values() + + @property + def physical_type(self): + """Physical type of column (str).""" + raw_physical_type = self.statistics.get().physical_type() + return physical_type_name_from_enum(raw_physical_type) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.statistics.get().descr().logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + raw_converted_type = self.statistics.get().descr().converted_type() + return converted_type_name_from_enum(raw_converted_type) + + +cdef class ParquetLogicalType(_Weakrefable): + """Logical type of parquet type.""" + cdef: + shared_ptr[const CParquetLogicalType] type + + def __cinit__(self): + pass + + cdef init(self, const shared_ptr[const CParquetLogicalType]& type): + self.type = type + + def __repr__(self): + return "{}\n {}".format(object.__repr__(self), str(self)) + + def __str__(self): + return frombytes(self.type.get().ToString(), safe=True) + + def to_json(self): + """ + Get a JSON string containing type and type parameters. + + Returns + ------- + json : str + JSON representation of type, with at least a field called 'Type' + which contains the type name. If the type is parameterized, such + as a decimal with scale and precision, will contain those as fields + as well. 
+ """ + return frombytes(self.type.get().ToJSON()) + + @property + def type(self): + """Name of the logical type (str).""" + return logical_type_name_from_enum(self.type.get().type()) + + +cdef wrap_logical_type(const shared_ptr[const CParquetLogicalType]& type): + cdef ParquetLogicalType out = ParquetLogicalType() + out.init(type) + return out + + +cdef _cast_statistic_raw_min(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).min() + elif physical_type == ParquetType_INT32: + return ( statistics).min() + elif physical_type == ParquetType_INT64: + return ( statistics).min() + elif physical_type == ParquetType_FLOAT: + return ( statistics).min() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).min() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).min()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).min(), type_length) + + +cdef _cast_statistic_raw_max(CStatistics* statistics): + cdef ParquetType physical_type = statistics.physical_type() + cdef uint32_t type_length = statistics.descr().type_length() + if physical_type == ParquetType_BOOLEAN: + return ( statistics).max() + elif physical_type == ParquetType_INT32: + return ( statistics).max() + elif physical_type == ParquetType_INT64: + return ( statistics).max() + elif physical_type == ParquetType_FLOAT: + return ( statistics).max() + elif physical_type == ParquetType_DOUBLE: + return ( statistics).max() + elif physical_type == ParquetType_BYTE_ARRAY: + return _box_byte_array(( statistics).max()) + elif physical_type == ParquetType_FIXED_LEN_BYTE_ARRAY: + return _box_flba(( statistics).max(), type_length) + + +cdef _cast_statistics(CStatistics* statistics): + cdef: + shared_ptr[CScalar] c_min + shared_ptr[CScalar] c_max + check_status(StatisticsAsScalars(statistics[0], &c_min, &c_max)) + return (pyarrow_wrap_scalar(c_min), pyarrow_wrap_scalar(c_max)) + + +cdef _box_byte_array(ParquetByteArray val): + return cp.PyBytes_FromStringAndSize( val.ptr, val.len) + + +cdef _box_flba(ParquetFLBA val, uint32_t len): + return cp.PyBytes_FromStringAndSize( val.ptr, len) + + +cdef class ColumnChunkMetaData(_Weakrefable): + """Column metadata for a single row group.""" + + def __cinit__(self): + pass + + def __repr__(self): + statistics = indent(repr(self.statistics), 4 * ' ') + return """{0} + file_offset: {1} + file_path: {2} + physical_type: {3} + num_values: {4} + path_in_schema: {5} + is_stats_set: {6} + statistics: +{7} + compression: {8} + encodings: {9} + has_dictionary_page: {10} + dictionary_page_offset: {11} + data_page_offset: {12} + total_compressed_size: {13} + total_uncompressed_size: {14}""".format(object.__repr__(self), + self.file_offset, + self.file_path, + self.physical_type, + self.num_values, + self.path_in_schema, + self.is_stats_set, + statistics, + self.compression, + self.encodings, + self.has_dictionary_page, + self.dictionary_page_offset, + self.data_page_offset, + self.total_compressed_size, + self.total_uncompressed_size) + + def to_dict(self): + """ + Get dictionary representation of the column chunk metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
+ """ + statistics = self.statistics.to_dict() if self.is_stats_set else None + d = dict( + file_offset=self.file_offset, + file_path=self.file_path, + physical_type=self.physical_type, + num_values=self.num_values, + path_in_schema=self.path_in_schema, + is_stats_set=self.is_stats_set, + statistics=statistics, + compression=self.compression, + encodings=self.encodings, + has_dictionary_page=self.has_dictionary_page, + dictionary_page_offset=self.dictionary_page_offset, + data_page_offset=self.data_page_offset, + total_compressed_size=self.total_compressed_size, + total_uncompressed_size=self.total_uncompressed_size + ) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ColumnChunkMetaData other): + """ + Return whether the two column chunk metadata objects are equal. + + Parameters + ---------- + other : ColumnChunkMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + @property + def file_offset(self): + """Offset into file where column chunk is located (int).""" + return self.metadata.file_offset() + + @property + def file_path(self): + """Optional file path if set (str or None).""" + return frombytes(self.metadata.file_path()) + + @property + def physical_type(self): + """Physical type of column (str).""" + return physical_type_name_from_enum(self.metadata.type()) + + @property + def num_values(self): + """Total number of values (int).""" + return self.metadata.num_values() + + @property + def path_in_schema(self): + """Nested path to field, separated by periods (str).""" + path = self.metadata.path_in_schema().get().ToDotString() + return frombytes(path) + + @property + def is_stats_set(self): + """Whether or not statistics are present in metadata (bool).""" + return self.metadata.is_stats_set() + + @property + def statistics(self): + """Statistics for column chunk (:class:`Statistics`).""" + if not self.metadata.is_stats_set(): + return None + statistics = Statistics() + statistics.init(self.metadata.statistics(), self) + return statistics + + @property + def compression(self): + """ + Type of compression used for column (str). + + One of 'UNCOMPRESSED', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD', + or 'UNKNOWN'. + """ + return compression_name_from_enum(self.metadata.compression()) + + @property + def encodings(self): + """ + Encodings used for column (tuple of str). + + One of 'PLAIN', 'BIT_PACKED', 'RLE', 'BYTE_STREAM_SPLIT', 'DELTA_BINARY_PACKED', + 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'. 
+ """ + return tuple(map(encoding_name_from_enum, self.metadata.encodings())) + + @property + def has_dictionary_page(self): + """Whether there is dictionary data present in the column chunk (bool).""" + return bool(self.metadata.has_dictionary_page()) + + @property + def dictionary_page_offset(self): + """Offset of dictionary page relative to column chunk offset (int).""" + if self.has_dictionary_page: + return self.metadata.dictionary_page_offset() + else: + return None + + @property + def data_page_offset(self): + """Offset of data page relative to column chunk offset (int).""" + return self.metadata.data_page_offset() + + @property + def has_index_page(self): + """Not yet supported.""" + raise NotImplementedError('not supported in parquet-cpp') + + @property + def index_page_offset(self): + """Not yet supported.""" + raise NotImplementedError("parquet-cpp doesn't return valid values") + + @property + def total_compressed_size(self): + """Compressed size in bytes (int).""" + return self.metadata.total_compressed_size() + + @property + def total_uncompressed_size(self): + """Uncompressed size in bytes (int).""" + return self.metadata.total_uncompressed_size() + + @property + def has_offset_index(self): + """Whether the column chunk has an offset index""" + return self.metadata.GetOffsetIndexLocation().has_value() + + @property + def has_column_index(self): + """Whether the column chunk has a column index""" + return self.metadata.GetColumnIndexLocation().has_value() + + +cdef class SortingColumn: + """ + Sorting specification for a single column. + + Returned by :meth:`RowGroupMetaData.sorting_columns` and used in + :class:`ParquetWriter` to specify the sort order of the data. + + Parameters + ---------- + column_index : int + Index of column that data is sorted by. + descending : bool, default False + Whether column is sorted in descending order. + nulls_first : bool, default False + Whether null values appear before valid values. + + Notes + ----- + + Column indices are zero-based, refer only to leaf fields, and are in + depth-first order. This may make the column indices for nested schemas + different from what you expect. In most cases, it will be easier to + specify the sort order using column names instead of column indices + and converting using the ``from_ordering`` method. 
+ + Examples + -------- + + In other APIs, sort order is specified by names, such as: + + >>> sort_order = [('id', 'ascending'), ('timestamp', 'descending')] + + For Parquet, the column index must be used instead: + + >>> import pyarrow.parquet as pq + >>> [pq.SortingColumn(0), pq.SortingColumn(1, descending=True)] + [SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)] + + Convert the sort_order into the list of sorting columns with + ``from_ordering`` (note that the schema must be provided as well): + + >>> import pyarrow as pa + >>> schema = pa.schema([('id', pa.int64()), ('timestamp', pa.timestamp('ms'))]) + >>> sorting_columns = pq.SortingColumn.from_ordering(schema, sort_order) + >>> sorting_columns + (SortingColumn(column_index=0, descending=False, nulls_first=False), SortingColumn(column_index=1, descending=True, nulls_first=False)) + + Convert back to the sort order with ``to_ordering``: + + >>> pq.SortingColumn.to_ordering(schema, sorting_columns) + ((('id', 'ascending'), ('timestamp', 'descending')), 'at_end') + + See Also + -------- + RowGroupMetaData.sorting_columns + """ + cdef int column_index + cdef c_bool descending + cdef c_bool nulls_first + + def __init__(self, int column_index, c_bool descending=False, c_bool nulls_first=False): + self.column_index = column_index + self.descending = descending + self.nulls_first = nulls_first + + @classmethod + def from_ordering(cls, Schema schema, sort_keys, null_placement='at_end'): + """ + Create a tuple of SortingColumn objects from the same arguments as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sort_keys : Sequence of (name, order) tuples + Names of field/column keys (str) to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + null_placement : {'at_start', 'at_end'}, default 'at_end' + Where null values should appear in the sort order. + + Returns + ------- + sorting_columns : tuple of SortingColumn + """ + if null_placement == 'at_start': + nulls_first = True + elif null_placement == 'at_end': + nulls_first = False + else: + raise ValueError('null_placement must be "at_start" or "at_end"') + + col_map = _name_to_index_map(schema) + + sorting_columns = [] + + for sort_key in sort_keys: + if isinstance(sort_key, str): + name = sort_key + descending = False + elif (isinstance(sort_key, tuple) and len(sort_key) == 2 and + isinstance(sort_key[0], str) and + isinstance(sort_key[1], str)): + name, descending = sort_key + if descending == "descending": + descending = True + elif descending == "ascending": + descending = False + else: + raise ValueError("Invalid sort key direction: {0}" + .format(descending)) + else: + raise ValueError("Invalid sort key: {0}".format(sort_key)) + + try: + column_index = col_map[name] + except KeyError: + raise ValueError("Sort key name '{0}' not found in schema:\n{1}" + .format(name, schema)) + + sorting_columns.append( + cls(column_index, descending=descending, nulls_first=nulls_first) + ) + + return tuple(sorting_columns) + + @staticmethod + def to_ordering(Schema schema, sorting_columns): + """ + Convert a tuple of SortingColumn objects to the same format as + :class:`pyarrow.compute.SortOptions`. + + Parameters + ---------- + schema : Schema + Schema of the input data. + sorting_columns : tuple of SortingColumn + Columns to sort the input on. 
+ + Returns + ------- + sort_keys : tuple of (name, order) tuples + null_placement : {'at_start', 'at_end'} + """ + col_map = {i: name for name, i in _name_to_index_map(schema).items()} + + sort_keys = [] + nulls_first = None + + for sorting_column in sorting_columns: + name = col_map[sorting_column.column_index] + if sorting_column.descending: + order = "descending" + else: + order = "ascending" + sort_keys.append((name, order)) + if nulls_first is None: + nulls_first = sorting_column.nulls_first + elif nulls_first != sorting_column.nulls_first: + raise ValueError("Sorting columns have inconsistent null placement") + + if nulls_first: + null_placement = "at_start" + else: + null_placement = "at_end" + + return tuple(sort_keys), null_placement + + def __repr__(self): + return """{}(column_index={}, descending={}, nulls_first={})""".format( + self.__class__.__name__, + self.column_index, self.descending, self.nulls_first) + + def __eq__(self, SortingColumn other): + return (self.column_index == other.column_index and + self.descending == other.descending and + self.nulls_first == other.nulls_first) + + def __hash__(self): + return hash((self.column_index, self.descending, self.nulls_first)) + + @property + def column_index(self): + """"Index of column data is sorted by (int).""" + return self.column_index + + @property + def descending(self): + """Whether column is sorted in descending order (bool).""" + return self.descending + + @property + def nulls_first(self): + """Whether null values appear before valid values (bool).""" + return self.nulls_first + + +cdef class RowGroupMetaData(_Weakrefable): + """Metadata for a single row group.""" + + def __cinit__(self, FileMetaData parent, int index): + if index < 0 or index >= parent.num_row_groups: + raise IndexError('{0} out of bounds'.format(index)) + self.up_metadata = parent._metadata.RowGroup(index) + self.metadata = self.up_metadata.get() + self.parent = parent + self.index = index + + def __reduce__(self): + return RowGroupMetaData, (self.parent, self.index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, RowGroupMetaData other): + """ + Return whether the two row group metadata objects are equal. + + Parameters + ---------- + other : RowGroupMetaData + Metadata to compare against. + + Returns + ------- + are_equal : bool + """ + return self.metadata.Equals(deref(other.metadata)) + + def column(self, int i): + """ + Get column metadata at given index. + + Parameters + ---------- + i : int + Index of column to get metadata for. + + Returns + ------- + ColumnChunkMetaData + Metadata for column within this chunk. + """ + if i < 0 or i >= self.num_columns: + raise IndexError('{0} out of bounds'.format(i)) + chunk = ColumnChunkMetaData() + chunk.init(self, i) + return chunk + + def __repr__(self): + return """{0} + num_columns: {1} + num_rows: {2} + total_byte_size: {3} + sorting_columns: {4}""".format(object.__repr__(self), + self.num_columns, + self.num_rows, + self.total_byte_size, + self.sorting_columns) + + def to_dict(self): + """ + Get dictionary representation of the row group metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. 
+ """ + columns = [] + d = dict( + num_columns=self.num_columns, + num_rows=self.num_rows, + total_byte_size=self.total_byte_size, + columns=columns, + sorting_columns=[col.to_dict() for col in self.sorting_columns] + ) + for i in range(self.num_columns): + columns.append(self.column(i).to_dict()) + return d + + @property + def num_columns(self): + """Number of columns in this row group (int).""" + return self.metadata.num_columns() + + @property + def num_rows(self): + """Number of rows in this row group (int).""" + return self.metadata.num_rows() + + @property + def total_byte_size(self): + """Total byte size of all the uncompressed column data in this row group (int).""" + return self.metadata.total_byte_size() + + @property + def sorting_columns(self): + """Columns the row group is sorted by (tuple of :class:`SortingColumn`)).""" + out = [] + cdef vector[CSortingColumn] sorting_columns = self.metadata.sorting_columns() + for sorting_col in sorting_columns: + out.append(SortingColumn( + sorting_col.column_idx, + sorting_col.descending, + sorting_col.nulls_first + )) + return tuple(out) + + +def _reconstruct_filemetadata(Buffer serialized): + cdef: + FileMetaData metadata = FileMetaData.__new__(FileMetaData) + CBuffer *buffer = serialized.buffer.get() + uint32_t metadata_len = buffer.size() + + metadata.init(CFileMetaData_Make(buffer.data(), &metadata_len)) + + return metadata + + +cdef class FileMetaData(_Weakrefable): + """Parquet metadata for a single file.""" + + def __cinit__(self): + pass + + def __reduce__(self): + cdef: + NativeFile sink = BufferOutputStream() + COutputStream* c_sink = sink.get_output_stream().get() + with nogil: + self._metadata.WriteTo(c_sink) + + cdef Buffer buffer = sink.getvalue() + return _reconstruct_filemetadata, (buffer,) + + def __repr__(self): + return """{0} + created_by: {1} + num_columns: {2} + num_rows: {3} + num_row_groups: {4} + format_version: {5} + serialized_size: {6}""".format(object.__repr__(self), + self.created_by, self.num_columns, + self.num_rows, self.num_row_groups, + self.format_version, + self.serialized_size) + + def to_dict(self): + """ + Get dictionary representation of the file metadata. + + Returns + ------- + dict + Dictionary with a key for each attribute of this class. + """ + row_groups = [] + d = dict( + created_by=self.created_by, + num_columns=self.num_columns, + num_rows=self.num_rows, + num_row_groups=self.num_row_groups, + row_groups=row_groups, + format_version=self.format_version, + serialized_size=self.serialized_size + ) + for i in range(self.num_row_groups): + row_groups.append(self.row_group(i).to_dict()) + return d + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, FileMetaData other not None): + """ + Return whether the two file metadata objects are equal. + + Parameters + ---------- + other : FileMetaData + Metadata to compare against. 
+ + Returns + ------- + are_equal : bool + """ + return self._metadata.Equals(deref(other._metadata)) + + @property + def schema(self): + """Schema of the file (:class:`ParquetSchema`).""" + if self._schema is None: + self._schema = ParquetSchema(self) + return self._schema + + @property + def serialized_size(self): + """Size of the original thrift encoded metadata footer (int).""" + return self._metadata.size() + + @property + def num_columns(self): + """Number of columns in file (int).""" + return self._metadata.num_columns() + + @property + def num_rows(self): + """Total number of rows in file (int).""" + return self._metadata.num_rows() + + @property + def num_row_groups(self): + """Number of row groups in file (int).""" + return self._metadata.num_row_groups() + + @property + def format_version(self): + """ + Parquet format version used in file (str, such as '1.0', '2.4'). + + If version is missing or unparsable, will default to assuming '2.6'. + """ + cdef ParquetVersion version = self._metadata.version() + if version == ParquetVersion_V1: + return '1.0' + elif version == ParquetVersion_V2_0: + return 'pseudo-2.0' + elif version == ParquetVersion_V2_4: + return '2.4' + elif version == ParquetVersion_V2_6: + return '2.6' + else: + warnings.warn('Unrecognized file version, assuming 2.6: {}' + .format(version)) + return '2.6' + + @property + def created_by(self): + """ + String describing source of the parquet file (str). + + This typically includes library name and version number. For example, Arrow 7.0's + writer returns 'parquet-cpp-arrow version 7.0.0'. + """ + return frombytes(self._metadata.created_by()) + + @property + def metadata(self): + """Additional metadata as key value pairs (dict[bytes, bytes]).""" + cdef: + unordered_map[c_string, c_string] metadata + const CKeyValueMetadata* underlying_metadata + underlying_metadata = self._metadata.key_value_metadata().get() + if underlying_metadata != NULL: + underlying_metadata.ToUnorderedMap(&metadata) + return metadata + else: + return None + + def row_group(self, int i): + """ + Get metadata for row group at index i. + + Parameters + ---------- + i : int + Row group index to get. + + Returns + ------- + row_group_metadata : RowGroupMetaData + """ + return RowGroupMetaData(self, i) + + def set_file_path(self, path): + """ + Set ColumnChunk file paths to the given value. + + This method modifies the ``file_path`` field of each ColumnChunk + in the FileMetaData to be a particular value. + + Parameters + ---------- + path : str + The file path to set on all ColumnChunks. + """ + cdef: + c_string c_path = tobytes(path) + self._metadata.set_file_path(c_path) + + def append_row_groups(self, FileMetaData other): + """ + Append row groups from other FileMetaData object. + + Parameters + ---------- + other : FileMetaData + Other metadata to append row groups from. + """ + cdef shared_ptr[CFileMetaData] c_metadata + + c_metadata = other.sp_metadata + self._metadata.AppendRowGroups(deref(c_metadata)) + + def write_metadata_file(self, where): + """ + Write the metadata to a metadata-only Parquet file. + + Parameters + ---------- + where : path or file-like object + Where to write the metadata. Should be a writable path on + the local filesystem, or a writable file-like object. 
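+
+        Examples
+        --------
+        A minimal sketch of collecting metadata from an existing file and
+        writing it back out as a standalone ``_metadata`` file (file names
+        are illustrative):
+
+        >>> import pyarrow.parquet as pq
+        >>> metadata = pq.read_metadata('example.parquet')  # doctest: +SKIP
+        >>> metadata.set_file_path('example.parquet')  # doctest: +SKIP
+        >>> metadata.write_metadata_file('_metadata')  # doctest: +SKIP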
+ """ + cdef: + shared_ptr[COutputStream] sink + c_string c_where + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &sink) + else: + c_where = tobytes(where) + with nogil: + sink = GetResultValue(FileOutputStream.Open(c_where)) + + with nogil: + check_status( + WriteMetaDataFile(deref(self._metadata), sink.get())) + + +cdef class ParquetSchema(_Weakrefable): + """A Parquet schema.""" + + def __cinit__(self, FileMetaData container): + self.parent = container + self.schema = container._metadata.schema() + + def __repr__(self): + return "{0}\n{1}".format( + object.__repr__(self), + frombytes(self.schema.ToString(), safe=True)) + + def __reduce__(self): + return ParquetSchema, (self.parent,) + + def __len__(self): + return self.schema.num_columns() + + def __getitem__(self, i): + return self.column(i) + + @property + def names(self): + """Name of each field (list of str).""" + return [self[i].name for i in range(len(self))] + + def to_arrow_schema(self): + """ + Convert Parquet schema to effective Arrow schema. + + Returns + ------- + schema : Schema + """ + cdef shared_ptr[CSchema] sp_arrow_schema + + with nogil: + check_status(FromParquetSchema( + self.schema, default_arrow_reader_properties(), + self.parent._metadata.key_value_metadata(), + &sp_arrow_schema)) + + return pyarrow_wrap_schema(sp_arrow_schema) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def equals(self, ParquetSchema other): + """ + Return whether the two schemas are equal. + + Parameters + ---------- + other : ParquetSchema + Schema to compare against. + + Returns + ------- + are_equal : bool + """ + return self.schema.Equals(deref(other.schema)) + + def column(self, i): + """ + Return the schema for a single column. + + Parameters + ---------- + i : int + Index of column in schema. + + Returns + ------- + column_schema : ColumnSchema + """ + if i < 0 or i >= len(self): + raise IndexError('{0} out of bounds'.format(i)) + + return ColumnSchema(self, i) + + +cdef class ColumnSchema(_Weakrefable): + """Schema for a single column.""" + cdef: + int index + ParquetSchema parent + const ColumnDescriptor* descr + + def __cinit__(self, ParquetSchema schema, int index): + self.parent = schema + self.index = index # for pickling support + self.descr = schema.schema.Column(index) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __reduce__(self): + return ColumnSchema, (self.parent, self.index) + + def equals(self, ColumnSchema other): + """ + Return whether the two column schemas are equal. + + Parameters + ---------- + other : ColumnSchema + Schema to compare against. 
+ + Returns + ------- + are_equal : bool + """ + return self.descr.Equals(deref(other.descr)) + + def __repr__(self): + physical_type = self.physical_type + converted_type = self.converted_type + if converted_type == 'DECIMAL': + converted_type = 'DECIMAL({0}, {1})'.format(self.precision, + self.scale) + elif physical_type == 'FIXED_LEN_BYTE_ARRAY': + converted_type = ('FIXED_LEN_BYTE_ARRAY(length={0})' + .format(self.length)) + + return """ + name: {0} + path: {1} + max_definition_level: {2} + max_repetition_level: {3} + physical_type: {4} + logical_type: {5} + converted_type (legacy): {6}""".format(self.name, self.path, + self.max_definition_level, + self.max_repetition_level, + physical_type, + str(self.logical_type), + converted_type) + + @property + def name(self): + """Name of field (str).""" + return frombytes(self.descr.name()) + + @property + def path(self): + """Nested path to field, separated by periods (str).""" + return frombytes(self.descr.path().get().ToDotString()) + + @property + def max_definition_level(self): + """Maximum definition level (int).""" + return self.descr.max_definition_level() + + @property + def max_repetition_level(self): + """Maximum repetition level (int).""" + return self.descr.max_repetition_level() + + @property + def physical_type(self): + """Name of physical type (str).""" + return physical_type_name_from_enum(self.descr.physical_type()) + + @property + def logical_type(self): + """Logical type of column (:class:`ParquetLogicalType`).""" + return wrap_logical_type(self.descr.logical_type()) + + @property + def converted_type(self): + """Legacy converted type (str or None).""" + return converted_type_name_from_enum(self.descr.converted_type()) + + # FIXED_LEN_BYTE_ARRAY attribute + @property + def length(self): + """Array length if fixed length byte array type, None otherwise (int or None).""" + return self.descr.type_length() + + # Decimal attributes + @property + def precision(self): + """Precision if decimal type, None otherwise (int or None).""" + return self.descr.type_precision() + + @property + def scale(self): + """Scale if decimal type, None otherwise (int or None).""" + return self.descr.type_scale() + + +cdef physical_type_name_from_enum(ParquetType type_): + return { + ParquetType_BOOLEAN: 'BOOLEAN', + ParquetType_INT32: 'INT32', + ParquetType_INT64: 'INT64', + ParquetType_INT96: 'INT96', + ParquetType_FLOAT: 'FLOAT', + ParquetType_DOUBLE: 'DOUBLE', + ParquetType_BYTE_ARRAY: 'BYTE_ARRAY', + ParquetType_FIXED_LEN_BYTE_ARRAY: 'FIXED_LEN_BYTE_ARRAY', + }.get(type_, 'UNKNOWN') + + +cdef logical_type_name_from_enum(ParquetLogicalTypeId type_): + return { + ParquetLogicalType_UNDEFINED: 'UNDEFINED', + ParquetLogicalType_STRING: 'STRING', + ParquetLogicalType_MAP: 'MAP', + ParquetLogicalType_LIST: 'LIST', + ParquetLogicalType_ENUM: 'ENUM', + ParquetLogicalType_DECIMAL: 'DECIMAL', + ParquetLogicalType_DATE: 'DATE', + ParquetLogicalType_TIME: 'TIME', + ParquetLogicalType_TIMESTAMP: 'TIMESTAMP', + ParquetLogicalType_INT: 'INT', + ParquetLogicalType_JSON: 'JSON', + ParquetLogicalType_BSON: 'BSON', + ParquetLogicalType_UUID: 'UUID', + ParquetLogicalType_NONE: 'NONE', + }.get(type_, 'UNKNOWN') + + +cdef converted_type_name_from_enum(ParquetConvertedType type_): + return { + ParquetConvertedType_NONE: 'NONE', + ParquetConvertedType_UTF8: 'UTF8', + ParquetConvertedType_MAP: 'MAP', + ParquetConvertedType_MAP_KEY_VALUE: 'MAP_KEY_VALUE', + ParquetConvertedType_LIST: 'LIST', + ParquetConvertedType_ENUM: 'ENUM', + ParquetConvertedType_DECIMAL: 'DECIMAL', + 
ParquetConvertedType_DATE: 'DATE', + ParquetConvertedType_TIME_MILLIS: 'TIME_MILLIS', + ParquetConvertedType_TIME_MICROS: 'TIME_MICROS', + ParquetConvertedType_TIMESTAMP_MILLIS: 'TIMESTAMP_MILLIS', + ParquetConvertedType_TIMESTAMP_MICROS: 'TIMESTAMP_MICROS', + ParquetConvertedType_UINT_8: 'UINT_8', + ParquetConvertedType_UINT_16: 'UINT_16', + ParquetConvertedType_UINT_32: 'UINT_32', + ParquetConvertedType_UINT_64: 'UINT_64', + ParquetConvertedType_INT_8: 'INT_8', + ParquetConvertedType_INT_16: 'INT_16', + ParquetConvertedType_INT_32: 'INT_32', + ParquetConvertedType_INT_64: 'INT_64', + ParquetConvertedType_JSON: 'JSON', + ParquetConvertedType_BSON: 'BSON', + ParquetConvertedType_INTERVAL: 'INTERVAL', + }.get(type_, 'UNKNOWN') + + +cdef encoding_name_from_enum(ParquetEncoding encoding_): + return { + ParquetEncoding_PLAIN: 'PLAIN', + ParquetEncoding_PLAIN_DICTIONARY: 'PLAIN_DICTIONARY', + ParquetEncoding_RLE: 'RLE', + ParquetEncoding_BIT_PACKED: 'BIT_PACKED', + ParquetEncoding_DELTA_BINARY_PACKED: 'DELTA_BINARY_PACKED', + ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY: 'DELTA_LENGTH_BYTE_ARRAY', + ParquetEncoding_DELTA_BYTE_ARRAY: 'DELTA_BYTE_ARRAY', + ParquetEncoding_RLE_DICTIONARY: 'RLE_DICTIONARY', + ParquetEncoding_BYTE_STREAM_SPLIT: 'BYTE_STREAM_SPLIT', + }.get(encoding_, 'UNKNOWN') + + +cdef encoding_enum_from_name(str encoding_name): + enc = { + 'PLAIN': ParquetEncoding_PLAIN, + 'BIT_PACKED': ParquetEncoding_BIT_PACKED, + 'RLE': ParquetEncoding_RLE, + 'BYTE_STREAM_SPLIT': ParquetEncoding_BYTE_STREAM_SPLIT, + 'DELTA_BINARY_PACKED': ParquetEncoding_DELTA_BINARY_PACKED, + 'DELTA_LENGTH_BYTE_ARRAY': ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY, + 'DELTA_BYTE_ARRAY': ParquetEncoding_DELTA_BYTE_ARRAY, + 'RLE_DICTIONARY': 'dict', + 'PLAIN_DICTIONARY': 'dict', + }.get(encoding_name, None) + if enc is None: + raise ValueError(f"Unsupported column encoding: {encoding_name!r}") + elif enc == 'dict': + raise ValueError(f"{encoding_name!r} is already used by default.") + else: + return enc + + +cdef compression_name_from_enum(ParquetCompression compression_): + return { + ParquetCompression_UNCOMPRESSED: 'UNCOMPRESSED', + ParquetCompression_SNAPPY: 'SNAPPY', + ParquetCompression_GZIP: 'GZIP', + ParquetCompression_LZO: 'LZO', + ParquetCompression_BROTLI: 'BROTLI', + ParquetCompression_LZ4: 'LZ4', + ParquetCompression_ZSTD: 'ZSTD', + }.get(compression_, 'UNKNOWN') + + +cdef int check_compression_name(name) except -1: + if name.upper() not in {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', + 'ZSTD'}: + raise ArrowException("Unsupported compression: " + name) + return 0 + + +cdef ParquetCompression compression_from_name(name): + name = name.upper() + if name == 'SNAPPY': + return ParquetCompression_SNAPPY + elif name == 'GZIP': + return ParquetCompression_GZIP + elif name == 'LZO': + return ParquetCompression_LZO + elif name == 'BROTLI': + return ParquetCompression_BROTLI + elif name == 'LZ4': + return ParquetCompression_LZ4 + elif name == 'ZSTD': + return ParquetCompression_ZSTD + else: + return ParquetCompression_UNCOMPRESSED + + +cdef class ParquetReader(_Weakrefable): + cdef: + object source + CMemoryPool* pool + UniquePtrNoGIL[FileReader] reader + FileMetaData _metadata + shared_ptr[CRandomAccessFile] rd_handle + + cdef public: + _column_idx_map + + def __cinit__(self, MemoryPool memory_pool=None): + self.pool = maybe_unbox_memory_pool(memory_pool) + self._metadata = None + + def open(self, object source not None, *, bint use_memory_map=False, + read_dictionary=None, FileMetaData metadata=None, + int 
buffer_size=0, bint pre_buffer=False, + coerce_int96_timestamp_unit=None, + FileDecryptionProperties decryption_properties=None, + thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False): + """ + Open a parquet file for reading. + + Parameters + ---------- + source : str, pathlib.Path, pyarrow.NativeFile, or file-like object + use_memory_map : bool, default False + read_dictionary : iterable[int or str], optional + metadata : FileMetaData, optional + buffer_size : int, default 0 + pre_buffer : bool, default False + coerce_int96_timestamp_unit : str, optional + decryption_properties : FileDecryptionProperties, optional + thrift_string_size_limit : int, optional + thrift_container_size_limit : int, optional + page_checksum_verification : bool, default False + """ + cdef: + shared_ptr[CFileMetaData] c_metadata + CReaderProperties properties = default_reader_properties() + ArrowReaderProperties arrow_props = ( + default_arrow_reader_properties()) + FileReaderBuilder builder + + if metadata is not None: + c_metadata = metadata.sp_metadata + + if buffer_size > 0: + properties.enable_buffered_stream() + properties.set_buffer_size(buffer_size) + elif buffer_size == 0: + properties.disable_buffered_stream() + else: + raise ValueError('Buffer size must be larger than zero') + + if thrift_string_size_limit is not None: + if thrift_string_size_limit <= 0: + raise ValueError("thrift_string_size_limit " + "must be larger than zero") + properties.set_thrift_string_size_limit(thrift_string_size_limit) + if thrift_container_size_limit is not None: + if thrift_container_size_limit <= 0: + raise ValueError("thrift_container_size_limit " + "must be larger than zero") + properties.set_thrift_container_size_limit( + thrift_container_size_limit) + + if decryption_properties is not None: + properties.file_decryption_properties( + decryption_properties.unwrap()) + + arrow_props.set_pre_buffer(pre_buffer) + + properties.set_page_checksum_verification(page_checksum_verification) + + if coerce_int96_timestamp_unit is None: + # use the default defined in default_arrow_reader_properties() + pass + else: + arrow_props.set_coerce_int96_timestamp_unit( + string_to_timeunit(coerce_int96_timestamp_unit)) + + self.source = source + get_reader(source, use_memory_map, &self.rd_handle) + + with nogil: + check_status(builder.Open(self.rd_handle, properties, c_metadata)) + + # Set up metadata + with nogil: + c_metadata = builder.raw_reader().metadata() + self._metadata = result = FileMetaData() + result.init(c_metadata) + + if read_dictionary is not None: + self._set_read_dictionary(read_dictionary, &arrow_props) + + with nogil: + check_status(builder.memory_pool(self.pool) + .properties(arrow_props) + .Build(&self.reader)) + + cdef _set_read_dictionary(self, read_dictionary, + ArrowReaderProperties* props): + for column in read_dictionary: + if not isinstance(column, int): + column = self.column_name_idx(column) + props.set_read_dictionary(column, True) + + @property + def column_paths(self): + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + vector[c_string] path + int i = 0 + + paths = [] + for i in range(0, metadata.num_columns()): + path = (metadata.schema().Column(i) + .path().get().ToDotVector()) + paths.append([frombytes(x) for x in path]) + + return paths + + @property + def metadata(self): + return self._metadata + + @property + def schema_arrow(self): + cdef shared_ptr[CSchema] out + with nogil: + 
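+            # Illustrative usage of this low-level reader (a sketch only;
+            # 'example.parquet' is an assumed local file, not part of the
+            # original source):
+            #   reader = ParquetReader()
+            #   reader.open('example.parquet')
+            #   reader.schema_arrow   # -> pyarrow.Schema
+            #   reader.read_all()     # -> pyarrow.Table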
check_status(self.reader.get().GetSchema(&out)) + return pyarrow_wrap_schema(out) + + @property + def num_row_groups(self): + return self.reader.get().num_row_groups() + + def set_use_threads(self, bint use_threads): + """ + Parameters + ---------- + use_threads : bool + """ + self.reader.get().set_use_threads(use_threads) + + def set_batch_size(self, int64_t batch_size): + """ + Parameters + ---------- + batch_size : int64 + """ + self.reader.get().set_batch_size(batch_size) + + def iter_batches(self, int64_t batch_size, row_groups, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + batch_size : int64 + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Yields + ------ + next : RecordBatch + """ + cdef: + vector[int] c_row_groups + vector[int] c_column_indices + shared_ptr[CRecordBatch] record_batch + UniquePtrNoGIL[CRecordBatchReader] recordbatchreader + + self.set_batch_size(batch_size) + + if use_threads: + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, c_column_indices, &recordbatchreader + ) + ) + else: + with nogil: + check_status( + self.reader.get().GetRecordBatchReader( + c_row_groups, &recordbatchreader + ) + ) + + while True: + with nogil: + check_status( + recordbatchreader.get().ReadNext(&record_batch) + ) + if record_batch.get() == NULL: + break + + yield pyarrow_wrap_batch(record_batch) + + def read_row_group(self, int i, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + i : int + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + return self.read_row_groups([i], column_indices, use_threads) + + def read_row_groups(self, row_groups not None, column_indices=None, + bint use_threads=True): + """ + Parameters + ---------- + row_groups : list[int] + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_row_groups + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + for row_group in row_groups: + c_row_groups.push_back(row_group) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, c_column_indices, + &ctable)) + else: + # Read all columns + with nogil: + check_status(self.reader.get() + .ReadRowGroups(c_row_groups, &ctable)) + return pyarrow_wrap_table(ctable) + + def read_all(self, column_indices=None, bint use_threads=True): + """ + Parameters + ---------- + column_indices : list[int], optional + use_threads : bool, default True + + Returns + ------- + table : pyarrow.Table + """ + cdef: + shared_ptr[CTable] ctable + vector[int] c_column_indices + + self.set_use_threads(use_threads) + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + with nogil: + check_status(self.reader.get() + .ReadTable(c_column_indices, &ctable)) + else: + # Read all columns + with nogil: + check_status(self.reader.get() + .ReadTable(&ctable)) + return pyarrow_wrap_table(ctable) + + def scan_contents(self, column_indices=None, batch_size=65536): + """ + 
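+        Scan the file contents and return the total number of rows.
+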
Parameters + ---------- + column_indices : list[int], optional + batch_size : int32, default 65536 + + Returns + ------- + num_rows : int64 + """ + cdef: + vector[int] c_column_indices + int32_t c_batch_size + int64_t c_num_rows + + if column_indices is not None: + for index in column_indices: + c_column_indices.push_back(index) + + c_batch_size = batch_size + + with nogil: + check_status(self.reader.get() + .ScanContents(c_column_indices, c_batch_size, + &c_num_rows)) + + return c_num_rows + + def column_name_idx(self, column_name): + """ + Find the index of a column by its name. + + Parameters + ---------- + column_name : str + Name of the column; separation of nesting levels is done via ".". + + Returns + ------- + column_idx : int + Integer index of the column in the schema. + """ + cdef: + FileMetaData container = self.metadata + const CFileMetaData* metadata = container._metadata + int i = 0 + + if self._column_idx_map is None: + self._column_idx_map = {} + for i in range(0, metadata.num_columns()): + col_bytes = tobytes(metadata.schema().Column(i) + .path().get().ToDotString()) + self._column_idx_map[col_bytes] = i + + return self._column_idx_map[tobytes(column_name)] + + def read_column(self, int column_index): + """ + Read the column at the specified index. + + Parameters + ---------- + column_index : int + Index of the column. + + Returns + ------- + column : pyarrow.ChunkedArray + """ + cdef shared_ptr[CChunkedArray] out + with nogil: + check_status(self.reader.get() + .ReadColumn(column_index, &out)) + return pyarrow_wrap_chunked_array(out) + + def close(self): + if not self.closed: + with nogil: + check_status(self.rd_handle.get().Close()) + + @property + def closed(self): + if self.rd_handle == NULL: + return True + with nogil: + closed = self.rd_handle.get().closed() + return closed + + +cdef CSortingColumn _convert_sorting_column(SortingColumn sorting_column): + cdef CSortingColumn c_sorting_column + + c_sorting_column.column_idx = sorting_column.column_index + c_sorting_column.descending = sorting_column.descending + c_sorting_column.nulls_first = sorting_column.nulls_first + + return c_sorting_column + + +cdef vector[CSortingColumn] _convert_sorting_columns(sorting_columns) except *: + if not (isinstance(sorting_columns, Sequence) + and all(isinstance(col, SortingColumn) for col in sorting_columns)): + raise ValueError( + "'sorting_columns' must be a list of `SortingColumn`") + + cdef vector[CSortingColumn] c_sorting_columns = [_convert_sorting_column(col) + for col in sorting_columns] + + return c_sorting_columns + + +cdef shared_ptr[WriterProperties] _create_writer_properties( + use_dictionary=None, + compression=None, + version=None, + write_statistics=None, + data_page_size=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version=None, + FileEncryptionProperties encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None) except *: + """General writer properties""" + cdef: + shared_ptr[WriterProperties] properties + WriterProperties.Builder props + + # data_page_version + + if data_page_version is not None: + if data_page_version == "1.0": + props.data_page_version(ParquetDataPageVersion_V1) + elif data_page_version == "2.0": + props.data_page_version(ParquetDataPageVersion_V2) + else: + raise ValueError("Unsupported Parquet data page version: {0}" + .format(data_page_version)) + + # version + + if version is not 
None:
+        if version == "1.0":
+            props.version(ParquetVersion_V1)
+        elif version in ("2.0", "pseudo-2.0"):
+            warnings.warn(
+                "Parquet format '2.0' pseudo version is deprecated, use "
+                "'2.4' or '2.6' for fine-grained feature selection",
+                FutureWarning, stacklevel=2)
+            props.version(ParquetVersion_V2_0)
+        elif version == "2.4":
+            props.version(ParquetVersion_V2_4)
+        elif version == "2.6":
+            props.version(ParquetVersion_V2_6)
+        else:
+            raise ValueError("Unsupported Parquet format version: {0}"
+                             .format(version))
+
+    # compression
+
+    if isinstance(compression, basestring):
+        check_compression_name(compression)
+        props.compression(compression_from_name(compression))
+    elif compression is not None:
+        for column, codec in compression.items():
+            check_compression_name(codec)
+            props.compression(tobytes(column), compression_from_name(codec))
+
+    if isinstance(compression_level, int):
+        props.compression_level(compression_level)
+    elif compression_level is not None:
+        for column, level in compression_level.items():
+            props.compression_level(tobytes(column), level)
+
+    # use_dictionary
+
+    if isinstance(use_dictionary, bool):
+        if use_dictionary:
+            props.enable_dictionary()
+            if column_encoding is not None:
+                raise ValueError(
+                    "To use 'column_encoding' set 'use_dictionary' to False")
+        else:
+            props.disable_dictionary()
+    elif use_dictionary is not None:
+        # Deactivate dictionary encoding by default
+        props.disable_dictionary()
+        for column in use_dictionary:
+            props.enable_dictionary(tobytes(column))
+            if (column_encoding is not None and
+                    column_encoding.get(column) is not None):
+                raise ValueError(
+                    "To use 'column_encoding' set 'use_dictionary' to False")
+
+    # write_statistics
+
+    if isinstance(write_statistics, bool):
+        if write_statistics:
+            props.enable_statistics()
+        else:
+            props.disable_statistics()
+    elif write_statistics is not None:
+        # Deactivate statistics by default and enable for specified columns
+        props.disable_statistics()
+        for column in write_statistics:
+            props.enable_statistics(tobytes(column))
+
+    # sorting_columns
+
+    if sorting_columns is not None:
+        props.set_sorting_columns(_convert_sorting_columns(sorting_columns))
+
+    # use_byte_stream_split
+
+    if isinstance(use_byte_stream_split, bool):
+        if use_byte_stream_split:
+            if column_encoding is not None:
+                raise ValueError(
+                    "'use_byte_stream_split' cannot be passed "
+                    "together with 'column_encoding'")
+            else:
+                props.encoding(ParquetEncoding_BYTE_STREAM_SPLIT)
+    elif use_byte_stream_split is not None:
+        for column in use_byte_stream_split:
+            if column_encoding is None:
+                column_encoding = {column: 'BYTE_STREAM_SPLIT'}
+            elif column_encoding.get(column, None) is None:
+                column_encoding[column] = 'BYTE_STREAM_SPLIT'
+            else:
+                raise ValueError(
+                    "'use_byte_stream_split' cannot be passed "
+                    "together with 'column_encoding'")
+
+    # column_encoding
+    # encoding map - encode individual columns
+
+    if column_encoding is not None:
+        if isinstance(column_encoding, dict):
+            for column, _encoding in column_encoding.items():
+                props.encoding(tobytes(column),
+                               encoding_enum_from_name(_encoding))
+        elif isinstance(column_encoding, str):
+            props.encoding(encoding_enum_from_name(column_encoding))
+        else:
+            raise TypeError(
+                "'column_encoding' should be a dictionary or a string")
+
+    if data_page_size is not None:
+        props.data_pagesize(data_page_size)
+
+    if write_batch_size is not None:
+        props.write_batch_size(write_batch_size)
+
+    if dictionary_pagesize_limit is not None:
+        
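+        # How these writer options are typically reached from the public API;
+        # an illustrative sketch ('example.parquet' and the table below are
+        # assumptions, not part of the original source):
+        #   import pyarrow as pa
+        #   import pyarrow.parquet as pq
+        #   table = pa.table({'x': [1, 2, 3]})
+        #   pq.write_table(table, 'example.parquet',
+        #                  compression='zstd', compression_level=3,
+        #                  use_dictionary=False,
+        #                  column_encoding={'x': 'DELTA_BINARY_PACKED'})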
props.dictionary_pagesize_limit(dictionary_pagesize_limit) + + # encryption + + if encryption_properties is not None: + props.encryption( + (encryption_properties).unwrap()) + + # For backwards compatibility reasons we cap the maximum row group size + # at 64Mi rows. This could be changed in the future, though it would be + # a breaking change. + # + # The user can always specify a smaller row group size (and the default + # is smaller) when calling write_table. If the call to write_table uses + # a size larger than this then it will be latched to this value. + props.max_row_group_length(_MAX_ROW_GROUP_SIZE) + + # checksum + + if write_page_checksum: + props.enable_page_checksum() + else: + props.disable_page_checksum() + + # page index + + if write_page_index: + props.enable_write_page_index() + else: + props.disable_write_page_index() + + properties = props.build() + + return properties + + +cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version=None, + use_compliant_nested_type=True, + store_schema=True) except *: + """Arrow writer properties""" + cdef: + shared_ptr[ArrowWriterProperties] arrow_properties + ArrowWriterProperties.Builder arrow_props + + # Store the original Arrow schema so things like dictionary types can + # be automatically reconstructed + if store_schema: + arrow_props.store_schema() + + # int96 support + + if use_deprecated_int96_timestamps: + arrow_props.enable_deprecated_int96_timestamps() + else: + arrow_props.disable_deprecated_int96_timestamps() + + # coerce_timestamps + + if coerce_timestamps == 'ms': + arrow_props.coerce_timestamps(TimeUnit_MILLI) + elif coerce_timestamps == 'us': + arrow_props.coerce_timestamps(TimeUnit_MICRO) + elif coerce_timestamps is not None: + raise ValueError('Invalid value for coerce_timestamps: {0}' + .format(coerce_timestamps)) + + # allow_truncated_timestamps + + if allow_truncated_timestamps: + arrow_props.allow_truncated_timestamps() + else: + arrow_props.disallow_truncated_timestamps() + + # use_compliant_nested_type + + if use_compliant_nested_type: + arrow_props.enable_compliant_nested_types() + else: + arrow_props.disable_compliant_nested_types() + + # writer_engine_version + + if writer_engine_version == "V1": + warnings.warn("V1 parquet writer engine is a no-op. 
Use V2.") + arrow_props.set_engine_version(ArrowWriterEngineVersion.V1) + elif writer_engine_version != "V2": + raise ValueError("Unsupported Writer Engine Version: {0}" + .format(writer_engine_version)) + + arrow_properties = arrow_props.build() + + return arrow_properties + +cdef _name_to_index_map(Schema arrow_schema): + cdef: + shared_ptr[CSchema] sp_arrow_schema + shared_ptr[SchemaDescriptor] sp_parquet_schema + shared_ptr[WriterProperties] props = _create_writer_properties() + shared_ptr[ArrowWriterProperties] arrow_props = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + writer_engine_version="V2" + ) + + sp_arrow_schema = pyarrow_unwrap_schema(arrow_schema) + + with nogil: + check_status(ToParquetSchema( + sp_arrow_schema.get(), deref(props.get()), deref(arrow_props.get()), &sp_parquet_schema)) + + out = dict() + + cdef SchemaDescriptor* parquet_schema = sp_parquet_schema.get() + + for i in range(parquet_schema.num_columns()): + name = frombytes(parquet_schema.Column(i).path().get().ToDotString()) + out[name] = i + + return out + + +cdef class ParquetWriter(_Weakrefable): + cdef: + unique_ptr[FileWriter] writer + shared_ptr[COutputStream] sink + bint own_sink + + cdef readonly: + object use_dictionary + object use_deprecated_int96_timestamps + object use_byte_stream_split + object column_encoding + object coerce_timestamps + object allow_truncated_timestamps + object compression + object compression_level + object data_page_version + object use_compliant_nested_type + object version + object write_statistics + object writer_engine_version + int row_group_size + int64_t data_page_size + FileEncryptionProperties encryption_properties + int64_t write_batch_size + int64_t dictionary_pagesize_limit + object store_schema + + def __cinit__(self, where, Schema schema not None, use_dictionary=None, + compression=None, version=None, + write_statistics=None, + MemoryPool memory_pool=None, + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + data_page_size=None, + allow_truncated_timestamps=False, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + writer_engine_version=None, + data_page_version=None, + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None): + cdef: + shared_ptr[WriterProperties] properties + shared_ptr[ArrowWriterProperties] arrow_properties + c_string c_where + CMemoryPool* pool + + try: + where = _stringify_path(where) + except TypeError: + get_writer(where, &self.sink) + self.own_sink = False + else: + c_where = tobytes(where) + with nogil: + self.sink = GetResultValue(FileOutputStream.Open(c_where)) + self.own_sink = True + + properties = _create_writer_properties( + use_dictionary=use_dictionary, + compression=compression, + version=version, + write_statistics=write_statistics, + data_page_size=data_page_size, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + data_page_version=data_page_version, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + ) + arrow_properties = _create_arrow_writer_properties( + 
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps, + coerce_timestamps=coerce_timestamps, + allow_truncated_timestamps=allow_truncated_timestamps, + writer_engine_version=writer_engine_version, + use_compliant_nested_type=use_compliant_nested_type, + store_schema=store_schema, + ) + + pool = maybe_unbox_memory_pool(memory_pool) + with nogil: + self.writer = move(GetResultValue( + FileWriter.Open(deref(schema.schema), pool, + self.sink, properties, arrow_properties))) + + def close(self): + with nogil: + check_status(self.writer.get().Close()) + if self.own_sink: + check_status(self.sink.get().Close()) + + def write_table(self, Table table, row_group_size=None): + cdef: + CTable* ctable = table.table + int64_t c_row_group_size + + if row_group_size is None or row_group_size == -1: + c_row_group_size = min(ctable.num_rows(), _DEFAULT_ROW_GROUP_SIZE) + elif row_group_size == 0: + raise ValueError('Row group size cannot be 0') + else: + c_row_group_size = row_group_size + + with nogil: + check_status(self.writer.get() + .WriteTable(deref(ctable), c_row_group_size)) + + @property + def metadata(self): + cdef: + shared_ptr[CFileMetaData] metadata + FileMetaData result + with nogil: + metadata = self.writer.get().metadata() + if metadata: + result = FileMetaData() + result.init(metadata) + return result + raise RuntimeError( + 'file metadata is only available after writer close') diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..13b8c748cb8caa2fe61cdc849687c4746a6b2304 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.pyx @@ -0,0 +1,458 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
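+
+# Typical usage of this module goes through the public pyarrow.fs API, for
+# example (an illustrative sketch; the bucket, key and region below are
+# assumptions):
+#
+#   from pyarrow import fs
+#   s3 = fs.S3FileSystem(region='us-east-2')
+#   info = s3.get_file_info('my-bucket/my-key.parquet')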
+ +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.lib cimport (check_status, pyarrow_wrap_metadata, + pyarrow_unwrap_metadata) +from pyarrow.lib import frombytes, tobytes, KeyValueMetadata +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + + +cpdef enum S3LogLevel: + Off = CS3LogLevel_Off + Fatal = CS3LogLevel_Fatal + Error = CS3LogLevel_Error + Warn = CS3LogLevel_Warn + Info = CS3LogLevel_Info + Debug = CS3LogLevel_Debug + Trace = CS3LogLevel_Trace + + +def initialize_s3(S3LogLevel log_level=S3LogLevel.Fatal, int num_event_loop_threads=1): + """ + Initialize S3 support + + Parameters + ---------- + log_level : S3LogLevel + level of logging + num_event_loop_threads : int, default 1 + how many threads to use for the AWS SDK's I/O event loop + + Examples + -------- + >>> fs.initialize_s3(fs.S3LogLevel.Error) # doctest: +SKIP + """ + cdef CS3GlobalOptions options + options.log_level = log_level + options.num_event_loop_threads = num_event_loop_threads + check_status(CInitializeS3(options)) + + +def ensure_s3_initialized(): + """ + Initialize S3 (with default options) if not already initialized + """ + check_status(CEnsureS3Initialized()) + + +def finalize_s3(): + check_status(CFinalizeS3()) + + +def ensure_s3_finalized(): + """ + Finalize S3 if already initialized + """ + check_status(CEnsureS3Finalized()) + + +def resolve_s3_region(bucket): + """ + Resolve the S3 region of a bucket. + + Parameters + ---------- + bucket : str + A S3 bucket name + + Returns + ------- + region : str + A S3 region name + + Examples + -------- + >>> fs.resolve_s3_region('voltrondata-labs-datasets') + 'us-east-2' + """ + cdef: + c_string c_bucket + c_string c_region + + ensure_s3_initialized() + + c_bucket = tobytes(bucket) + with nogil: + c_region = GetResultValue(ResolveS3BucketRegion(c_bucket)) + + return frombytes(c_region) + + +class S3RetryStrategy: + """ + Base class for AWS retry strategies for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + + def __init__(self, max_attempts=3): + self.max_attempts = max_attempts + + +class AwsStandardS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Standard retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +class AwsDefaultS3RetryStrategy(S3RetryStrategy): + """ + Represents an AWS Default retry strategy for use with S3. + + Parameters + ---------- + max_attempts : int, default 3 + The maximum number of retry attempts to attempt before failing. + """ + pass + + +cdef class S3FileSystem(FileSystem): + """ + S3-backed FileSystem implementation + + AWS access_key and secret_key can be provided explicitly. + + If role_arn is provided instead of access_key and secret_key, temporary + credentials will be fetched by issuing a request to STS to assume the + specified role. + + If neither access_key nor secret_key are provided, and role_arn is also not + provided, then attempts to establish the credentials automatically. 
+ S3FileSystem will try the following methods, in order: + + * ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` environment variables + * configuration files such as ``~/.aws/credentials`` and ``~/.aws/config`` + * for nodes on Amazon EC2, the EC2 Instance Metadata Service + + Note: S3 buckets are special and the operations available on them may be + limited or more expensive than desired. + + When S3FileSystem creates new buckets (assuming allow_bucket_creation is + True), it does not pass any non-default settings. In AWS S3, the bucket and + all objects will be not publicly visible, and will have no bucket policies + and no resource tags. To have more control over how buckets are created, + use a different API to create them. + + Parameters + ---------- + access_key : str, default None + AWS Access Key ID. Pass None to use the standard AWS environment + variables and/or configuration file. + secret_key : str, default None + AWS Secret Access key. Pass None to use the standard AWS environment + variables and/or configuration file. + session_token : str, default None + AWS Session Token. An optional session token, required if access_key + and secret_key are temporary credentials from STS. + anonymous : boolean, default False + Whether to connect anonymously if access_key and secret_key are None. + If true, will not attempt to look up credentials using standard AWS + configuration methods. + role_arn : str, default None + AWS Role ARN. If provided instead of access_key and secret_key, + temporary credentials will be fetched by assuming this role. + session_name : str, default None + An optional identifier for the assumed role session. + external_id : str, default None + An optional unique identifier that might be required when you assume + a role in another account. + load_frequency : int, default 900 + The frequency (in seconds) with which temporary credentials from an + assumed role session will be refreshed. + region : str, default None + AWS region to connect to. If not set, the AWS SDK will attempt to + determine the region using heuristics such as environment variables, + configuration profile, EC2 metadata, or default to 'us-east-1' when SDK + version <1.8. One can also use :func:`pyarrow.fs.resolve_s3_region` to + automatically resolve the region from a bucket name. + request_timeout : double, default None + Socket read timeouts on Windows and macOS, in seconds. + If omitted, the AWS SDK default value is used (typically 3 seconds). + This option is ignored on non-Windows, non-macOS systems. + connect_timeout : double, default None + Socket connection timeout, in seconds. + If omitted, the AWS SDK default value is used (typically 1 second). + scheme : str, default 'https' + S3 connection transport scheme. + endpoint_override : str, default None + Override region with a connect string such as "localhost:9000" + background_writes : boolean, default True + Whether file writes will be issued in the background, without + blocking. + default_metadata : mapping or pyarrow.KeyValueMetadata, default None + Default metadata for open_output_stream. This will be ignored if + non-empty metadata is passed to open_output_stream. + proxy_options : dict or str, default None + If a proxy is used, provide the options here. Supported options are: + 'scheme' (str: 'http' or 'https'; required), 'host' (str; required), + 'port' (int; required), 'username' (str; optional), + 'password' (str; optional). 
+ A proxy URI (str) can also be provided, in which case these options + will be derived from the provided URI. + The following are equivalent:: + + S3FileSystem(proxy_options='http://username:password@localhost:8020') + S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost', + 'port': 8020, 'username': 'username', + 'password': 'password'}) + allow_bucket_creation : bool, default False + Whether to allow CreateDir at the bucket-level. This option may also be + passed in a URI query parameter. + allow_bucket_deletion : bool, default False + Whether to allow DeleteDir at the bucket-level. This option may also be + passed in a URI query parameter. + retry_strategy : S3RetryStrategy, default AwsStandardS3RetryStrategy(max_attempts=3) + The retry strategy to use with S3; fail after max_attempts. Available + strategies are AwsStandardS3RetryStrategy, AwsDefaultS3RetryStrategy. + + Examples + -------- + >>> from pyarrow import fs + >>> s3 = fs.S3FileSystem(region='us-west-2') + >>> s3.get_file_info(fs.FileSelector( + ... 'power-analysis-ready-datastore/power_901_constants.zarr/FROCEAN', recursive=True + ... )) + [ wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.s3fs = wrapped.get() + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return S3FileSystem(**kwargs) + + def __reduce__(self): + cdef CS3Options opts = self.s3fs.options() + + # if creds were explicitly provided, then use them + # else obtain them as they were last time. + if opts.credentials_kind == CS3CredentialsKind_Explicit: + access_key = frombytes(opts.GetAccessKey()) + secret_key = frombytes(opts.GetSecretKey()) + session_token = frombytes(opts.GetSessionToken()) + else: + access_key = None + secret_key = None + session_token = None + + return ( + S3FileSystem._reconstruct, (dict( + access_key=access_key, + secret_key=secret_key, + session_token=session_token, + anonymous=(opts.credentials_kind == + CS3CredentialsKind_Anonymous), + region=frombytes(opts.region), + scheme=frombytes(opts.scheme), + connect_timeout=opts.connect_timeout, + request_timeout=opts.request_timeout, + endpoint_override=frombytes(opts.endpoint_override), + role_arn=frombytes(opts.role_arn), + session_name=frombytes(opts.session_name), + external_id=frombytes(opts.external_id), + load_frequency=opts.load_frequency, + background_writes=opts.background_writes, + allow_bucket_creation=opts.allow_bucket_creation, + allow_bucket_deletion=opts.allow_bucket_deletion, + default_metadata=pyarrow_wrap_metadata(opts.default_metadata), + proxy_options={'scheme': frombytes(opts.proxy_options.scheme), + 'host': frombytes(opts.proxy_options.host), + 'port': opts.proxy_options.port, + 'username': frombytes( + opts.proxy_options.username), + 'password': frombytes( + opts.proxy_options.password)}, + ),) + ) + + @property + def region(self): + """ + The AWS region this filesystem connects to. 
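+
+        Examples
+        --------
+        A small sketch (the region shown is an assumption; the actual value
+        depends on how the filesystem was constructed):
+
+        >>> from pyarrow import fs  # doctest: +SKIP
+        >>> fs.S3FileSystem(region='us-east-2').region  # doctest: +SKIP
+        'us-east-2'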
+ """ + return frombytes(self.s3fs.region()) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..aa033905212c2e79d8f513fd97b77ddc380471f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/flight.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/flight.py new file mode 100644 index 0000000000000000000000000000000000000000..b1836907c6744161c86f32e873316923c60b4226 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/flight.py @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +try: + from pyarrow._flight import ( # noqa:F401 + connect, + Action, + ActionType, + BasicAuth, + CallInfo, + CertKeyPair, + ClientAuthHandler, + ClientMiddleware, + ClientMiddlewareFactory, + DescriptorType, + FlightCallOptions, + FlightCancelledError, + FlightClient, + FlightDataStream, + FlightDescriptor, + FlightEndpoint, + FlightError, + FlightInfo, + FlightInternalError, + FlightMetadataReader, + FlightMetadataWriter, + FlightMethod, + FlightServerBase, + FlightServerError, + FlightStreamChunk, + FlightStreamReader, + FlightStreamWriter, + FlightTimedOutError, + FlightUnauthenticatedError, + FlightUnauthorizedError, + FlightUnavailableError, + FlightWriteSizeExceededError, + GeneratorStream, + Location, + MetadataRecordBatchReader, + MetadataRecordBatchWriter, + RecordBatchStream, + Result, + SchemaResult, + ServerAuthHandler, + ServerCallContext, + ServerMiddleware, + ServerMiddlewareFactory, + Ticket, + TracingServerMiddlewareFactory, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'flight' ({str(exc)})" + ) from None diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/api.h new file mode 100644 index 0000000000000000000000000000000000000000..ac568a00eedc32984758f4675b58ac626c9c947a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/api.h @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Coarse public API while the library is in development + +#pragma once + +#include "arrow/array.h" // IWYU pragma: export +#include "arrow/array/array_run_end.h" // IWYU pragma: export +#include "arrow/array/concatenate.h" // IWYU pragma: export +#include "arrow/buffer.h" // IWYU pragma: export +#include "arrow/builder.h" // IWYU pragma: export +#include "arrow/chunked_array.h" // IWYU pragma: export +#include "arrow/compare.h" // IWYU pragma: export +#include "arrow/config.h" // IWYU pragma: export +#include "arrow/datum.h" // IWYU pragma: export +#include "arrow/extension_type.h" // IWYU pragma: export +#include "arrow/memory_pool.h" // IWYU pragma: export +#include "arrow/pretty_print.h" // IWYU pragma: export +#include "arrow/record_batch.h" // IWYU pragma: export +#include "arrow/result.h" // IWYU pragma: export +#include "arrow/status.h" // IWYU pragma: export +#include "arrow/table.h" // IWYU pragma: export +#include "arrow/table_builder.h" // IWYU pragma: export +#include "arrow/tensor.h" // IWYU pragma: export +#include "arrow/type.h" // IWYU pragma: export +#include "arrow/util/key_value_metadata.h" // IWYU pragma: export +#include "arrow/visit_array_inline.h" // IWYU pragma: export +#include "arrow/visit_scalar_inline.h" // IWYU pragma: export +#include "arrow/visitor.h" // IWYU pragma: export + +/// \brief Top-level namespace for Apache Arrow C++ API +namespace arrow {} diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..52fd94ec1f7d4031aa18257da5c9a88e72441c5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h @@ -0,0 +1,587 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/device.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/span.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Buffer classes + +/// \class Buffer +/// \brief Object containing a pointer to a piece of contiguous memory with a +/// particular size. 
+/// +/// Buffers have two related notions of length: size and capacity. Size is +/// the number of bytes that might have valid data. Capacity is the number +/// of bytes that were allocated for the buffer in total. +/// +/// The Buffer base class does not own its memory, but subclasses often do. +/// +/// The following invariant is always true: Size <= Capacity +class ARROW_EXPORT Buffer { + public: + ARROW_DISALLOW_COPY_AND_ASSIGN(Buffer); + + /// \brief Construct from buffer and size without copying memory + /// + /// \param[in] data a memory buffer + /// \param[in] size buffer size + /// + /// \note The passed memory must be kept alive through some other means + Buffer(const uint8_t* data, int64_t size) + : is_mutable_(false), + is_cpu_(true), + data_(data), + size_(size), + capacity_(size), + device_type_(DeviceAllocationType::kCPU) { + SetMemoryManager(default_cpu_memory_manager()); + } + + Buffer(const uint8_t* data, int64_t size, std::shared_ptr mm, + std::shared_ptr parent = NULLPTR, + std::optional device_type_override = std::nullopt) + : is_mutable_(false), + data_(data), + size_(size), + capacity_(size), + parent_(std::move(parent)) { + // SetMemoryManager will also set device_type_ + SetMemoryManager(std::move(mm)); + // If a device type is specified, use that instead. Example of when this can be + // useful: the CudaMemoryManager can set device_type_ to kCUDA, but you can specify + // device_type_override=kCUDA_HOST as the device type to override it. + if (device_type_override != std::nullopt) { + device_type_ = *device_type_override; + } + } + + Buffer(uintptr_t address, int64_t size, std::shared_ptr mm, + std::shared_ptr parent = NULLPTR) + : Buffer(reinterpret_cast(address), size, std::move(mm), + std::move(parent)) {} + + /// \brief Construct from string_view without copying memory + /// + /// \param[in] data a string_view object + /// + /// \note The memory viewed by data must not be deallocated in the lifetime of the + /// Buffer; temporary rvalue strings must be stored in an lvalue somewhere + explicit Buffer(std::string_view data) + : Buffer(reinterpret_cast(data.data()), + static_cast(data.size())) {} + + virtual ~Buffer() = default; + + /// An offset into data that is owned by another buffer, but we want to be + /// able to retain a valid pointer to it even after other shared_ptr's to the + /// parent buffer have been destroyed + /// + /// This method makes no assertions about alignment or padding of the buffer but + /// in general we expected buffers to be aligned and padded to 64 bytes. In the future + /// we might add utility methods to help determine if a buffer satisfies this contract. + Buffer(const std::shared_ptr& parent, const int64_t offset, const int64_t size) + : Buffer(parent->data_ + offset, size) { + parent_ = parent; + SetMemoryManager(parent->memory_manager_); + } + + uint8_t operator[](std::size_t i) const { return data_[i]; } + + /// \brief Construct a new std::string with a hexadecimal representation of the buffer. + /// \return std::string + std::string ToHexString(); + + /// Return true if both buffers are the same size and contain the same bytes + /// up to the number of compared bytes + bool Equals(const Buffer& other, int64_t nbytes) const; + + /// Return true if both buffers are the same size and contain the same bytes + bool Equals(const Buffer& other) const; + + /// Copy a section of the buffer into a new Buffer. 
+ Result> CopySlice( + const int64_t start, const int64_t nbytes, + MemoryPool* pool = default_memory_pool()) const; + + /// Zero bytes in padding, i.e. bytes between size_ and capacity_. + void ZeroPadding() { +#ifndef NDEBUG + CheckMutable(); +#endif + // A zero-capacity buffer can have a null data pointer + if (capacity_ != 0) { + memset(mutable_data() + size_, 0, static_cast(capacity_ - size_)); + } + } + + /// \brief Construct an immutable buffer that takes ownership of the contents + /// of an std::string (without copying it). + /// + /// \param[in] data a string to own + /// \return a new Buffer instance + static std::shared_ptr FromString(std::string data); + + /// \brief Construct an immutable buffer that takes ownership of the contents + /// of an std::vector (without copying it). Only vectors of TrivialType objects + /// (integers, floating point numbers, ...) can be wrapped by this function. + /// + /// \param[in] vec a vector to own + /// \return a new Buffer instance + template + static std::shared_ptr FromVector(std::vector vec) { + static_assert(std::is_trivial_v, + "Buffer::FromVector can only wrap vectors of trivial objects"); + + if (vec.empty()) { + return std::shared_ptr{new Buffer()}; + } + + auto* data = reinterpret_cast(vec.data()); + auto size_in_bytes = static_cast(vec.size() * sizeof(T)); + return std::shared_ptr{ + new Buffer{data, size_in_bytes}, + // Keep the vector's buffer alive inside the shared_ptr's destructor until after + // we have deleted the Buffer. Note we can't use this trick in FromString since + // std::string's data is inline for short strings so moving invalidates pointers + // into the string's buffer. + [vec = std::move(vec)](Buffer* buffer) { delete buffer; }}; + } + + /// \brief Create buffer referencing typed memory with some length without + /// copying + /// \param[in] data the typed memory as C array + /// \param[in] length the number of values in the array + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(const T* data, SizeType length) { + return std::make_shared(reinterpret_cast(data), + static_cast(sizeof(T) * length)); + } + + /// \brief Create buffer referencing std::vector with some length without + /// copying + /// \param[in] data the vector to be referenced. If this vector is changed, + /// the buffer may become invalid + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(const std::vector& data) { + return std::make_shared(reinterpret_cast(data.data()), + static_cast(sizeof(T) * data.size())); + } + + /// \brief Copy buffer contents into a new std::string + /// \return std::string + /// \note Can throw std::bad_alloc if buffer is large + std::string ToString() const; + + /// \brief View buffer contents as a std::string_view + /// \return std::string_view + explicit operator std::string_view() const { + return {reinterpret_cast(data_), static_cast(size_)}; + } + + /// \brief Return a pointer to the buffer's data + /// + /// The buffer has to be a CPU buffer (`is_cpu()` is true). + /// Otherwise, an assertion may be thrown or a null pointer may be returned. + /// + /// To get the buffer's data address regardless of its device, call `address()`. + const uint8_t* data() const { +#ifndef NDEBUG + CheckCPU(); +#endif + return ARROW_PREDICT_TRUE(is_cpu_) ? data_ : NULLPTR; + } + + /// \brief Return a pointer to the buffer's data cast to a specific type + /// + /// The buffer has to be a CPU buffer (`is_cpu()` is true). 
+ /// Otherwise, an assertion may be thrown or a null pointer may be returned. + template + const T* data_as() const { + return reinterpret_cast(data()); + } + + /// \brief Return the buffer's data as a span + template + util::span span_as() const { + return util::span(data_as(), static_cast(size() / sizeof(T))); + } + + /// \brief Return a writable pointer to the buffer's data + /// + /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()` + /// are true). Otherwise, an assertion may be thrown or a null pointer may + /// be returned. + /// + /// To get the buffer's mutable data address regardless of its device, call + /// `mutable_address()`. + uint8_t* mutable_data() { +#ifndef NDEBUG + CheckCPU(); + CheckMutable(); +#endif + return ARROW_PREDICT_TRUE(is_cpu_ && is_mutable_) ? const_cast(data_) + : NULLPTR; + } + + /// \brief Return a writable pointer to the buffer's data cast to a specific type + /// + /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()` + /// are true). Otherwise, an assertion may be thrown or a null pointer may + /// be returned. + template + T* mutable_data_as() { + return reinterpret_cast(mutable_data()); + } + + /// \brief Return the buffer's mutable data as a span + template + util::span mutable_span_as() const { + return util::span(mutable_data_as(), static_cast(size() / sizeof(T))); + } + + /// \brief Return the device address of the buffer's data + uintptr_t address() const { return reinterpret_cast(data_); } + + /// \brief Return a writable device address to the buffer's data + /// + /// The buffer has to be a mutable buffer (`is_mutable()` is true). + /// Otherwise, an assertion may be thrown or 0 may be returned. + uintptr_t mutable_address() const { +#ifndef NDEBUG + CheckMutable(); +#endif + return ARROW_PREDICT_TRUE(is_mutable_) ? reinterpret_cast(data_) : 0; + } + + /// \brief Return the buffer's size in bytes + int64_t size() const { return size_; } + + /// \brief Return the buffer's capacity (number of allocated bytes) + int64_t capacity() const { return capacity_; } + + /// \brief Whether the buffer is directly CPU-accessible + /// + /// If this function returns true, you can read directly from the buffer's + /// `data()` pointer. Otherwise, you'll have to `View()` or `Copy()` it. + bool is_cpu() const { return is_cpu_; } + + /// \brief Whether the buffer is mutable + /// + /// If this function returns true, you are allowed to modify buffer contents + /// using the pointer returned by `mutable_data()` or `mutable_address()`. + bool is_mutable() const { return is_mutable_; } + + const std::shared_ptr& device() const { return memory_manager_->device(); } + + const std::shared_ptr& memory_manager() const { return memory_manager_; } + + DeviceAllocationType device_type() const { return device_type_; } + + std::shared_ptr parent() const { return parent_; } + + /// \brief Get a RandomAccessFile for reading a buffer + /// + /// The returned file object reads from this buffer's underlying memory. + static Result> GetReader(std::shared_ptr); + + /// \brief Get a OutputStream for writing to a buffer + /// + /// The buffer must be mutable. The returned stream object writes into the buffer's + /// underlying memory (but it won't resize it). + static Result> GetWriter(std::shared_ptr); + + /// \brief Copy buffer + /// + /// The buffer contents will be copied into a new buffer allocated by the + /// given MemoryManager. This function supports cross-device copies. 
+ static Result> Copy(std::shared_ptr source, + const std::shared_ptr& to); + + /// \brief Copy a non-owned buffer + /// + /// This is useful for cases where the source memory area is externally managed + /// (its lifetime not tied to the source Buffer), otherwise please use Copy(). + static Result> CopyNonOwned( + const Buffer& source, const std::shared_ptr& to); + + /// \brief View buffer + /// + /// Return a Buffer that reflects this buffer, seen potentially from another + /// device, without making an explicit copy of the contents. The underlying + /// mechanism is typically implemented by the kernel or device driver, and may + /// involve lazy caching of parts of the buffer contents on the destination + /// device's memory. + /// + /// If a non-copy view is unsupported for the buffer on the given device, + /// nullptr is returned. An error can be returned if some low-level + /// operation fails (such as an out-of-memory condition). + static Result> View(std::shared_ptr source, + const std::shared_ptr& to); + + /// \brief View or copy buffer + /// + /// Try to view buffer contents on the given MemoryManager's device, but + /// fall back to copying if a no-copy view isn't supported. + static Result> ViewOrCopy( + std::shared_ptr source, const std::shared_ptr& to); + + virtual std::shared_ptr device_sync_event() { return NULLPTR; } + + protected: + bool is_mutable_; + bool is_cpu_; + const uint8_t* data_; + int64_t size_; + int64_t capacity_; + DeviceAllocationType device_type_; + + // null by default, but may be set + std::shared_ptr parent_; + + private: + // private so that subclasses are forced to call SetMemoryManager() + std::shared_ptr memory_manager_; + + protected: + Buffer(); + + void CheckMutable() const; + void CheckCPU() const; + + void SetMemoryManager(std::shared_ptr mm) { + memory_manager_ = std::move(mm); + is_cpu_ = memory_manager_->is_cpu(); + device_type_ = memory_manager_->device()->device_type(); + } +}; + +/// \defgroup buffer-slicing-functions Functions for slicing buffers +/// +/// @{ + +/// \brief Construct a view on a buffer at the given offset and length. +/// +/// This function cannot fail and does not check for errors (except in debug builds) +static inline std::shared_ptr SliceBuffer(const std::shared_ptr& buffer, + const int64_t offset, + const int64_t length) { + return std::make_shared(buffer, offset, length); +} + +/// \brief Construct a view on a buffer at the given offset, up to the buffer's end. +/// +/// This function cannot fail and does not check for errors (except in debug builds) +static inline std::shared_ptr SliceBuffer(const std::shared_ptr& buffer, + const int64_t offset) { + int64_t length = buffer->size() - offset; + return SliceBuffer(buffer, offset, length); +} + +/// \brief Input-checking version of SliceBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +ARROW_EXPORT +Result> SliceBufferSafe(const std::shared_ptr& buffer, + int64_t offset); +/// \brief Input-checking version of SliceBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size. +ARROW_EXPORT +Result> SliceBufferSafe(const std::shared_ptr& buffer, + int64_t offset, int64_t length); + +/// \brief Like SliceBuffer, but construct a mutable buffer slice. +/// +/// If the parent buffer is not mutable, behavior is undefined (it may abort +/// in debug builds). 
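+//
+// Slicing sketch (assumes `parent` is a std::shared_ptr<arrow::Buffer>; the
+// slices are zero-copy views that keep `parent` alive):
+//
+//   std::shared_ptr<arrow::Buffer> head = arrow::SliceBuffer(parent, 0, 16);
+//   std::shared_ptr<arrow::Buffer> tail = arrow::SliceBuffer(parent, 16);
+//
+//   // Bounds-checked variant: yields an Invalid Status instead of undefined
+//   // behavior when the requested range does not fit in `parent`.
+//   ARROW_ASSIGN_OR_RAISE(auto checked, arrow::SliceBufferSafe(parent, 16, 8));
+//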
+ARROW_EXPORT +std::shared_ptr SliceMutableBuffer(const std::shared_ptr& buffer, + const int64_t offset, const int64_t length); + +/// \brief Like SliceBuffer, but construct a mutable buffer slice. +/// +/// If the parent buffer is not mutable, behavior is undefined (it may abort +/// in debug builds). +static inline std::shared_ptr SliceMutableBuffer( + const std::shared_ptr& buffer, const int64_t offset) { + int64_t length = buffer->size() - offset; + return SliceMutableBuffer(buffer, offset, length); +} + +/// \brief Input-checking version of SliceMutableBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +ARROW_EXPORT +Result> SliceMutableBufferSafe( + const std::shared_ptr& buffer, int64_t offset); +/// \brief Input-checking version of SliceMutableBuffer +/// +/// An Invalid Status is returned if the requested slice falls out of bounds. +/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size. +ARROW_EXPORT +Result> SliceMutableBufferSafe( + const std::shared_ptr& buffer, int64_t offset, int64_t length); + +/// @} + +/// \class MutableBuffer +/// \brief A Buffer whose contents can be mutated. May or may not own its data. +class ARROW_EXPORT MutableBuffer : public Buffer { + public: + MutableBuffer(uint8_t* data, const int64_t size) : Buffer(data, size) { + is_mutable_ = true; + } + + MutableBuffer(uint8_t* data, const int64_t size, std::shared_ptr mm) + : Buffer(data, size, std::move(mm)) { + is_mutable_ = true; + } + + MutableBuffer(const std::shared_ptr& parent, const int64_t offset, + const int64_t size); + + /// \brief Create buffer referencing typed memory with some length + /// \param[in] data the typed memory as C array + /// \param[in] length the number of values in the array + /// \return a new shared_ptr + template + static std::shared_ptr Wrap(T* data, SizeType length) { + return std::make_shared(reinterpret_cast(data), + static_cast(sizeof(T) * length)); + } + + protected: + MutableBuffer() : Buffer(NULLPTR, 0) {} +}; + +/// \class ResizableBuffer +/// \brief A mutable buffer that can be resized +class ARROW_EXPORT ResizableBuffer : public MutableBuffer { + public: + /// Change buffer reported size to indicated size, allocating memory if + /// necessary. This will ensure that the capacity of the buffer is a multiple + /// of 64 bytes as defined in Layout.md. + /// Consider using ZeroPadding afterwards, to conform to the Arrow layout + /// specification. + /// + /// @param new_size The new size for the buffer. + /// @param shrink_to_fit Whether to shrink the capacity if new size < current size + virtual Status Resize(const int64_t new_size, bool shrink_to_fit) = 0; + Status Resize(const int64_t new_size) { + return Resize(new_size, /*shrink_to_fit=*/true); + } + + /// Ensure that buffer has enough memory allocated to fit the indicated + /// capacity (and meets the 64 byte padding requirement in Layout.md). + /// It does not change buffer's reported size and doesn't zero the padding. 
+ virtual Status Reserve(const int64_t new_capacity) = 0; + + template + Status TypedResize(const int64_t new_nb_elements, bool shrink_to_fit = true) { + return Resize(sizeof(T) * new_nb_elements, shrink_to_fit); + } + + template + Status TypedReserve(const int64_t new_nb_elements) { + return Reserve(sizeof(T) * new_nb_elements); + } + + protected: + ResizableBuffer(uint8_t* data, int64_t size) : MutableBuffer(data, size) {} + ResizableBuffer(uint8_t* data, int64_t size, std::shared_ptr mm) + : MutableBuffer(data, size, std::move(mm)) {} +}; + +/// \defgroup buffer-allocation-functions Functions for allocating buffers +/// +/// @{ + +/// \brief Allocate a fixed size mutable buffer from a memory pool, zero its padding. +/// +/// \param[in] size size of buffer to allocate +/// \param[in] pool a memory pool +ARROW_EXPORT +Result> AllocateBuffer(const int64_t size, + MemoryPool* pool = NULLPTR); +ARROW_EXPORT +Result> AllocateBuffer(const int64_t size, int64_t alignment, + MemoryPool* pool = NULLPTR); + +/// \brief Allocate a resizeable buffer from a memory pool, zero its padding. +/// +/// \param[in] size size of buffer to allocate +/// \param[in] pool a memory pool +ARROW_EXPORT +Result> AllocateResizableBuffer( + const int64_t size, MemoryPool* pool = NULLPTR); +ARROW_EXPORT +Result> AllocateResizableBuffer( + const int64_t size, const int64_t alignment, MemoryPool* pool = NULLPTR); + +/// \brief Allocate a bitmap buffer from a memory pool +/// no guarantee on values is provided. +/// +/// \param[in] length size in bits of bitmap to allocate +/// \param[in] pool memory pool to allocate memory from +ARROW_EXPORT +Result> AllocateBitmap(int64_t length, + MemoryPool* pool = NULLPTR); + +/// \brief Allocate a zero-initialized bitmap buffer from a memory pool +/// +/// \param[in] length size in bits of bitmap to allocate +/// \param[in] pool memory pool to allocate memory from +ARROW_EXPORT +Result> AllocateEmptyBitmap(int64_t length, + MemoryPool* pool = NULLPTR); + +ARROW_EXPORT +Result> AllocateEmptyBitmap(int64_t length, int64_t alignment, + MemoryPool* pool = NULLPTR); + +/// \brief Concatenate multiple buffers into a single buffer +/// +/// \param[in] buffers to be concatenated +/// \param[in] pool memory pool to allocate the new buffer from +ARROW_EXPORT +Result> ConcatenateBuffers(const BufferVector& buffers, + MemoryPool* pool = NULLPTR); + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h new file mode 100644 index 0000000000000000000000000000000000000000..818070ffe350afaa0810585b5716f04577f0d039 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
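+//
+// Allocation sketch for the pool-backed factories declared in arrow/buffer.h
+// above (error handling via the usual Status/Result macros):
+//
+//   ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::Buffer> fixed,
+//                         arrow::AllocateBuffer(4096));
+//
+//   ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::ResizableBuffer> growable,
+//                         arrow::AllocateResizableBuffer(0));
+//   ARROW_RETURN_NOT_OK(growable->Resize(1024));         // grow to 1024 bytes
+//   ARROW_RETURN_NOT_OK(growable->Reserve(64 * 1024));   // pre-allocate capacity
+//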
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +struct ChunkLocation { + int64_t chunk_index, index_in_chunk; +}; + +// An object that resolves an array chunk depending on a logical index +struct ARROW_EXPORT ChunkResolver { + explicit ChunkResolver(const ArrayVector& chunks); + + explicit ChunkResolver(const std::vector& chunks); + + explicit ChunkResolver(const RecordBatchVector& batches); + + ChunkResolver(ChunkResolver&& other) noexcept + : offsets_(std::move(other.offsets_)), cached_chunk_(other.cached_chunk_.load()) {} + + ChunkResolver& operator=(ChunkResolver&& other) { + offsets_ = std::move(other.offsets_); + cached_chunk_.store(other.cached_chunk_.load()); + return *this; + } + + /// \brief Return a ChunkLocation containing the chunk index and in-chunk value index of + /// the chunked array at logical index + inline ChunkLocation Resolve(const int64_t index) const { + // It is common for the algorithms below to make consecutive accesses at + // a relatively small distance from each other, hence often falling in + // the same chunk. + // This is trivial when merging (assuming each side of the merge uses + // its own resolver), but also in the inner recursive invocations of + // partitioning. + if (offsets_.size() <= 1) { + return {0, index}; + } + const auto cached_chunk = cached_chunk_.load(); + const bool cache_hit = + (index >= offsets_[cached_chunk] && index < offsets_[cached_chunk + 1]); + if (ARROW_PREDICT_TRUE(cache_hit)) { + return {cached_chunk, index - offsets_[cached_chunk]}; + } + auto chunk_index = Bisect(index); + cached_chunk_.store(chunk_index); + return {chunk_index, index - offsets_[chunk_index]}; + } + + protected: + // Find the chunk index corresponding to a value index using binary search + inline int64_t Bisect(const int64_t index) const { + // Like std::upper_bound(), but hand-written as it can help the compiler. + // Search [lo, lo + n) + int64_t lo = 0; + auto n = static_cast(offsets_.size()); + while (n > 1) { + const int64_t m = n >> 1; + const int64_t mid = lo + m; + if (static_cast(index) >= offsets_[mid]) { + lo = mid; + n -= m; + } else { + n = m; + } + } + return lo; + } + + private: + // Collection of starting offsets used for binary search + std::vector offsets_; + + // Tracks the most recently used chunk index to allow fast + // access for consecutive indices corresponding to the same chunk + mutable std::atomic cached_chunk_; +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h new file mode 100644 index 0000000000000000000000000000000000000000..5d300861d85c2c5d66d6689bfd010b275d50c745 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h @@ -0,0 +1,275 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
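+//
+// Resolution sketch for ChunkResolver above (assumes `chunks` is the
+// ArrayVector backing some chunked data):
+//
+//   arrow::internal::ChunkResolver resolver(chunks);
+//   arrow::internal::ChunkLocation loc = resolver.Resolve(/*index=*/1000);
+//   // loc.chunk_index    : which chunk holds logical element 1000
+//   // loc.index_in_chunk : the element's position within that chunk
+//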
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/chunk_resolver.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class MemoryPool; +namespace stl { +template +class ChunkedArrayIterator; +} // namespace stl + +/// \class ChunkedArray +/// \brief A data structure managing a list of primitive Arrow arrays logically +/// as one large array +/// +/// Data chunking is treated throughout this project largely as an +/// implementation detail for performance and memory use optimization. +/// ChunkedArray allows Array objects to be collected and interpreted +/// as a single logical array without requiring an expensive concatenation +/// step. +/// +/// In some cases, data produced by a function may exceed the capacity of an +/// Array (like BinaryArray or StringArray) and so returning multiple Arrays is +/// the only possibility. In these cases, we recommend returning a ChunkedArray +/// instead of vector of Arrays or some alternative. +/// +/// When data is processed in parallel, it may not be practical or possible to +/// create large contiguous memory allocations and write output into them. With +/// some data types, like binary and string types, it is not possible at all to +/// produce non-chunked array outputs without requiring a concatenation step at +/// the end of processing. +/// +/// Application developers may tune chunk sizes based on analysis of +/// performance profiles but many developer-users will not need to be +/// especially concerned with the chunking details. +/// +/// Preserving the chunk layout/sizes in processing steps is generally not +/// considered to be a contract in APIs. A function may decide to alter the +/// chunking of its result. Similarly, APIs accepting multiple ChunkedArray +/// inputs should not expect the chunk layout to be the same in each input. +class ARROW_EXPORT ChunkedArray { + public: + ChunkedArray(ChunkedArray&&) = default; + ChunkedArray& operator=(ChunkedArray&&) = default; + + /// \brief Construct a chunked array from a single Array + explicit ChunkedArray(std::shared_ptr chunk) + : ChunkedArray(ArrayVector{std::move(chunk)}) {} + + /// \brief Construct a chunked array from a vector of arrays and an optional data type + /// + /// The vector elements must have the same data type. + /// If the data type is passed explicitly, the vector may be empty. + /// If the data type is omitted, the vector must be non-empty. + explicit ChunkedArray(ArrayVector chunks, std::shared_ptr type = NULLPTR); + + // \brief Constructor with basic input validation. + static Result> Make( + ArrayVector chunks, std::shared_ptr type = NULLPTR); + + /// \brief Create an empty ChunkedArray of a given type + /// + /// The output ChunkedArray will have one chunk with an empty + /// array of the given type. 
+ /// + /// \param[in] type the data type of the empty ChunkedArray + /// \param[in] pool the memory pool to allocate memory from + /// \return the resulting ChunkedArray + static Result> MakeEmpty( + std::shared_ptr type, MemoryPool* pool = default_memory_pool()); + + /// \return the total length of the chunked array; computed on construction + int64_t length() const { return length_; } + + /// \return the total number of nulls among all chunks + int64_t null_count() const { return null_count_; } + + /// \return the total number of chunks in the chunked array + int num_chunks() const { return static_cast(chunks_.size()); } + + /// \return chunk a particular chunk from the chunked array + const std::shared_ptr& chunk(int i) const { return chunks_[i]; } + + /// \return an ArrayVector of chunks + const ArrayVector& chunks() const { return chunks_; } + + /// \brief Construct a zero-copy slice of the chunked array with the + /// indicated offset and length + /// + /// \param[in] offset the position of the first element in the constructed + /// slice + /// \param[in] length the length of the slice. If there are not enough + /// elements in the chunked array, the length will be adjusted accordingly + /// + /// \return a new object wrapped in std::shared_ptr + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Slice from offset until end of the chunked array + std::shared_ptr Slice(int64_t offset) const; + + /// \brief Flatten this chunked array as a vector of chunked arrays, one + /// for each struct field + /// + /// \param[in] pool The pool for buffer allocations, if any + Result>> Flatten( + MemoryPool* pool = default_memory_pool()) const; + + /// Construct a zero-copy view of this chunked array with the given + /// type. Calls Array::View on each constituent chunk. Always succeeds if + /// there are zero chunks + Result> View(const std::shared_ptr& type) const; + + /// \brief Return the type of the chunked array + const std::shared_ptr& type() const { return type_; } + + /// \brief Return a Scalar containing the value of this array at index + Result> GetScalar(int64_t index) const; + + /// \brief Determine if two chunked arrays are equal. + /// + /// Two chunked arrays can be equal only if they have equal datatypes. + /// However, they may be equal even if they have different chunkings. + bool Equals(const ChunkedArray& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + /// \brief Determine if two chunked arrays are equal. + bool Equals(const std::shared_ptr& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + /// \brief Determine if two chunked arrays approximately equal + bool ApproxEquals(const ChunkedArray& other, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// \return PrettyPrint representation suitable for debugging + std::string ToString() const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the chunk array's internal data. + /// + /// This is O(k*m) where k is the number of array descendents, + /// and m is the number of chunks. + /// + /// \return Status + Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the chunk array's internal data. + /// + /// This is O(k*n) where k is the number of array descendents, + /// and n is the length in elements. 
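+//
+// ChunkedArray sketch (assumes `a1` and `a2` are std::shared_ptr<arrow::Array>
+// with the same data type; error handling uses the usual macros):
+//
+//   ARROW_ASSIGN_OR_RAISE(auto chunked, arrow::ChunkedArray::Make({a1, a2}));
+//   int64_t n = chunked->length();        // total logical length
+//   int k = chunked->num_chunks();        // here: 2
+//   auto tail = chunked->Slice(10);       // zero-copy logical slice
+//   ARROW_RETURN_NOT_OK(chunked->Validate());
+//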
+ /// + /// \return Status + Status ValidateFull() const; + + protected: + ArrayVector chunks_; + std::shared_ptr type_; + int64_t length_; + int64_t null_count_; + + private: + template + friend class ::arrow::stl::ChunkedArrayIterator; + internal::ChunkResolver chunk_resolver_; + ARROW_DISALLOW_COPY_AND_ASSIGN(ChunkedArray); +}; + +namespace internal { + +/// \brief EXPERIMENTAL: Utility for incremental iteration over contiguous +/// pieces of potentially differently-chunked ChunkedArray objects +class ARROW_EXPORT MultipleChunkIterator { + public: + MultipleChunkIterator(const ChunkedArray& left, const ChunkedArray& right) + : left_(left), + right_(right), + pos_(0), + length_(left.length()), + chunk_idx_left_(0), + chunk_idx_right_(0), + chunk_pos_left_(0), + chunk_pos_right_(0) {} + + bool Next(std::shared_ptr* next_left, std::shared_ptr* next_right); + + int64_t position() const { return pos_; } + + private: + const ChunkedArray& left_; + const ChunkedArray& right_; + + // The amount of the entire ChunkedArray consumed + int64_t pos_; + + // Length of the chunked array(s) + int64_t length_; + + // Current left chunk + int chunk_idx_left_; + + // Current right chunk + int chunk_idx_right_; + + // Offset into the current left chunk + int64_t chunk_pos_left_; + + // Offset into the current right chunk + int64_t chunk_pos_right_; +}; + +/// \brief Evaluate binary function on two ChunkedArray objects having possibly +/// different chunk layouts. The passed binary function / functor should have +/// the following signature. +/// +/// Status(const Array&, const Array&, int64_t) +/// +/// The third argument is the absolute position relative to the start of each +/// ChunkedArray. The function is executed against each contiguous pair of +/// array segments, slicing if necessary. +/// +/// For example, if two arrays have chunk sizes +/// +/// left: [10, 10, 20] +/// right: [15, 10, 15] +/// +/// Then the following invocations take place (pseudocode) +/// +/// func(left.chunk[0][0:10], right.chunk[0][0:10], 0) +/// func(left.chunk[1][0:5], right.chunk[0][10:15], 10) +/// func(left.chunk[1][5:10], right.chunk[1][0:5], 15) +/// func(left.chunk[2][0:5], right.chunk[1][5:10], 20) +/// func(left.chunk[2][5:20], right.chunk[2][:], 25) +template +Status ApplyBinaryChunked(const ChunkedArray& left, const ChunkedArray& right, + Action&& action) { + MultipleChunkIterator iterator(left, right); + std::shared_ptr left_piece, right_piece; + while (iterator.Next(&left_piece, &right_piece)) { + ARROW_RETURN_NOT_OK(action(*left_piece, *right_piece, iterator.position())); + } + return Status::OK(); +} + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h new file mode 100644 index 0000000000000000000000000000000000000000..6dbacfa86af592c1e2aecf22aea2322ce5bc5090 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
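+//
+// ApplyBinaryChunked sketch (assumes `left` and `right` are ChunkedArrays of
+// equal length, as required by the contract described above):
+//
+//   ARROW_RETURN_NOT_OK(arrow::internal::ApplyBinaryChunked(
+//       left, right,
+//       [](const arrow::Array& l, const arrow::Array& r, int64_t position) {
+//         // `l` and `r` are equal-length contiguous segments starting at
+//         // absolute offset `position`.
+//         return arrow::Status::OK();
+//       }));
+//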
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for comparing Arrow data structures + +#pragma once + +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class Tensor; +class SparseTensor; +struct Scalar; + +static constexpr double kDefaultAbsoluteTolerance = 1E-5; + +/// A container of options for equality comparisons +class EqualOptions { + public: + /// Whether or not NaNs are considered equal. + bool nans_equal() const { return nans_equal_; } + + /// Return a new EqualOptions object with the "nans_equal" property changed. + EqualOptions nans_equal(bool v) const { + auto res = EqualOptions(*this); + res.nans_equal_ = v; + return res; + } + + /// Whether or not zeros with differing signs are considered equal. + bool signed_zeros_equal() const { return signed_zeros_equal_; } + + /// Return a new EqualOptions object with the "signed_zeros_equal" property changed. + EqualOptions signed_zeros_equal(bool v) const { + auto res = EqualOptions(*this); + res.signed_zeros_equal_ = v; + return res; + } + + /// The absolute tolerance for approximate comparisons of floating-point values. + double atol() const { return atol_; } + + /// Return a new EqualOptions object with the "atol" property changed. + EqualOptions atol(double v) const { + auto res = EqualOptions(*this); + res.atol_ = v; + return res; + } + + /// The ostream to which a diff will be formatted if arrays disagree. + /// If this is null (the default) no diff will be formatted. + std::ostream* diff_sink() const { return diff_sink_; } + + /// Return a new EqualOptions object with the "diff_sink" property changed. + /// This option will be ignored if diff formatting of the types of compared arrays is + /// not supported. + EqualOptions diff_sink(std::ostream* diff_sink) const { + auto res = EqualOptions(*this); + res.diff_sink_ = diff_sink; + return res; + } + + static EqualOptions Defaults() { return {}; } + + protected: + double atol_ = kDefaultAbsoluteTolerance; + bool nans_equal_ = false; + bool signed_zeros_equal_ = true; + + std::ostream* diff_sink_ = NULLPTR; +}; + +/// Returns true if the arrays are exactly equal +ARROW_EXPORT bool ArrayEquals(const Array& left, const Array& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if the arrays are approximately equal. 
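+//
+// Comparison sketch (assumes `a` and `b` are arrow::Array references and
+// <iostream> is available for the diff sink):
+//
+//   auto opts = arrow::EqualOptions::Defaults().nans_equal(true).atol(1e-3);
+//   bool approx_same = arrow::ArrayApproxEquals(a, b, opts);
+//
+//   // Emit a human-readable diff to stderr when the arrays disagree.
+//   bool same = arrow::ArrayEquals(
+//       a, b, arrow::EqualOptions::Defaults().diff_sink(&std::cerr));
+//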
For non-floating point +/// types, this is equivalent to ArrayEquals(left, right) +ARROW_EXPORT bool ArrayApproxEquals(const Array& left, const Array& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if indicated equal-length segment of arrays are exactly equal +ARROW_EXPORT bool ArrayRangeEquals(const Array& left, const Array& right, + int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if indicated equal-length segment of arrays are approximately equal +ARROW_EXPORT bool ArrayRangeApproxEquals(const Array& left, const Array& right, + int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()); + +ARROW_EXPORT bool TensorEquals(const Tensor& left, const Tensor& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// EXPERIMENTAL: Returns true if the given sparse tensors are exactly equal +ARROW_EXPORT bool SparseTensorEquals(const SparseTensor& left, const SparseTensor& right, + const EqualOptions& = EqualOptions::Defaults()); + +/// Returns true if the type metadata are exactly equal +/// \param[in] left a DataType +/// \param[in] right a DataType +/// \param[in] check_metadata whether to compare KeyValueMetadata for child +/// fields +ARROW_EXPORT bool TypeEquals(const DataType& left, const DataType& right, + bool check_metadata = true); + +/// Returns true if scalars are equal +/// \param[in] left a Scalar +/// \param[in] right a Scalar +/// \param[in] options comparison options +ARROW_EXPORT bool ScalarEquals(const Scalar& left, const Scalar& right, + const EqualOptions& options = EqualOptions::Defaults()); + +/// Returns true if scalars are approximately equal +/// \param[in] left a Scalar +/// \param[in] right a Scalar +/// \param[in] options comparison options +ARROW_EXPORT bool ScalarApproxEquals( + const Scalar& left, const Scalar& right, + const EqualOptions& options = EqualOptions::Defaults()); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/config.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/config.h new file mode 100644 index 0000000000000000000000000000000000000000..617d6c268b55ea344a3fe7f96141ff0f7e4d3f88 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/config.h @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/config.h" // IWYU pragma: export +#include "arrow/util/visibility.h" + +namespace arrow { + +struct BuildInfo { + /// The packed version number, e.g. 1002003 (decimal) for Arrow 1.2.3 + int version; + /// The "major" version number, e.g. 
1 for Arrow 1.2.3 + int version_major; + /// The "minor" version number, e.g. 2 for Arrow 1.2.3 + int version_minor; + /// The "patch" version number, e.g. 3 for Arrow 1.2.3 + int version_patch; + /// The version string, e.g. "1.2.3" + std::string version_string; + std::string so_version; + std::string full_so_version; + + /// The CMake compiler identifier, e.g. "GNU" + std::string compiler_id; + std::string compiler_version; + std::string compiler_flags; + + /// The git changeset id, if available + std::string git_id; + /// The git changeset description, if available + std::string git_description; + std::string package_kind; + + /// The uppercase build type, e.g. "DEBUG" or "RELEASE" + std::string build_type; +}; + +struct RuntimeInfo { + /// The enabled SIMD level + /// + /// This can be less than `detected_simd_level` if the ARROW_USER_SIMD_LEVEL + /// environment variable is set to another value. + std::string simd_level; + + /// The SIMD level available on the OS and CPU + std::string detected_simd_level; + + /// Whether using the OS-based timezone database + /// This is set at compile-time. + bool using_os_timezone_db; + + /// The path to the timezone database; by default None. + std::optional timezone_db_path; +}; + +/// \brief Get runtime build info. +/// +/// The returned values correspond to exact loaded version of the Arrow library, +/// rather than the values frozen at application compile-time through the `ARROW_*` +/// preprocessor definitions. +ARROW_EXPORT +const BuildInfo& GetBuildInfo(); + +/// \brief Get runtime info. +/// +ARROW_EXPORT +RuntimeInfo GetRuntimeInfo(); + +struct GlobalOptions { + /// Path to text timezone database. This is only configurable on Windows, + /// which does not have a compatible OS timezone database. + std::optional timezone_db_path; +}; + +ARROW_EXPORT +Status Initialize(const GlobalOptions& options) noexcept; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h new file mode 100644 index 0000000000000000000000000000000000000000..4af1835cd709d43e0abe3b39b46531cae9a047fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
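+//
+// Version-introspection sketch for arrow/config.h above (assumes <iostream>;
+// unrelated to the CSV declarations that follow):
+//
+//   const arrow::BuildInfo& build = arrow::GetBuildInfo();
+//   std::cout << "Arrow " << build.version_string
+//             << " built with " << build.compiler_id << std::endl;
+//
+//   arrow::RuntimeInfo rt = arrow::GetRuntimeInfo();
+//   std::cout << "SIMD level: " << rt.simd_level << std::endl;
+//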
+ +#pragma once + +#include "arrow/csv/options.h" +#include "arrow/csv/reader.h" +#include "arrow/csv/writer.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h new file mode 100644 index 0000000000000000000000000000000000000000..662b16ec40a9485547ce01b32ea0325a23122711 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/status.h" +#include "arrow/util/delimiting.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +ARROW_EXPORT +std::unique_ptr MakeChunker(const ParseOptions& options); + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..07279db313e92d2daeb93be12d0ab307d0c25201 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnBuilder { + public: + virtual ~ColumnBuilder() = default; + + /// Spawn a task that will try to convert and append the given CSV block. + /// All calls to Append() should happen on the same thread, otherwise + /// call Insert() instead. 
+ virtual void Append(const std::shared_ptr& parser) = 0; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual void Insert(int64_t block_index, + const std::shared_ptr& parser) = 0; + + /// Return the final chunked array. The TaskGroup _must_ have finished! + virtual Result> Finish() = 0; + + std::shared_ptr task_group() { return task_group_; } + + /// Construct a strictly-typed ColumnBuilder. + static Result> Make( + MemoryPool* pool, const std::shared_ptr& type, int32_t col_index, + const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a type-inferring ColumnBuilder. + static Result> Make( + MemoryPool* pool, int32_t col_index, const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a ColumnBuilder for a column of nulls + /// (i.e. not present in the CSV file). + static Result> MakeNull( + MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& task_group); + + protected: + explicit ColumnBuilder(std::shared_ptr task_group) + : task_group_(std::move(task_group)) {} + + std::shared_ptr task_group_; +}; + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h new file mode 100644 index 0000000000000000000000000000000000000000..5fbbd5df58b1c588b88e16b68da50b9399211abc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnDecoder { + public: + virtual ~ColumnDecoder() = default; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual Future> Decode( + const std::shared_ptr& parser) = 0; + + /// Construct a strictly-typed ColumnDecoder. + static Result> Make(MemoryPool* pool, + std::shared_ptr type, + int32_t col_index, + const ConvertOptions& options); + + /// Construct a type-inferring ColumnDecoder. + /// Inference will run only on the first block, the type will be frozen afterwards. + static Result> Make(MemoryPool* pool, int32_t col_index, + const ConvertOptions& options); + + /// Construct a ColumnDecoder for a column of nulls + /// (i.e. not present in the CSV file). 
+ static Result> MakeNull(MemoryPool* pool, + std::shared_ptr type); + + protected: + ColumnDecoder() = default; +}; + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..639f692f26a1ba3a134caac68a432ac22f068917 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; + +class ARROW_EXPORT Converter { + public: + Converter(const std::shared_ptr& type, const ConvertOptions& options, + MemoryPool* pool); + virtual ~Converter() = default; + + virtual Result> Convert(const BlockParser& parser, + int32_t col_index) = 0; + + std::shared_ptr type() const { return type_; } + + // Create a Converter for the given data type + static Result> Make( + const std::shared_ptr& type, const ConvertOptions& options, + MemoryPool* pool = default_memory_pool()); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Converter); + + virtual Status Initialize() = 0; + + // CAUTION: ConvertOptions can grow large (if it customizes hundreds or + // thousands of columns), so avoid copying it in each Converter. + const ConvertOptions& options_; + MemoryPool* pool_; + std::shared_ptr type_; +}; + +class ARROW_EXPORT DictionaryConverter : public Converter { + public: + DictionaryConverter(const std::shared_ptr& value_type, + const ConvertOptions& options, MemoryPool* pool); + + // If the dictionary length goes above this value, conversion will fail + // with Status::IndexError. + virtual void SetMaxCardinality(int32_t max_length) = 0; + + // Create a Converter for the given dictionary value type. + // The dictionary index type will always be Int32. 
+ static Result> Make( + const std::shared_ptr& value_type, const ConvertOptions& options, + MemoryPool* pool = default_memory_pool()); + + protected: + std::shared_ptr value_type_; +}; + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h new file mode 100644 index 0000000000000000000000000000000000000000..4360ceaaea6ac07dd218c93ce13c3ab14c16fc63 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +namespace arrow { +namespace csv { + +/// \brief Description of an invalid row +struct InvalidRow { + /// \brief Number of columns expected in the row + int32_t expected_columns; + /// \brief Actual number of columns found in the row + int32_t actual_columns; + /// \brief The physical row number if known or -1 + /// + /// This number is one-based and also accounts for non-data rows (such as + /// CSV header rows). + int64_t number; + /// \brief View of the entire row. Memory will be freed after callback returns + const std::string_view text; +}; + +/// \brief Result returned by an InvalidRowHandler +enum class InvalidRowResult { + // Generate an error describing this row + Error, + // Skip over this row + Skip +}; + +/// \brief callback for handling a row with an invalid number of columns while parsing +/// \return result indicating if an error should be returned from the parser or the row is +/// skipped +using InvalidRowHandler = std::function; + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h new file mode 100644 index 0000000000000000000000000000000000000000..7723dcedc611e922c932d5f9e09e984044ab3c21 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h @@ -0,0 +1,220 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/csv/invalid_row.h" +#include "arrow/csv/type_fwd.h" +#include "arrow/io/interfaces.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +class TimestampParser; + +namespace csv { + +// Silly workaround for https://github.com/michaeljones/breathe/issues/453 +constexpr char kDefaultEscapeChar = '\\'; + +struct ARROW_EXPORT ParseOptions { + // Parsing options + + /// Field delimiter + char delimiter = ','; + /// Whether quoting is used + bool quoting = true; + /// Quoting character (if `quoting` is true) + char quote_char = '"'; + /// Whether a quote inside a value is double-quoted + bool double_quote = true; + /// Whether escaping is used + bool escaping = false; + /// Escaping character (if `escaping` is true) + char escape_char = kDefaultEscapeChar; + /// Whether values are allowed to contain CR (0x0d) and LF (0x0a) characters + bool newlines_in_values = false; + /// Whether empty lines are ignored. If false, an empty line represents + /// a single empty value (assuming a one-column CSV file). + bool ignore_empty_lines = true; + /// A handler function for rows which do not have the correct number of columns + InvalidRowHandler invalid_row_handler; + + /// Create parsing options with default values + static ParseOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +struct ARROW_EXPORT ConvertOptions { + // Conversion options + + /// Whether to check UTF8 validity of string columns + bool check_utf8 = true; + /// Optional per-column types (disabling type inference on those columns) + std::unordered_map> column_types; + /// Recognized spellings for null values + std::vector null_values; + /// Recognized spellings for boolean true values + std::vector true_values; + /// Recognized spellings for boolean false values + std::vector false_values; + + /// Whether string / binary columns can have null values. + /// + /// If true, then strings in "null_values" are considered null for string columns. + /// If false, then all strings are valid string values. + bool strings_can_be_null = false; + + /// Whether quoted values can be null. + /// + /// If true, then strings in "null_values" are also considered null when they + /// appear quoted in the CSV file. Otherwise, quoted values are never considered null. + bool quoted_strings_can_be_null = true; + + /// Whether to try to automatically dict-encode string / binary data. + /// If true, then when type inference detects a string or binary column, + /// it is dict-encoded up to `auto_dict_max_cardinality` distinct values + /// (per chunk), after which it switches to regular encoding. + /// + /// This setting is ignored for non-inferred columns (those in `column_types`). + bool auto_dict_encode = false; + int32_t auto_dict_max_cardinality = 50; + + /// Decimal point character for floating-point and decimal data + char decimal_point = '.'; + + // XXX Should we have a separate FilterOptions? 
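+//
+// Options sketch (column name "price" is a placeholder; shows how the option
+// structs above are typically filled in before constructing a CSV reader):
+//
+//   auto parse_options = arrow::csv::ParseOptions::Defaults();
+//   parse_options.delimiter = ';';
+//
+//   auto convert_options = arrow::csv::ConvertOptions::Defaults();
+//   convert_options.column_types["price"] = arrow::float64();
+//   convert_options.strings_can_be_null = true;
+//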
+ + /// If non-empty, indicates the names of columns from the CSV file that should + /// be actually read and converted (in the vector's order). + /// Columns not in this vector will be ignored. + std::vector include_columns; + /// If false, columns in `include_columns` but not in the CSV file will error out. + /// If true, columns in `include_columns` but not in the CSV file will produce + /// a column of nulls (whose type is selected using `column_types`, + /// or null by default) + /// This option is ignored if `include_columns` is empty. + bool include_missing_columns = false; + + /// User-defined timestamp parsers, using the virtual parser interface in + /// arrow/util/value_parsing.h. More than one parser can be specified, and + /// the CSV conversion logic will try parsing values starting from the + /// beginning of this vector. If no parsers are specified, we use the default + /// built-in ISO-8601 parser. + std::vector> timestamp_parsers; + + /// Create conversion options with default values, including conventional + /// values for `null_values`, `true_values` and `false_values` + static ConvertOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +struct ARROW_EXPORT ReadOptions { + // Reader options + + /// Whether to use the global CPU thread pool + bool use_threads = true; + + /// \brief Block size we request from the IO layer. + /// + /// This will determine multi-threading granularity as well as + /// the size of individual record batches. + /// Minimum valid value for block size is 1 + int32_t block_size = 1 << 20; // 1 MB + + /// Number of header rows to skip (not including the row of column names, if any) + int32_t skip_rows = 0; + + /// Number of rows to skip after the column names are read, if any + int32_t skip_rows_after_names = 0; + + /// Column names for the target table. + /// If empty, fall back on autogenerate_column_names. + std::vector column_names; + + /// Whether to autogenerate column names if `column_names` is empty. + /// If true, column names will be of the form "f0", "f1"... + /// If false, column names will be read from the first CSV row after `skip_rows`. + bool autogenerate_column_names = false; + + /// Create read options with default values + static ReadOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +/// \brief Quoting style for CSV writing +enum class ARROW_EXPORT QuotingStyle { + /// Only enclose values in quotes which need them, because their CSV rendering can + /// contain quotes itself (e.g. strings or binary values) + Needed, + /// Enclose all valid values in quotes. Nulls are not quoted. May cause readers to + /// interpret all values as strings if schema is inferred. + AllValid, + /// Do not enclose any values in quotes. Prevents values from containing quotes ("), + /// cell delimiters (,) or line endings (\\r, \\n), (following RFC4180). If values + /// contain these characters, an error is caused when attempting to write. + None +}; + +struct ARROW_EXPORT WriteOptions { + /// Whether to write an initial header line with column names + bool include_header = true; + + /// \brief Maximum number of rows processed at a time + /// + /// The CSV writer converts and writes data in batches of N rows. + /// This number can impact performance. + int32_t batch_size = 1024; + + /// Field delimiter + char delimiter = ','; + + /// \brief The string to write for null values. Quotes are not allowed in this string. 
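+//
+// ReadOptions sketch (column names are placeholders; `block_size` and
+// `skip_rows` are the fields documented above):
+//
+//   auto read_options = arrow::csv::ReadOptions::Defaults();
+//   read_options.block_size = 4 << 20;            // 4 MB blocks
+//   read_options.skip_rows = 1;                   // skip one row before the data
+//   read_options.column_names = {"id", "price"};
+//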
+ std::string null_string; + + /// \brief IO context for writing. + io::IOContext io_context; + + /// \brief The end of line character to use for ending rows + std::string eol = "\n"; + + /// \brief Quoting style + QuotingStyle quoting_style = QuotingStyle::Needed; + + /// Create write options with default values + static WriteOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..c73e52ce831ed95b4abe83084b483c15660bae7e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h @@ -0,0 +1,228 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/csv/options.h" +#include "arrow/csv/type_fwd.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; + +namespace csv { + +/// Skip at most num_rows from the given input. The input pointer is updated +/// and the number of actually skipped rows is returns (may be less than +/// requested if the input is too short). 
+ARROW_EXPORT +int32_t SkipRows(const uint8_t* data, uint32_t size, int32_t num_rows, + const uint8_t** out_data); + +class BlockParserImpl; + +namespace detail { + +struct ParsedValueDesc { + uint32_t offset : 31; + bool quoted : 1; +}; + +class ARROW_EXPORT DataBatch { + public: + explicit DataBatch(int32_t num_cols) : num_cols_(num_cols) {} + + /// \brief Return the number of parsed rows (not skipped) + int32_t num_rows() const { return num_rows_; } + /// \brief Return the number of parsed columns + int32_t num_cols() const { return num_cols_; } + /// \brief Return the total size in bytes of parsed data + uint32_t num_bytes() const { return parsed_size_; } + /// \brief Return the number of skipped rows + int32_t num_skipped_rows() const { return static_cast(skipped_rows_.size()); } + + template + Status VisitColumn(int32_t col_index, int64_t first_row, Visitor&& visit) const { + using detail::ParsedValueDesc; + + int32_t batch_row = 0; + for (size_t buf_index = 0; buf_index < values_buffers_.size(); ++buf_index) { + const auto& values_buffer = values_buffers_[buf_index]; + const auto values = reinterpret_cast(values_buffer->data()); + const auto max_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - 1; + for (int32_t pos = col_index; pos < max_pos; pos += num_cols_, ++batch_row) { + auto start = values[pos].offset; + auto stop = values[pos + 1].offset; + auto quoted = values[pos + 1].quoted; + Status status = visit(parsed_ + start, stop - start, quoted); + if (ARROW_PREDICT_FALSE(!status.ok())) { + return DecorateWithRowNumber(std::move(status), first_row, batch_row); + } + } + } + return Status::OK(); + } + + template + Status VisitLastRow(Visitor&& visit) const { + using detail::ParsedValueDesc; + + const auto& values_buffer = values_buffers_.back(); + const auto values = reinterpret_cast(values_buffer->data()); + const auto start_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - + num_cols_ - 1; + for (int32_t col_index = 0; col_index < num_cols_; ++col_index) { + auto start = values[start_pos + col_index].offset; + auto stop = values[start_pos + col_index + 1].offset; + auto quoted = values[start_pos + col_index + 1].quoted; + ARROW_RETURN_NOT_OK(visit(parsed_ + start, stop - start, quoted)); + } + return Status::OK(); + } + + protected: + Status DecorateWithRowNumber(Status&& status, int64_t first_row, + int32_t batch_row) const { + if (first_row >= 0) { + // `skipped_rows_` is in ascending order by construction, so use bisection + // to find out how many rows were skipped before `batch_row`. + const auto skips_before = + std::upper_bound(skipped_rows_.begin(), skipped_rows_.end(), batch_row) - + skipped_rows_.begin(); + status = status.WithMessage("Row #", batch_row + skips_before + first_row, ": ", + status.message()); + } + // Use return_if so that when extra context is enabled it will be added + ARROW_RETURN_IF_(true, std::move(status), ARROW_STRINGIFY(status)); + return std::move(status); + } + + // The number of rows in this batch (not including any skipped ones) + int32_t num_rows_ = 0; + // The number of columns + int32_t num_cols_ = 0; + + // XXX should we ensure the parsed buffer is padded with 8 or 16 excess zero bytes? + // It may help with null parsing... 
+  std::vector<std::shared_ptr<Buffer>> values_buffers_;
+  std::shared_ptr<Buffer> parsed_buffer_;
+  const uint8_t* parsed_ = NULLPTR;
+  int32_t parsed_size_ = 0;
+
+  // Record the current num_rows_ each time a row is skipped
+  std::vector<int32_t> skipped_rows_;
+
+  friend class ::arrow::csv::BlockParserImpl;
+};
+
+}  // namespace detail
+
+constexpr int32_t kMaxParserNumRows = 100000;
+
+/// \class BlockParser
+/// \brief A reusable block-based parser for CSV data
+///
+/// The parser takes a block of CSV data and delimits rows and fields,
+/// unquoting and unescaping them on the fly.  Parsed data is owned by the
+/// parser, so the original buffer can be discarded after Parse() returns.
+///
+/// If the block is truncated (i.e. not all data can be parsed), it is up
+/// to the caller to arrange the next block to start with the trailing data.
+/// Also, if the previous block ends with CR (0x0d) and a new block starts
+/// with LF (0x0a), the parser will consider the leading newline as an empty
+/// line; the caller should therefore strip it.
+class ARROW_EXPORT BlockParser {
+ public:
+  explicit BlockParser(ParseOptions options, int32_t num_cols = -1,
+                       int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
+  explicit BlockParser(MemoryPool* pool, ParseOptions options, int32_t num_cols = -1,
+                       int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
+  ~BlockParser();
+
+  /// \brief Parse a block of data
+  ///
+  /// Parse a block of CSV data, ingesting up to max_num_rows rows.
+  /// The number of bytes actually parsed is returned in out_size.
+  Status Parse(std::string_view data, uint32_t* out_size);
+
+  /// \brief Parse sequential blocks of data
+  ///
+  /// Only the last block is allowed to be truncated.
+  Status Parse(const std::vector<std::string_view>& data, uint32_t* out_size);
+
+  /// \brief Parse the final block of data
+  ///
+  /// Like Parse(), but called with the final block in a file.
+  /// The last row may lack a trailing line separator.
+  Status ParseFinal(std::string_view data, uint32_t* out_size);
+
+  /// \brief Parse the final sequential blocks of data
+  ///
+  /// Only the last block is allowed to be truncated.
+  Status ParseFinal(const std::vector<std::string_view>& data, uint32_t* out_size);
+
+  /// \brief Return the number of parsed rows
+  int32_t num_rows() const { return parsed_batch().num_rows(); }
+  /// \brief Return the number of parsed columns
+  int32_t num_cols() const { return parsed_batch().num_cols(); }
+  /// \brief Return the total size in bytes of parsed data
+  uint32_t num_bytes() const { return parsed_batch().num_bytes(); }
+
+  /// \brief Return the total number of rows including rows which were skipped
+  int32_t total_num_rows() const {
+    return parsed_batch().num_rows() + parsed_batch().num_skipped_rows();
+  }
+
+  /// \brief Return the row number of the first row in the block or -1 if unsupported
+  int64_t first_row_num() const;
+
+  /// \brief Visit parsed values in a column
+  ///
+  /// The signature of the visitor is
+  /// Status(const uint8_t* data, uint32_t size, bool quoted)
+  template <typename Visitor>
+  Status VisitColumn(int32_t col_index, Visitor&& visit) const {
+    return parsed_batch().VisitColumn(col_index, first_row_num(),
+                                      std::forward<Visitor>(visit));
+  }
+
+  template <typename Visitor>
+  Status VisitLastRow(Visitor&& visit) const {
+    return parsed_batch().VisitLastRow(std::forward<Visitor>(visit));
+  }
+
+ protected:
+  std::unique_ptr<BlockParserImpl> impl_;
+
+  const detail::DataBatch& parsed_batch() const;
+};
+
+}  // namespace csv
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..bae301dc14815a6fdf9388a08c4f9068155f20a6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
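+
+// [Editorial note, not part of the upstream header] An illustrative, untested sketch
+// of driving the BlockParser API declared in arrow/csv/parser.h above. It assumes
+// ParseOptions::Defaults() exists (mirroring the WriteOptions::Defaults() shown
+// earlier); everything else is taken from the declarations above:
+//
+//   #include "arrow/csv/parser.h"
+//
+//   arrow::Status VisitFirstColumn(std::string_view block) {
+//     arrow::csv::BlockParser parser(arrow::csv::ParseOptions::Defaults());
+//     uint32_t parsed_bytes = 0;
+//     // ParseFinal() is used because `block` is the last (and only) block.
+//     ARROW_RETURN_NOT_OK(parser.ParseFinal(block, &parsed_bytes));
+//     return parser.VisitColumn(
+//         /*col_index=*/0, [](const uint8_t* data, uint32_t size, bool quoted) {
+//           // `data`/`size` delimit the unescaped value; `quoted` tells whether
+//           // it was quoted in the source.
+//           return arrow::Status::OK();
+//         });
+//   }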
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/csv/options.h"  // IWYU pragma: keep
+#include "arrow/io/interfaces.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/future.h"
+#include "arrow/util/thread_pool.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace io {
+class InputStream;
+}  // namespace io
+
+namespace csv {
+
+/// A class that reads an entire CSV file into an Arrow Table
+class ARROW_EXPORT TableReader {
+ public:
+  virtual ~TableReader() = default;
+
+  /// Read the entire CSV file and convert it to an Arrow Table
+  virtual Result<std::shared_ptr<Table>> Read() = 0;
+  /// Read the entire CSV file and convert it to an Arrow Table
+  virtual Future<std::shared_ptr<Table>> ReadAsync() = 0;
+
+  /// Create a TableReader instance
+  static Result<std::shared_ptr<TableReader>> Make(io::IOContext io_context,
+                                                   std::shared_ptr<io::InputStream> input,
+                                                   const ReadOptions&,
+                                                   const ParseOptions&,
+                                                   const ConvertOptions&);
+};
+
+/// \brief A class that reads a CSV file incrementally
+///
+/// Caveats:
+/// - For now, this is always single-threaded (regardless of `ReadOptions::use_threads`).
+/// - Type inference is done on the first block and types are frozen afterwards;
+///   to make sure the right data types are inferred, either set
+///   `ReadOptions::block_size` to a large enough value, or use
+///   `ConvertOptions::column_types` to set the desired data types explicitly.
+class ARROW_EXPORT StreamingReader : public RecordBatchReader {
+ public:
+  virtual ~StreamingReader() = default;
+
+  virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;
+
+  /// \brief Return the number of bytes which have been read and processed
+  ///
+  /// The returned number includes CSV bytes which the StreamingReader has
+  /// finished processing, but not bytes for which some processing (e.g.
+  /// CSV parsing or conversion to Arrow layout) is still ongoing.
+  ///
+  /// Furthermore, the following rules apply:
+  /// - bytes skipped by `ReadOptions.skip_rows` are counted as being read before
+  ///   any records are returned.
+  /// - bytes read while parsing the header are counted as being read before any
+  ///   records are returned.
+  /// - bytes skipped by `ReadOptions.skip_rows_after_names` are counted after the
+  ///   first batch is returned.
+  virtual int64_t bytes_read() const = 0;
+
+  /// Create a StreamingReader instance
+  ///
+  /// This involves some I/O as the first batch must be loaded during the creation
+  /// process, so it is returned as a future.
+  ///
+  /// Currently, the StreamingReader is not async-reentrant and does not do any fan-out
+  /// parsing (see ARROW-11889)
+  static Future<std::shared_ptr<StreamingReader>> MakeAsync(
+      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
+      arrow::internal::Executor* cpu_executor, const ReadOptions&, const ParseOptions&,
+      const ConvertOptions&);
+
+  static Result<std::shared_ptr<StreamingReader>> Make(
+      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
+      const ReadOptions&, const ParseOptions&, const ConvertOptions&);
+};
+
+/// \brief Count the logical rows of data in a CSV file (i.e. the
+/// number of rows you would get if you read the file into a table).
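+// [Editorial note, not part of the upstream header] An illustrative, untested sketch
+// of reading a whole CSV file with the TableReader declared above. ReadableFile,
+// io::default_io_context() and the *Options::Defaults() factories are assumed from
+// the wider Arrow API rather than shown in this header:
+//
+//   #include "arrow/csv/reader.h"
+//   #include "arrow/io/file.h"
+//
+//   arrow::Result<std::shared_ptr<arrow::Table>> ReadCsvFile(const std::string& path) {
+//     ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open(path));
+//     ARROW_ASSIGN_OR_RAISE(
+//         auto reader, arrow::csv::TableReader::Make(
+//                          arrow::io::default_io_context(), input,
+//                          arrow::csv::ReadOptions::Defaults(),
+//                          arrow::csv::ParseOptions::Defaults(),
+//                          arrow::csv::ConvertOptions::Defaults()));
+//     return reader->Read();
+//   }
+//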
+ARROW_EXPORT +Future CountRowsAsync(io::IOContext io_context, + std::shared_ptr input, + arrow::internal::Executor* cpu_executor, + const ReadOptions&, const ParseOptions&); + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..07a41604478e81ac760e8d0b3501ef24996b0a4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/csv/parser.h" +#include "arrow/testing/visibility.h" + +namespace arrow { +namespace csv { + +ARROW_TESTING_EXPORT +std::string MakeCSVData(std::vector lines); + +// Make a BlockParser from a vector of lines representing a CSV file +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, ParseOptions options, int32_t num_cols, + MemoryPool* pool, std::shared_ptr* out); + +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, ParseOptions options, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, std::shared_ptr* out); + +// Make a BlockParser from a vector of strings representing a single CSV column +ARROW_TESTING_EXPORT +void MakeColumnParser(std::vector items, std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Result> MakeSampleCsvBuffer( + size_t num_rows, std::function is_valid = {}); + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..c0a53847a90ddb82067e0c9ac955cf4222c61742 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+namespace arrow {
+namespace csv {
+
+class TableReader;
+struct ConvertOptions;
+struct ReadOptions;
+struct ParseOptions;
+struct WriteOptions;
+
+}  // namespace csv
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..43233372124728ba41f0bead721dd3f8006f19b4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h
@@ -0,0 +1,89 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/csv/options.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/ipc/type_fwd.h"
+#include "arrow/record_batch.h"
+#include "arrow/table.h"
+
+namespace arrow {
+namespace csv {
+
+// Functionality for converting Arrow data to Comma separated value text.
+// This library supports all primitive types that can be cast to a StringArray.
+// It applies the following formatting rules:
+// - For non-binary types no quotes surround values.  Nulls are represented as the empty
+//   string.
+// - For binary types all non-null data is quoted (and quotes within data are escaped
+//   with an additional quote).
+//   Null values are empty and unquoted.
+
+/// \defgroup csv-write-functions High-level functions for writing CSV files
+/// @{
+
+/// \brief Convert table to CSV and write the result to output.
+/// Experimental
+ARROW_EXPORT Status WriteCSV(const Table& table, const WriteOptions& options,
+                             arrow::io::OutputStream* output);
+/// \brief Convert batch to CSV and write the result to output.
+/// Experimental
+ARROW_EXPORT Status WriteCSV(const RecordBatch& batch, const WriteOptions& options,
+                             arrow::io::OutputStream* output);
+/// \brief Convert batches read through a RecordBatchReader
+/// to CSV and write the results to output.
+/// Experimental
+ARROW_EXPORT Status WriteCSV(const std::shared_ptr<RecordBatchReader>& reader,
+                             const WriteOptions& options,
+                             arrow::io::OutputStream* output);
+
+/// @}
+
+/// \defgroup csv-writer-factories Functions for creating an incremental CSV writer
+/// @{
+
+/// \brief Create a new CSV writer.  User is responsible for closing the
+/// actual OutputStream.
+///
+/// \param[in] sink output stream to write to
+/// \param[in] schema the schema of the record batches to be written
+/// \param[in] options options for serialization
+/// \return Result<std::shared_ptr<ipc::RecordBatchWriter>>
+ARROW_EXPORT
+Result<std::shared_ptr<ipc::RecordBatchWriter>> MakeCSVWriter(
+    std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema,
+    const WriteOptions& options = WriteOptions::Defaults());
+
+/// \brief Create a new CSV writer.
+/// +/// \param[in] sink output stream to write to (does not take ownership) +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeCSVWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const WriteOptions& options = WriteOptions::Defaults()); + +/// @} + +} // namespace csv +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h new file mode 100644 index 0000000000000000000000000000000000000000..31b2d2274c9008b32d63a4130b315a1ecba3e5a3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h @@ -0,0 +1,311 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/scalar.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class RecordBatch; +class Table; + +/// \class Datum +/// \brief Variant type for various Arrow C++ data structures +struct ARROW_EXPORT Datum { + /// \brief The kind of datum stored + enum Kind { NONE, SCALAR, ARRAY, CHUNKED_ARRAY, RECORD_BATCH, TABLE }; + + /// \brief A placeholder type to represent empty datum + struct Empty {}; + + /// \brief Datums variants may have a length. This special value indicate that the + /// current variant does not have a length. + static constexpr int64_t kUnknownLength = -1; + + /// \brief Storage of the actual datum. 
+  ///
+  /// Note: For arrays, ArrayData is stored instead of Array for easier processing
+  std::variant<Empty, std::shared_ptr<Scalar>, std::shared_ptr<ArrayData>,
+               std::shared_ptr<ChunkedArray>, std::shared_ptr<RecordBatch>,
+               std::shared_ptr<Table>>
+      value;
+
+  /// \brief Empty datum, to be populated elsewhere
+  Datum() = default;
+
+  Datum(const Datum& other) = default;
+  Datum& operator=(const Datum& other) = default;
+  Datum(Datum&& other) = default;
+  Datum& operator=(Datum&& other) = default;
+
+  /// \brief Construct from a Scalar
+  Datum(std::shared_ptr<Scalar> value)  // NOLINT implicit conversion
+      : value(std::move(value)) {}
+
+  /// \brief Construct from an ArrayData
+  Datum(std::shared_ptr<ArrayData> value)  // NOLINT implicit conversion
+      : value(std::move(value)) {}
+
+  /// \brief Construct from an ArrayData
+  Datum(ArrayData arg)  // NOLINT implicit conversion
+      : value(std::make_shared<ArrayData>(std::move(arg))) {}
+
+  /// \brief Construct from an Array
+  Datum(const Array& value);  // NOLINT implicit conversion
+
+  /// \brief Construct from an Array
+  Datum(const std::shared_ptr<Array>& value);  // NOLINT implicit conversion
+
+  /// \brief Construct from a ChunkedArray
+  Datum(std::shared_ptr<ChunkedArray> value);  // NOLINT implicit conversion
+
+  /// \brief Construct from a RecordBatch
+  Datum(std::shared_ptr<RecordBatch> value);  // NOLINT implicit conversion
+
+  /// \brief Construct from a Table
+  Datum(std::shared_ptr<Table> value);  // NOLINT implicit conversion
+
+  /// \brief Construct from a ChunkedArray.
+  ///
+  /// This can be expensive, prefer the shared_ptr<ChunkedArray> constructor
+  explicit Datum(const ChunkedArray& value);
+
+  /// \brief Construct from a RecordBatch.
+  ///
+  /// This can be expensive, prefer the shared_ptr<RecordBatch> constructor
+  explicit Datum(const RecordBatch& value);
+
+  /// \brief Construct from a Table.
+  ///
+  /// This can be expensive, prefer the shared_ptr<Table>
constructor + explicit Datum(const Table& value); + + /// \brief Cast from concrete subtypes of Array or Scalar to Datum + template , + bool IsScalar = std::is_base_of_v, + typename = enable_if_t> + Datum(std::shared_ptr value) // NOLINT implicit conversion + : Datum(std::shared_ptr::type>( + std::move(value))) {} + + /// \brief Cast from concrete subtypes of Array or Scalar to Datum + template , + bool IsArray = std::is_base_of_v, + bool IsScalar = std::is_base_of_v, + typename = enable_if_t> + Datum(T&& value) // NOLINT implicit conversion + : Datum(std::make_shared(std::forward(value))) {} + + /// \brief Copy from concrete subtypes of Scalar. + /// + /// The concrete scalar type must be copyable (not all of them are). + template >> + Datum(const T& value) // NOLINT implicit conversion + : Datum(std::make_shared(value)) {} + + // Convenience constructors + /// \brief Convenience constructor storing a bool scalar. + explicit Datum(bool value); + /// \brief Convenience constructor storing an int8 scalar. + explicit Datum(int8_t value); + /// \brief Convenience constructor storing a uint8 scalar. + explicit Datum(uint8_t value); + /// \brief Convenience constructor storing an int16 scalar. + explicit Datum(int16_t value); + /// \brief Convenience constructor storing a uint16 scalar. + explicit Datum(uint16_t value); + /// \brief Convenience constructor storing an int32 scalar. + explicit Datum(int32_t value); + /// \brief Convenience constructor storing a uint32 scalar. + explicit Datum(uint32_t value); + /// \brief Convenience constructor storing an int64 scalar. + explicit Datum(int64_t value); + /// \brief Convenience constructor storing a uint64 scalar. + explicit Datum(uint64_t value); + /// \brief Convenience constructor storing a float scalar. + explicit Datum(float value); + /// \brief Convenience constructor storing a double scalar. + explicit Datum(double value); + /// \brief Convenience constructor storing a string scalar. + explicit Datum(std::string value); + /// \brief Convenience constructor storing a string scalar. + explicit Datum(const char* value); + + /// \brief Convenience constructor for a DurationScalar from std::chrono::duration + template