diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..8cc54b4c6bfdaa0e347b3927d7932934916a1ade --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/__init__.pxd @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from libcpp.memory cimport shared_ptr +from pyarrow.includes.libarrow cimport (CArray, CBuffer, CDataType, + CField, CRecordBatch, CSchema, + CTable, CTensor, CSparseCOOTensor, + CSparseCSRMatrix, CSparseCSCMatrix, + CSparseCSFTensor) + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + cdef int import_pyarrow() except -1 + cdef object wrap_buffer(const shared_ptr[CBuffer]& buffer) + cdef object wrap_data_type(const shared_ptr[CDataType]& type) + cdef object wrap_field(const shared_ptr[CField]& field) + cdef object wrap_schema(const shared_ptr[CSchema]& schema) + cdef object wrap_array(const shared_ptr[CArray]& sp_array) + cdef object wrap_tensor(const shared_ptr[CTensor]& sp_tensor) + cdef object wrap_sparse_tensor_coo( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csr( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csc( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) + cdef object wrap_sparse_tensor_csf( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) + cdef object wrap_table(const shared_ptr[CTable]& ctable) + cdef object wrap_batch(const shared_ptr[CRecordBatch]& cbatch) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4553aee9d6f16c391340aa45489471bdcfe0cb76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_acero.pxd @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + + +cdef class ExecNodeOptions(_Weakrefable): + cdef: + shared_ptr[CExecNodeOptions] wrapped + + cdef void init(self, const shared_ptr[CExecNodeOptions]& sp) + cdef inline shared_ptr[CExecNodeOptions] unwrap(self) nogil + + +cdef class Declaration(_Weakrefable): + + cdef: + CDeclaration decl + + cdef void init(self, const CDeclaration& c_decl) + + @staticmethod + cdef wrap(const CDeclaration& c_decl) + + cdef inline CDeclaration unwrap(self) nogil diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pyx new file mode 100644 index 0000000000000000000000000000000000000000..a267d53599436e4f7eecd2a8a28beb66d5ad502f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.pyx @@ -0,0 +1,3242 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +import sys + +from cpython.object cimport Py_LT, Py_EQ, Py_GT, Py_LE, Py_NE, Py_GE +from cython.operator cimport dereference as deref + +from collections import namedtuple + +from pyarrow.lib import frombytes, tobytes, ArrowInvalid +from pyarrow.lib cimport * +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +import pyarrow.lib as lib +from pyarrow.util import _DEPR_MSG +from libcpp cimport bool as c_bool + +import inspect +import numpy as np +import warnings + + +__pas = None +_substrait_msg = ( + "The pyarrow installation is not built with support for Substrait." +) + + +def _pas(): + global __pas + if __pas is None: + try: + import pyarrow.substrait as pas + __pas = pas + except ImportError: + raise ImportError(_substrait_msg) + return __pas + + +def _forbid_instantiation(klass, subclasses_instead=True): + msg = '{} is an abstract class thus cannot be initialized.'.format( + klass.__name__ + ) + if subclasses_instead: + subclasses = [cls.__name__ for cls in klass.__subclasses__] + msg += ' Use one of the subclasses instead: {}'.format( + ', '.join(subclasses) + ) + raise TypeError(msg) + + +cdef wrap_scalar_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ scalar Function in a ScalarFunction object. + """ + cdef ScalarFunction func = ScalarFunction.__new__(ScalarFunction) + func.init(sp_func) + return func + + +cdef wrap_vector_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ vector Function in a VectorFunction object. 
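The `_acero.pxd` declarations above (ExecNodeOptions, Declaration) back the public `pyarrow.acero` Declaration API. A minimal sketch of how that surface is typically driven from Python, assuming a pyarrow build with Acero available; the column names here are illustrative only:

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.acero import Declaration, TableSourceNodeOptions, ProjectNodeOptions

table = pa.table({"x": [1, 2, 3]})
# Chain a source node into a projection and run the plan to a Table.
plan = Declaration.from_sequence([
    Declaration("table_source", TableSourceNodeOptions(table)),
    Declaration("project", ProjectNodeOptions([pc.field("x")], ["x_copy"])),
])
result = plan.to_table()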
+ """ + cdef VectorFunction func = VectorFunction.__new__(VectorFunction) + func.init(sp_func) + return func + + +cdef wrap_scalar_aggregate_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ aggregate Function in a ScalarAggregateFunction object. + """ + cdef ScalarAggregateFunction func = \ + ScalarAggregateFunction.__new__(ScalarAggregateFunction) + func.init(sp_func) + return func + + +cdef wrap_hash_aggregate_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ aggregate Function in a HashAggregateFunction object. + """ + cdef HashAggregateFunction func = \ + HashAggregateFunction.__new__(HashAggregateFunction) + func.init(sp_func) + return func + + +cdef wrap_meta_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ meta Function in a MetaFunction object. + """ + cdef MetaFunction func = MetaFunction.__new__(MetaFunction) + func.init(sp_func) + return func + + +cdef wrap_function(const shared_ptr[CFunction]& sp_func): + """ + Wrap a C++ Function in a Function object. + + This dispatches to specialized wrappers depending on the function kind. + """ + if sp_func.get() == NULL: + raise ValueError("Function was NULL") + + cdef FunctionKind c_kind = sp_func.get().kind() + if c_kind == FunctionKind_SCALAR: + return wrap_scalar_function(sp_func) + elif c_kind == FunctionKind_VECTOR: + return wrap_vector_function(sp_func) + elif c_kind == FunctionKind_SCALAR_AGGREGATE: + return wrap_scalar_aggregate_function(sp_func) + elif c_kind == FunctionKind_HASH_AGGREGATE: + return wrap_hash_aggregate_function(sp_func) + elif c_kind == FunctionKind_META: + return wrap_meta_function(sp_func) + else: + raise NotImplementedError("Unknown Function::Kind") + + +cdef wrap_scalar_kernel(const CScalarKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef ScalarKernel kernel = ScalarKernel.__new__(ScalarKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_vector_kernel(const CVectorKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef VectorKernel kernel = VectorKernel.__new__(VectorKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_scalar_aggregate_kernel(const CScalarAggregateKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef ScalarAggregateKernel kernel = \ + ScalarAggregateKernel.__new__(ScalarAggregateKernel) + kernel.init(c_kernel) + return kernel + + +cdef wrap_hash_aggregate_kernel(const CHashAggregateKernel* c_kernel): + if c_kernel == NULL: + raise ValueError("Kernel was NULL") + cdef HashAggregateKernel kernel = \ + HashAggregateKernel.__new__(HashAggregateKernel) + kernel.init(c_kernel) + return kernel + + +cdef class Kernel(_Weakrefable): + """ + A kernel object. + + Kernels handle the execution of a Function for a certain signature. 
+ """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + +cdef class ScalarKernel(Kernel): + cdef const CScalarKernel* kernel + + cdef void init(self, const CScalarKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("ScalarKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class VectorKernel(Kernel): + cdef const CVectorKernel* kernel + + cdef void init(self, const CVectorKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("VectorKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class ScalarAggregateKernel(Kernel): + cdef const CScalarAggregateKernel* kernel + + cdef void init(self, const CScalarAggregateKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("ScalarAggregateKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +cdef class HashAggregateKernel(Kernel): + cdef const CHashAggregateKernel* kernel + + cdef void init(self, const CHashAggregateKernel* kernel) except *: + self.kernel = kernel + + def __repr__(self): + return ("HashAggregateKernel<{}>" + .format(frombytes(self.kernel.signature.get().ToString()))) + + +FunctionDoc = namedtuple( + "FunctionDoc", + ("summary", "description", "arg_names", "options_class", + "options_required")) + + +cdef class Function(_Weakrefable): + """ + A compute function. + + A function implements a certain logical computation over a range of + possible input signatures. Each signature accepts a range of input + types and is implemented by a given Kernel. + + Functions can be of different kinds: + + * "scalar" functions apply an item-wise computation over all items + of their inputs. Each item in the output only depends on the values + of the inputs at the same position. Examples: addition, comparisons, + string predicates... + + * "vector" functions apply a collection-wise computation, such that + each item in the output may depend on the values of several items + in each input. Examples: dictionary encoding, sorting, extracting + unique values... + + * "scalar_aggregate" functions reduce the dimensionality of the inputs by + applying a reduction function. Examples: sum, min_max, mode... + + * "hash_aggregate" functions apply a reduction function to an input + subdivided by grouping criteria. They may not be directly called. + Examples: hash_sum, hash_min_max... + + * "meta" functions dispatch to other functions. + """ + + cdef: + shared_ptr[CFunction] sp_func + CFunction* base_func + + _kind_map = { + FunctionKind_SCALAR: "scalar", + FunctionKind_VECTOR: "vector", + FunctionKind_SCALAR_AGGREGATE: "scalar_aggregate", + FunctionKind_HASH_AGGREGATE: "hash_aggregate", + FunctionKind_META: "meta", + } + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + self.sp_func = sp_func + self.base_func = sp_func.get() + + def __repr__(self): + return ("arrow.compute.Function" + .format(self.name, self.kind, self.arity, self.num_kernels)) + + def __reduce__(self): + # Reduction uses the global registry + return get_function, (self.name,) + + @property + def name(self): + """ + The function name. + """ + return frombytes(self.base_func.name()) + + @property + def arity(self): + """ + The function arity. + + If Ellipsis (i.e. 
`...`) is returned, the function takes a variable + number of arguments. + """ + cdef CArity arity = self.base_func.arity() + if arity.is_varargs: + return ... + else: + return arity.num_args + + @property + def kind(self): + """ + The function kind. + """ + cdef FunctionKind c_kind = self.base_func.kind() + try: + return self._kind_map[c_kind] + except KeyError: + raise NotImplementedError("Unknown Function::Kind") + + @property + def _doc(self): + """ + The C++-like function documentation (for internal use). + """ + cdef CFunctionDoc c_doc = self.base_func.doc() + return FunctionDoc(frombytes(c_doc.summary), + frombytes(c_doc.description), + [frombytes(s) for s in c_doc.arg_names], + frombytes(c_doc.options_class), + c_doc.options_required) + + @property + def num_kernels(self): + """ + The number of kernels implementing this function. + """ + return self.base_func.num_kernels() + + def call(self, args, FunctionOptions options=None, + MemoryPool memory_pool=None, length=None): + """ + Call the function on the given arguments. + + Parameters + ---------- + args : iterable + The arguments to pass to the function. Accepted types depend + on the specific function. + options : FunctionOptions, optional + Options instance for executing this function. This should have + the right concrete options type. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + length : int, optional + Batch size for execution, for nullary (no argument) functions. If + not passed, will be inferred from passed data. + """ + cdef: + const CFunctionOptions* c_options = NULL + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + CExecContext c_exec_ctx = CExecContext(pool) + CExecBatch c_batch + CDatum result + + _pack_compute_args(args, &c_batch.values) + + if options is not None: + c_options = options.get_options() + + if length is not None: + c_batch.length = length + with nogil: + result = GetResultValue( + self.base_func.Execute(c_batch, c_options, &c_exec_ctx) + ) + else: + with nogil: + result = GetResultValue( + self.base_func.Execute(c_batch.values, c_options, + &c_exec_ctx) + ) + + return wrap_datum(result) + + +cdef class ScalarFunction(Function): + cdef const CScalarFunction* func + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + Function.init(self, sp_func) + self.func = sp_func.get() + + @property + def kernels(self): + """ + The kernels implementing this function. + """ + cdef vector[const CScalarKernel*] kernels = self.func.kernels() + return [wrap_scalar_kernel(k) for k in kernels] + + +cdef class VectorFunction(Function): + cdef const CVectorFunction* func + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + Function.init(self, sp_func) + self.func = sp_func.get() + + @property + def kernels(self): + """ + The kernels implementing this function. + """ + cdef vector[const CVectorKernel*] kernels = self.func.kernels() + return [wrap_vector_kernel(k) for k in kernels] + + +cdef class ScalarAggregateFunction(Function): + cdef const CScalarAggregateFunction* func + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + Function.init(self, sp_func) + self.func = sp_func.get() + + @property + def kernels(self): + """ + The kernels implementing this function. 
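`Function.call` above is the low-level entry point that `call_function` and the generated `pyarrow.compute` wrappers ultimately go through. A small sketch of calling a looked-up function object directly:

import pyarrow as pa
import pyarrow.compute as pc

add = pc.get_function("add")
add.call([pa.array([1, 2, 3]), pa.array([10, 20, 30])])
# -> Int64Array [11, 22, 33]

# Scalar aggregate functions reduce their input to a scalar:
pc.get_function("min_max").call([pa.array([4, None, 1])])
# -> StructScalar like {'min': 1, 'max': 4}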
+ """ + cdef vector[const CScalarAggregateKernel*] kernels = \ + self.func.kernels() + return [wrap_scalar_aggregate_kernel(k) for k in kernels] + + +cdef class HashAggregateFunction(Function): + cdef const CHashAggregateFunction* func + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + Function.init(self, sp_func) + self.func = sp_func.get() + + @property + def kernels(self): + """ + The kernels implementing this function. + """ + cdef vector[const CHashAggregateKernel*] kernels = self.func.kernels() + return [wrap_hash_aggregate_kernel(k) for k in kernels] + + +cdef class MetaFunction(Function): + cdef const CMetaFunction* func + + cdef void init(self, const shared_ptr[CFunction]& sp_func) except *: + Function.init(self, sp_func) + self.func = sp_func.get() + + # Since num_kernels is exposed, also expose a kernels property + @property + def kernels(self): + """ + The kernels implementing this function. + """ + return [] + + +cdef _pack_compute_args(object values, vector[CDatum]* out): + for val in values: + if isinstance(val, (list, np.ndarray)): + val = lib.asarray(val) + + if isinstance(val, Array): + out.push_back(CDatum(( val).sp_array)) + continue + elif isinstance(val, ChunkedArray): + out.push_back(CDatum(( val).sp_chunked_array)) + continue + elif isinstance(val, Scalar): + out.push_back(CDatum(( val).unwrap())) + continue + elif isinstance(val, RecordBatch): + out.push_back(CDatum(( val).sp_batch)) + continue + elif isinstance(val, Table): + out.push_back(CDatum(( val).sp_table)) + continue + else: + # Is it a Python scalar? + try: + scal = lib.scalar(val) + except Exception: + # Raise dedicated error below + pass + else: + out.push_back(CDatum(( scal).unwrap())) + continue + + raise TypeError(f"Got unexpected argument type {type(val)} " + "for compute function") + + +cdef class FunctionRegistry(_Weakrefable): + cdef CFunctionRegistry* registry + + def __init__(self): + self.registry = GetFunctionRegistry() + + def list_functions(self): + """ + Return all function names in the registry. + """ + cdef vector[c_string] names = self.registry.GetFunctionNames() + return [frombytes(name) for name in names] + + def get_function(self, name): + """ + Look up a function by name in the registry. + + Parameters + ---------- + name : str + The name of the function to lookup + """ + cdef: + c_string c_name = tobytes(name) + shared_ptr[CFunction] func + with nogil: + func = GetResultValue(self.registry.GetFunction(c_name)) + return wrap_function(func) + + +cdef FunctionRegistry _global_func_registry = FunctionRegistry() + + +def function_registry(): + return _global_func_registry + + +def get_function(name): + """ + Get a function by name. + + The function is looked up in the global registry + (as returned by `function_registry()`). + + Parameters + ---------- + name : str + The name of the function to lookup + """ + return _global_func_registry.get_function(name) + + +def list_functions(): + """ + Return all function names in the global registry. + """ + return _global_func_registry.list_functions() + + +def call_function(name, args, options=None, memory_pool=None, length=None): + """ + Call a named function. + + The function is looked up in the global registry + (as returned by `function_registry()`). + + Parameters + ---------- + name : str + The name of the function to call. + args : list + The arguments to the function. + options : optional + options provided to the function. 
+    memory_pool : MemoryPool, optional
+        Memory pool to use for allocations during function execution.
+    length : int, optional
+        Batch size for execution, for nullary (no argument) functions. If not
+        passed, inferred from data.
+    """
+    func = _global_func_registry.get_function(name)
+    return func.call(args, options=options, memory_pool=memory_pool,
+                     length=length)
+
+
+cdef class FunctionOptions(_Weakrefable):
+    __slots__ = ()  # avoid mistakenly creating attributes
+
+    cdef const CFunctionOptions* get_options(self) except NULL:
+        return self.wrapped.get()
+
+    cdef void init(self, const shared_ptr[CFunctionOptions]& sp):
+        self.wrapped = sp
+
+    cdef inline shared_ptr[CFunctionOptions] unwrap(self):
+        return self.wrapped
+
+    def serialize(self):
+        cdef:
+            CResult[shared_ptr[CBuffer]] res = self.get_options().Serialize()
+            shared_ptr[CBuffer] c_buf = GetResultValue(res)
+        return pyarrow_wrap_buffer(c_buf)
+
+    @staticmethod
+    def deserialize(buf):
+        """
+        Deserialize options for a function.
+
+        Parameters
+        ----------
+        buf : Buffer
+            The buffer containing the data to deserialize.
+        """
+        cdef:
+            shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
+            CResult[unique_ptr[CFunctionOptions]] maybe_options = \
+                DeserializeFunctionOptions(deref(c_buf))
+            shared_ptr[CFunctionOptions] c_options
+        c_options = to_shared(GetResultValue(move(maybe_options)))
+        type_name = frombytes(c_options.get().options_type().type_name())
+        module = globals()
+        if type_name not in module:
+            raise ValueError(f'Cannot deserialize "{type_name}"')
+        klass = module[type_name]
+        options = klass.__new__(klass)
+        (<FunctionOptions> options).init(c_options)
+        return options
+
+    def __repr__(self):
+        type_name = self.__class__.__name__
+        # Remove {} so we can use our own braces
+        string_repr = frombytes(self.get_options().ToString())[1:-1]
+        return f"{type_name}({string_repr})"
+
+    def __eq__(self, FunctionOptions other):
+        return self.get_options().Equals(deref(other.get_options()))
+
+
+def _raise_invalid_function_option(value, description, *,
+                                   exception_class=ValueError):
+    raise exception_class(f"\"{value}\" is not a valid {description}")
+
+
+# NOTE:
+# To properly expose the constructor signature of FunctionOptions
+# subclasses, we use a two-level inheritance:
+# 1. a C extension class that implements option validation and setting
+#    (won't expose function signatures because of
+#    https://github.com/cython/cython/issues/3873)
+# 2.
a Python derived class that implements the constructor + +cdef class _CastOptions(FunctionOptions): + cdef CCastOptions* options + + cdef void init(self, const shared_ptr[CFunctionOptions]& sp): + FunctionOptions.init(self, sp) + self.options = self.wrapped.get() + + def _set_options(self, DataType target_type, allow_int_overflow, + allow_time_truncate, allow_time_overflow, + allow_decimal_truncate, allow_float_truncate, + allow_invalid_utf8): + cdef: + shared_ptr[CCastOptions] wrapped = make_shared[CCastOptions]() + self.init( wrapped) + self._set_type(target_type) + if allow_int_overflow is not None: + self.allow_int_overflow = allow_int_overflow + if allow_time_truncate is not None: + self.allow_time_truncate = allow_time_truncate + if allow_time_overflow is not None: + self.allow_time_overflow = allow_time_overflow + if allow_decimal_truncate is not None: + self.allow_decimal_truncate = allow_decimal_truncate + if allow_float_truncate is not None: + self.allow_float_truncate = allow_float_truncate + if allow_invalid_utf8 is not None: + self.allow_invalid_utf8 = allow_invalid_utf8 + + def _set_type(self, target_type=None): + if target_type is not None: + deref(self.options).to_type = \ + ( ensure_type(target_type)).sp_type + + def _set_safe(self): + self.init(shared_ptr[CFunctionOptions]( + new CCastOptions(CCastOptions.Safe()))) + + def _set_unsafe(self): + self.init(shared_ptr[CFunctionOptions]( + new CCastOptions(CCastOptions.Unsafe()))) + + def is_safe(self): + return not (deref(self.options).allow_int_overflow or + deref(self.options).allow_time_truncate or + deref(self.options).allow_time_overflow or + deref(self.options).allow_decimal_truncate or + deref(self.options).allow_float_truncate or + deref(self.options).allow_invalid_utf8) + + @property + def allow_int_overflow(self): + return deref(self.options).allow_int_overflow + + @allow_int_overflow.setter + def allow_int_overflow(self, c_bool flag): + deref(self.options).allow_int_overflow = flag + + @property + def allow_time_truncate(self): + return deref(self.options).allow_time_truncate + + @allow_time_truncate.setter + def allow_time_truncate(self, c_bool flag): + deref(self.options).allow_time_truncate = flag + + @property + def allow_time_overflow(self): + return deref(self.options).allow_time_overflow + + @allow_time_overflow.setter + def allow_time_overflow(self, c_bool flag): + deref(self.options).allow_time_overflow = flag + + @property + def allow_decimal_truncate(self): + return deref(self.options).allow_decimal_truncate + + @allow_decimal_truncate.setter + def allow_decimal_truncate(self, c_bool flag): + deref(self.options).allow_decimal_truncate = flag + + @property + def allow_float_truncate(self): + return deref(self.options).allow_float_truncate + + @allow_float_truncate.setter + def allow_float_truncate(self, c_bool flag): + deref(self.options).allow_float_truncate = flag + + @property + def allow_invalid_utf8(self): + return deref(self.options).allow_invalid_utf8 + + @allow_invalid_utf8.setter + def allow_invalid_utf8(self, c_bool flag): + deref(self.options).allow_invalid_utf8 = flag + + +class CastOptions(_CastOptions): + """ + Options for the `cast` function. + + Parameters + ---------- + target_type : DataType, optional + The PyArrow type to cast to. + allow_int_overflow : bool, default False + Whether integer overflow is allowed when casting. + allow_time_truncate : bool, default False + Whether time precision truncation is allowed when casting. 
+ allow_time_overflow : bool, default False + Whether date/time range overflow is allowed when casting. + allow_decimal_truncate : bool, default False + Whether decimal precision truncation is allowed when casting. + allow_float_truncate : bool, default False + Whether floating-point precision truncation is allowed when casting. + allow_invalid_utf8 : bool, default False + Whether producing invalid utf8 data is allowed when casting. + """ + + def __init__(self, target_type=None, *, allow_int_overflow=None, + allow_time_truncate=None, allow_time_overflow=None, + allow_decimal_truncate=None, allow_float_truncate=None, + allow_invalid_utf8=None): + self._set_options(target_type, allow_int_overflow, allow_time_truncate, + allow_time_overflow, allow_decimal_truncate, + allow_float_truncate, allow_invalid_utf8) + + @staticmethod + def safe(target_type=None): + """" + Create a CastOptions for a safe cast. + + Parameters + ---------- + target_type : optional + Target cast type for the safe cast. + """ + self = CastOptions() + self._set_safe() + self._set_type(target_type) + return self + + @staticmethod + def unsafe(target_type=None): + """" + Create a CastOptions for an unsafe cast. + + Parameters + ---------- + target_type : optional + Target cast type for the unsafe cast. + """ + self = CastOptions() + self._set_unsafe() + self._set_type(target_type) + return self + + +def _skip_nulls_doc(): + # (note the weird indent because of how the string is inserted + # by callers) + return """skip_nulls : bool, default True + Whether to skip (ignore) nulls in the input. + If False, any null in the input forces the output to null. +""" + + +def _min_count_doc(*, default): + return f"""min_count : int, default {default} + Minimum number of non-null values in the input. If the number + of non-null values is below `min_count`, the output is null. +""" + + +cdef class _ElementWiseAggregateOptions(FunctionOptions): + def _set_options(self, skip_nulls): + self.wrapped.reset(new CElementWiseAggregateOptions(skip_nulls)) + + +class ElementWiseAggregateOptions(_ElementWiseAggregateOptions): + __doc__ = f""" + Options for element-wise aggregate functions. + + Parameters + ---------- + {_skip_nulls_doc()} + """ + + def __init__(self, *, skip_nulls=True): + self._set_options(skip_nulls) + + +cdef CRoundMode unwrap_round_mode(round_mode) except *: + if round_mode == "down": + return CRoundMode_DOWN + elif round_mode == "up": + return CRoundMode_UP + elif round_mode == "towards_zero": + return CRoundMode_TOWARDS_ZERO + elif round_mode == "towards_infinity": + return CRoundMode_TOWARDS_INFINITY + elif round_mode == "half_down": + return CRoundMode_HALF_DOWN + elif round_mode == "half_up": + return CRoundMode_HALF_UP + elif round_mode == "half_towards_zero": + return CRoundMode_HALF_TOWARDS_ZERO + elif round_mode == "half_towards_infinity": + return CRoundMode_HALF_TOWARDS_INFINITY + elif round_mode == "half_to_even": + return CRoundMode_HALF_TO_EVEN + elif round_mode == "half_to_odd": + return CRoundMode_HALF_TO_ODD + _raise_invalid_function_option(round_mode, "round mode") + + +cdef class _RoundOptions(FunctionOptions): + def _set_options(self, ndigits, round_mode): + self.wrapped.reset( + new CRoundOptions(ndigits, unwrap_round_mode(round_mode)) + ) + + +class RoundOptions(_RoundOptions): + """ + Options for rounding numbers. + + Parameters + ---------- + ndigits : int, default 0 + Number of fractional digits to round to. + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. 
+ Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". + """ + + def __init__(self, ndigits=0, round_mode="half_to_even"): + self._set_options(ndigits, round_mode) + + +cdef class _RoundBinaryOptions(FunctionOptions): + def _set_options(self, round_mode): + self.wrapped.reset( + new CRoundBinaryOptions(unwrap_round_mode(round_mode)) + ) + + +class RoundBinaryOptions(_RoundBinaryOptions): + """ + Options for rounding numbers when ndigits is provided by a second array + + Parameters + ---------- + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. + Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". + """ + + def __init__(self, round_mode="half_to_even"): + self._set_options(round_mode) + + +cdef CCalendarUnit unwrap_round_temporal_unit(unit) except *: + if unit == "nanosecond": + return CCalendarUnit_NANOSECOND + elif unit == "microsecond": + return CCalendarUnit_MICROSECOND + elif unit == "millisecond": + return CCalendarUnit_MILLISECOND + elif unit == "second": + return CCalendarUnit_SECOND + elif unit == "minute": + return CCalendarUnit_MINUTE + elif unit == "hour": + return CCalendarUnit_HOUR + elif unit == "day": + return CCalendarUnit_DAY + elif unit == "week": + return CCalendarUnit_WEEK + elif unit == "month": + return CCalendarUnit_MONTH + elif unit == "quarter": + return CCalendarUnit_QUARTER + elif unit == "year": + return CCalendarUnit_YEAR + _raise_invalid_function_option(unit, "Calendar unit") + + +cdef class _RoundTemporalOptions(FunctionOptions): + def _set_options(self, multiple, unit, week_starts_monday, + ceil_is_strictly_greater, calendar_based_origin): + self.wrapped.reset( + new CRoundTemporalOptions( + multiple, unwrap_round_temporal_unit(unit), + week_starts_monday, ceil_is_strictly_greater, + calendar_based_origin) + ) + + +class RoundTemporalOptions(_RoundTemporalOptions): + """ + Options for rounding temporal values. + + Parameters + ---------- + multiple : int, default 1 + Number of units to round to. + unit : str, default "day" + The unit in which `multiple` is expressed. + Accepted values are "year", "quarter", "month", "week", "day", + "hour", "minute", "second", "millisecond", "microsecond", + "nanosecond". + week_starts_monday : bool, default True + If True, weeks start on Monday; if False, on Sunday. + ceil_is_strictly_greater : bool, default False + If True, ceil returns a rounded value that is strictly greater than the + input. For example: ceiling 1970-01-01T00:00:00 to 3 hours would + yield 1970-01-01T03:00:00 if set to True and 1970-01-01T00:00:00 + if set to False. + This applies to the ceil_temporal function only. + calendar_based_origin : bool, default False + By default, the origin is 1970-01-01T00:00:00. By setting this to True, + rounding origin will be beginning of one less precise calendar unit. + E.g.: rounding to hours will use beginning of day as origin. + + By default time is rounded to a multiple of units since + 1970-01-01T00:00:00. By setting calendar_based_origin to true, + time will be rounded to number of units since the last greater + calendar unit. + For example: rounding to multiple of days since the beginning of the + month or to hours since the beginning of the day. 
+ Exceptions: week and quarter are not used as greater units, + therefore days will be rounded to the beginning of the month not + week. Greater unit of week is a year. + Note that ceiling and rounding might change sorting order of an array + near greater unit change. For example rounding YYYY-mm-dd 23:00:00 to + 5 hours will ceil and round to YYYY-mm-dd+1 01:00:00 and floor to + YYYY-mm-dd 20:00:00. On the other hand YYYY-mm-dd+1 00:00:00 will + ceil, round and floor to YYYY-mm-dd+1 00:00:00. This can break the + order of an already ordered array. + + """ + + def __init__(self, multiple=1, unit="day", *, week_starts_monday=True, + ceil_is_strictly_greater=False, + calendar_based_origin=False): + self._set_options(multiple, unit, week_starts_monday, + ceil_is_strictly_greater, + calendar_based_origin) + + +cdef class _RoundToMultipleOptions(FunctionOptions): + def _set_options(self, multiple, round_mode): + if not isinstance(multiple, Scalar): + try: + multiple = lib.scalar(multiple) + except Exception: + _raise_invalid_function_option( + multiple, "multiple type for RoundToMultipleOptions", + exception_class=TypeError) + + self.wrapped.reset( + new CRoundToMultipleOptions( + pyarrow_unwrap_scalar(multiple), unwrap_round_mode(round_mode)) + ) + + +class RoundToMultipleOptions(_RoundToMultipleOptions): + """ + Options for rounding numbers to a multiple. + + Parameters + ---------- + multiple : numeric scalar, default 1.0 + Multiple to round to. Should be a scalar of a type compatible + with the argument to be rounded. + round_mode : str, default "half_to_even" + Rounding and tie-breaking mode. + Accepted values are "down", "up", "towards_zero", "towards_infinity", + "half_down", "half_up", "half_towards_zero", "half_towards_infinity", + "half_to_even", "half_to_odd". + """ + + def __init__(self, multiple=1.0, round_mode="half_to_even"): + self._set_options(multiple, round_mode) + + +cdef class _JoinOptions(FunctionOptions): + _null_handling_map = { + "emit_null": CJoinNullHandlingBehavior_EMIT_NULL, + "skip": CJoinNullHandlingBehavior_SKIP, + "replace": CJoinNullHandlingBehavior_REPLACE, + } + + def _set_options(self, null_handling, null_replacement): + try: + self.wrapped.reset( + new CJoinOptions(self._null_handling_map[null_handling], + tobytes(null_replacement)) + ) + except KeyError: + _raise_invalid_function_option(null_handling, "null handling") + + +class JoinOptions(_JoinOptions): + """ + Options for the `binary_join_element_wise` function. + + Parameters + ---------- + null_handling : str, default "emit_null" + How to handle null values in the inputs. + Accepted values are "emit_null", "skip", "replace". + null_replacement : str, default "" + Replacement string to emit for null inputs if `null_handling` + is "replace". + """ + + def __init__(self, null_handling="emit_null", null_replacement=""): + self._set_options(null_handling, null_replacement) + + +cdef class _MatchSubstringOptions(FunctionOptions): + def _set_options(self, pattern, ignore_case): + self.wrapped.reset( + new CMatchSubstringOptions(tobytes(pattern), ignore_case) + ) + + +class MatchSubstringOptions(_MatchSubstringOptions): + """ + Options for looking for a substring. + + Parameters + ---------- + pattern : str + Substring pattern to look for inside input values. + ignore_case : bool, default False + Whether to perform a case-insensitive match. 
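As with the other option classes in this module, `MatchSubstringOptions` can be passed explicitly or spelled as keyword arguments on the generated `pyarrow.compute` wrapper. A small sketch, assuming the usual `pa`/`pc` import aliases:

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array(["Hello", "hello world", "other"])
pc.match_substring(arr, pattern="hello", ignore_case=True)
# -> BooleanArray [true, true, false]

# Equivalent, passing the options object through call_function:
pc.call_function("match_substring", [arr],
                 pc.MatchSubstringOptions("hello", ignore_case=True))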
+ """ + + def __init__(self, pattern, *, ignore_case=False): + self._set_options(pattern, ignore_case) + + +cdef class _PadOptions(FunctionOptions): + def _set_options(self, width, padding): + self.wrapped.reset(new CPadOptions(width, tobytes(padding))) + + +class PadOptions(_PadOptions): + """ + Options for padding strings. + + Parameters + ---------- + width : int + Desired string length. + padding : str, default " " + What to pad the string with. Should be one byte or codepoint. + """ + + def __init__(self, width, padding=' '): + self._set_options(width, padding) + + +cdef class _TrimOptions(FunctionOptions): + def _set_options(self, characters): + self.wrapped.reset(new CTrimOptions(tobytes(characters))) + + +class TrimOptions(_TrimOptions): + """ + Options for trimming characters from strings. + + Parameters + ---------- + characters : str + Individual characters to be trimmed from the string. + """ + + def __init__(self, characters): + self._set_options(tobytes(characters)) + + +cdef class _ReplaceSubstringOptions(FunctionOptions): + def _set_options(self, pattern, replacement, max_replacements): + self.wrapped.reset( + new CReplaceSubstringOptions(tobytes(pattern), + tobytes(replacement), + max_replacements) + ) + + +class ReplaceSubstringOptions(_ReplaceSubstringOptions): + """ + Options for replacing matched substrings. + + Parameters + ---------- + pattern : str + Substring pattern to look for inside input values. + replacement : str + What to replace the pattern with. + max_replacements : int or None, default None + The maximum number of strings to replace in each + input value (unlimited if None). + """ + + def __init__(self, pattern, replacement, *, max_replacements=None): + if max_replacements is None: + max_replacements = -1 + self._set_options(pattern, replacement, max_replacements) + + +cdef class _ExtractRegexOptions(FunctionOptions): + def _set_options(self, pattern): + self.wrapped.reset(new CExtractRegexOptions(tobytes(pattern))) + + +class ExtractRegexOptions(_ExtractRegexOptions): + """ + Options for the `extract_regex` function. + + Parameters + ---------- + pattern : str + Regular expression with named capture fields. + """ + + def __init__(self, pattern): + self._set_options(pattern) + + +cdef class _SliceOptions(FunctionOptions): + def _set_options(self, start, stop, step): + self.wrapped.reset(new CSliceOptions(start, stop, step)) + + +class SliceOptions(_SliceOptions): + """ + Options for slicing. + + Parameters + ---------- + start : int + Index to start slicing at (inclusive). + stop : int or None, default None + If given, index to stop slicing at (exclusive). + If not given, slicing will stop at the end. + step : int, default 1 + Slice step. + """ + + def __init__(self, start, stop=None, step=1): + if stop is None: + stop = sys.maxsize + if step < 0: + stop = -stop + self._set_options(start, stop, step) + + +cdef class _ListSliceOptions(FunctionOptions): + cpdef _set_options(self, start, stop=None, step=1, return_fixed_size_list=None): + cdef: + CListSliceOptions* opts + opts = new CListSliceOptions( + start, + nullopt if stop is None + else (stop), + step, + nullopt if return_fixed_size_list is None + else (return_fixed_size_list) + ) + self.wrapped.reset(opts) + + +class ListSliceOptions(_ListSliceOptions): + """ + Options for list array slicing. + + Parameters + ---------- + start : int + Index to start slicing inner list elements (inclusive). + stop : Optional[int], default None + If given, index to stop slicing at (exclusive). 
+ If not given, slicing will stop at the end. (NotImplemented) + step : int, default 1 + Slice step. + return_fixed_size_list : Optional[bool], default None + Whether to return a FixedSizeListArray. If true _and_ stop is after + a list element's length, nulls will be appended to create the + requested slice size. The default of `None` will return the same + type which was passed in. + """ + + def __init__(self, start, stop=None, step=1, return_fixed_size_list=None): + self._set_options(start, stop, step, return_fixed_size_list) + + +cdef class _ReplaceSliceOptions(FunctionOptions): + def _set_options(self, start, stop, replacement): + self.wrapped.reset( + new CReplaceSliceOptions(start, stop, tobytes(replacement)) + ) + + +class ReplaceSliceOptions(_ReplaceSliceOptions): + """ + Options for replacing slices. + + Parameters + ---------- + start : int + Index to start slicing at (inclusive). + stop : int + Index to stop slicing at (exclusive). + replacement : str + What to replace the slice with. + """ + + def __init__(self, start, stop, replacement): + self._set_options(start, stop, replacement) + + +cdef class _FilterOptions(FunctionOptions): + _null_selection_map = { + "drop": CFilterNullSelectionBehavior_DROP, + "emit_null": CFilterNullSelectionBehavior_EMIT_NULL, + } + + def _set_options(self, null_selection_behavior): + try: + self.wrapped.reset( + new CFilterOptions( + self._null_selection_map[null_selection_behavior] + ) + ) + except KeyError: + _raise_invalid_function_option(null_selection_behavior, + "null selection behavior") + + +class FilterOptions(_FilterOptions): + """ + Options for selecting with a boolean filter. + + Parameters + ---------- + null_selection_behavior : str, default "drop" + How to handle nulls in the selection filter. + Accepted values are "drop", "emit_null". + """ + + def __init__(self, null_selection_behavior="drop"): + self._set_options(null_selection_behavior) + + +cdef class _DictionaryEncodeOptions(FunctionOptions): + _null_encoding_map = { + "encode": CDictionaryEncodeNullEncodingBehavior_ENCODE, + "mask": CDictionaryEncodeNullEncodingBehavior_MASK, + } + + def _set_options(self, null_encoding): + try: + self.wrapped.reset( + new CDictionaryEncodeOptions( + self._null_encoding_map[null_encoding] + ) + ) + except KeyError: + _raise_invalid_function_option(null_encoding, "null encoding") + + +class DictionaryEncodeOptions(_DictionaryEncodeOptions): + """ + Options for dictionary encoding. + + Parameters + ---------- + null_encoding : str, default "mask" + How to encode nulls in the input. + Accepted values are "mask" (null inputs emit a null in the indices + array), "encode" (null inputs emit a non-null index pointing to + a null value in the dictionary array). + """ + + def __init__(self, null_encoding="mask"): + self._set_options(null_encoding) + + +cdef class _RunEndEncodeOptions(FunctionOptions): + def _set_options(self, run_end_type): + run_end_ty = ensure_type(run_end_type) + self.wrapped.reset(new CRunEndEncodeOptions(pyarrow_unwrap_data_type(run_end_ty))) + + +class RunEndEncodeOptions(_RunEndEncodeOptions): + """ + Options for run-end encoding. + + Parameters + ---------- + run_end_type : DataType, default pyarrow.int32() + The data type of the run_ends array. + + Accepted values are pyarrow.{int16(), int32(), int64()}. 
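A short sketch of how the selection and encoding options above surface in the Python API (the run-end encoding call assumes a pyarrow version that ships `run_end_encode`):

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1, 2, 3, 4])
mask = pa.array([True, False, None, True])
pc.filter(arr, mask)                                       # nulls dropped: [1, 4]
pc.filter(arr, mask, null_selection_behavior="emit_null")  # [1, null, 4]

pc.dictionary_encode(pa.array(["a", "b", "a", None]))
# -> DictionaryArray; the null is masked in the indices by default

pc.run_end_encode(pa.array([1, 1, 2, 2, 2]), run_end_type=pa.int16())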
+ """ + + def __init__(self, run_end_type=lib.int32()): + self._set_options(run_end_type) + + +cdef class _TakeOptions(FunctionOptions): + def _set_options(self, boundscheck): + self.wrapped.reset(new CTakeOptions(boundscheck)) + + +class TakeOptions(_TakeOptions): + """ + Options for the `take` and `array_take` functions. + + Parameters + ---------- + boundscheck : boolean, default True + Whether to check indices are within bounds. If False and an + index is out of bounds, behavior is undefined (the process + may crash). + """ + + def __init__(self, *, boundscheck=True): + self._set_options(boundscheck) + + +cdef class _MakeStructOptions(FunctionOptions): + def _set_options(self, field_names, field_nullability, field_metadata): + cdef: + vector[c_string] c_field_names + vector[shared_ptr[const CKeyValueMetadata]] c_field_metadata + for name in field_names: + c_field_names.push_back(tobytes(name)) + for metadata in field_metadata: + c_field_metadata.push_back(pyarrow_unwrap_metadata(metadata)) + self.wrapped.reset( + new CMakeStructOptions(c_field_names, field_nullability, + c_field_metadata) + ) + + +class MakeStructOptions(_MakeStructOptions): + """ + Options for the `make_struct` function. + + Parameters + ---------- + field_names : sequence of str + Names of the struct fields to create. + field_nullability : sequence of bool, optional + Nullability information for each struct field. + If omitted, all fields are nullable. + field_metadata : sequence of KeyValueMetadata, optional + Metadata for each struct field. + """ + + def __init__(self, field_names=(), *, field_nullability=None, + field_metadata=None): + if field_nullability is None: + field_nullability = [True] * len(field_names) + if field_metadata is None: + field_metadata = [None] * len(field_names) + self._set_options(field_names, field_nullability, field_metadata) + + +cdef CFieldRef _ensure_field_ref(value) except *: + cdef: + CFieldRef field_ref + const CFieldRef* field_ref_ptr + + if isinstance(value, (list, tuple)): + value = Expression._nested_field(tuple(value)) + + if isinstance(value, Expression): + field_ref_ptr = (value).unwrap().field_ref() + if field_ref_ptr is NULL: + raise ValueError("Unable to get FieldRef from Expression") + field_ref = deref(field_ref_ptr) + elif isinstance(value, (bytes, str)): + if value.startswith(b'.' if isinstance(value, bytes) else '.'): + field_ref = GetResultValue( + CFieldRef.FromDotPath(tobytes(value))) + else: + field_ref = CFieldRef(tobytes(value)) + elif isinstance(value, int): + field_ref = CFieldRef( value) + else: + raise TypeError("Expected a field reference as a str or int, list of " + f"str or int, or Expression. Got {type(value)} instead.") + return field_ref + + +cdef class _StructFieldOptions(FunctionOptions): + def _set_options(self, indices): + + if isinstance(indices, (list, tuple)) and not len(indices): + # Allow empty indices; effectively return same array + self.wrapped.reset( + new CStructFieldOptions(indices)) + return + + cdef CFieldRef field_ref = _ensure_field_ref(indices) + self.wrapped.reset(new CStructFieldOptions(field_ref)) + + +class StructFieldOptions(_StructFieldOptions): + """ + Options for the `struct_field` function. + + Parameters + ---------- + indices : List[str], List[bytes], List[int], Expression, bytes, str, or int + List of indices for chained field lookup, for example `[4, 1]` + will look up the second nested field in the fifth outer field. 
+ """ + + def __init__(self, indices): + self._set_options(indices) + + +cdef class _ScalarAggregateOptions(FunctionOptions): + def _set_options(self, skip_nulls, min_count): + self.wrapped.reset(new CScalarAggregateOptions(skip_nulls, min_count)) + + +class ScalarAggregateOptions(_ScalarAggregateOptions): + __doc__ = f""" + Options for scalar aggregations. + + Parameters + ---------- + {_skip_nulls_doc()} + {_min_count_doc(default=1)} + """ + + def __init__(self, *, skip_nulls=True, min_count=1): + self._set_options(skip_nulls, min_count) + + +cdef class _CountOptions(FunctionOptions): + _mode_map = { + "only_valid": CCountMode_ONLY_VALID, + "only_null": CCountMode_ONLY_NULL, + "all": CCountMode_ALL, + } + + def _set_options(self, mode): + try: + self.wrapped.reset(new CCountOptions(self._mode_map[mode])) + except KeyError: + _raise_invalid_function_option(mode, "count mode") + + +class CountOptions(_CountOptions): + """ + Options for the `count` function. + + Parameters + ---------- + mode : str, default "only_valid" + Which values to count in the input. + Accepted values are "only_valid", "only_null", "all". + """ + + def __init__(self, mode="only_valid"): + self._set_options(mode) + + +cdef class _IndexOptions(FunctionOptions): + def _set_options(self, scalar): + self.wrapped.reset(new CIndexOptions(pyarrow_unwrap_scalar(scalar))) + + +class IndexOptions(_IndexOptions): + """ + Options for the `index` function. + + Parameters + ---------- + value : Scalar + The value to search for. + """ + + def __init__(self, value): + self._set_options(value) + + +cdef class _MapLookupOptions(FunctionOptions): + _occurrence_map = { + "all": CMapLookupOccurrence_ALL, + "first": CMapLookupOccurrence_FIRST, + "last": CMapLookupOccurrence_LAST, + } + + def _set_options(self, query_key, occurrence): + try: + self.wrapped.reset( + new CMapLookupOptions( + pyarrow_unwrap_scalar(query_key), + self._occurrence_map[occurrence] + ) + ) + except KeyError: + _raise_invalid_function_option(occurrence, + "Should either be first, last, or all") + + +class MapLookupOptions(_MapLookupOptions): + """ + Options for the `map_lookup` function. + + Parameters + ---------- + query_key : Scalar or Object can be converted to Scalar + The key to search for. + occurrence : str + The occurrence(s) to return from the Map + Accepted values are "first", "last", or "all". + """ + + def __init__(self, query_key, occurrence): + if not isinstance(query_key, lib.Scalar): + query_key = lib.scalar(query_key) + + self._set_options(query_key, occurrence) + + +cdef class _ModeOptions(FunctionOptions): + def _set_options(self, n, skip_nulls, min_count): + self.wrapped.reset(new CModeOptions(n, skip_nulls, min_count)) + + +class ModeOptions(_ModeOptions): + __doc__ = f""" + Options for the `mode` function. + + Parameters + ---------- + n : int, default 1 + Number of distinct most-common values to return. 
+ {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, n=1, *, skip_nulls=True, min_count=0): + self._set_options(n, skip_nulls, min_count) + + +cdef class _SetLookupOptions(FunctionOptions): + def _set_options(self, value_set, c_bool skip_nulls): + cdef unique_ptr[CDatum] valset + if isinstance(value_set, Array): + valset.reset(new CDatum(( value_set).sp_array)) + elif isinstance(value_set, ChunkedArray): + valset.reset( + new CDatum(( value_set).sp_chunked_array) + ) + elif isinstance(value_set, Scalar): + valset.reset(new CDatum(( value_set).unwrap())) + else: + _raise_invalid_function_option(value_set, "value set", + exception_class=TypeError) + + self.wrapped.reset(new CSetLookupOptions(deref(valset), skip_nulls)) + + +class SetLookupOptions(_SetLookupOptions): + """ + Options for the `is_in` and `index_in` functions. + + Parameters + ---------- + value_set : Array + Set of values to look for in the input. + skip_nulls : bool, default False + If False, nulls in the input are matched in the value_set just + like regular values. + If True, nulls in the input always fail matching. + """ + + def __init__(self, value_set, *, skip_nulls=False): + self._set_options(value_set, skip_nulls) + + +cdef class _StrptimeOptions(FunctionOptions): + _unit_map = { + "s": TimeUnit_SECOND, + "ms": TimeUnit_MILLI, + "us": TimeUnit_MICRO, + "ns": TimeUnit_NANO, + } + + def _set_options(self, format, unit, error_is_null): + try: + self.wrapped.reset( + new CStrptimeOptions(tobytes(format), self._unit_map[unit], + error_is_null) + ) + except KeyError: + _raise_invalid_function_option(unit, "time unit") + + +class StrptimeOptions(_StrptimeOptions): + """ + Options for the `strptime` function. + + Parameters + ---------- + format : str + Pattern for parsing input strings as timestamps, such as "%Y/%m/%d". + Note that the semantics of the format follow the C/C++ strptime, not the Python one. + There are differences in behavior, for example how the "%y" placeholder + handles years with less than four digits. + unit : str + Timestamp unit of the output. + Accepted values are "s", "ms", "us", "ns". + error_is_null : boolean, default False + Return null on parsing errors if true or raise if false. + """ + + def __init__(self, format, unit, error_is_null=False): + self._set_options(format, unit, error_is_null) + + +cdef class _StrftimeOptions(FunctionOptions): + def _set_options(self, format, locale): + self.wrapped.reset( + new CStrftimeOptions(tobytes(format), tobytes(locale)) + ) + + +class StrftimeOptions(_StrftimeOptions): + """ + Options for the `strftime` function. + + Parameters + ---------- + format : str, default "%Y-%m-%dT%H:%M:%S" + Pattern for formatting input values. + locale : str, default "C" + Locale to use for locale-specific format specifiers. + """ + + def __init__(self, format="%Y-%m-%dT%H:%M:%S", locale="C"): + self._set_options(format, locale) + + +cdef class _DayOfWeekOptions(FunctionOptions): + def _set_options(self, count_from_zero, week_start): + self.wrapped.reset( + new CDayOfWeekOptions(count_from_zero, week_start) + ) + + +class DayOfWeekOptions(_DayOfWeekOptions): + """ + Options for the `day_of_week` function. + + Parameters + ---------- + count_from_zero : bool, default True + If True, number days from 0, otherwise from 1. + week_start : int, default 1 + Which day does the week start with (Monday=1, Sunday=7). + How this value is numbered is unaffected by `count_from_zero`. 
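A short sketch of the set-lookup and timestamp-parsing options above in use:

import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array(["a", "b", "c"])
pc.is_in(arr, value_set=pa.array(["a", "c"]))
# -> BooleanArray [true, false, true]

pc.strptime(pa.array(["2023/01/31"]), format="%Y/%m/%d", unit="s")
# -> timestamp[s] array; error_is_null=True would yield null instead of raising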
+ """ + + def __init__(self, *, count_from_zero=True, week_start=1): + self._set_options(count_from_zero, week_start) + + +cdef class _WeekOptions(FunctionOptions): + def _set_options(self, week_starts_monday, count_from_zero, + first_week_is_fully_in_year): + self.wrapped.reset( + new CWeekOptions(week_starts_monday, count_from_zero, + first_week_is_fully_in_year) + ) + + +class WeekOptions(_WeekOptions): + """ + Options for the `week` function. + + Parameters + ---------- + week_starts_monday : bool, default True + If True, weeks start on Monday; if False, on Sunday. + count_from_zero : bool, default False + If True, dates at the start of a year that fall into the last week + of the previous year emit 0. + If False, they emit 52 or 53 (the week number of the last week + of the previous year). + first_week_is_fully_in_year : bool, default False + If True, week number 0 is fully in January. + If False, a week that begins on December 29, 30 or 31 is considered + to be week number 0 of the following year. + """ + + def __init__(self, *, week_starts_monday=True, count_from_zero=False, + first_week_is_fully_in_year=False): + self._set_options(week_starts_monday, + count_from_zero, first_week_is_fully_in_year) + + +cdef class _AssumeTimezoneOptions(FunctionOptions): + _ambiguous_map = { + "raise": CAssumeTimezoneAmbiguous_AMBIGUOUS_RAISE, + "earliest": CAssumeTimezoneAmbiguous_AMBIGUOUS_EARLIEST, + "latest": CAssumeTimezoneAmbiguous_AMBIGUOUS_LATEST, + } + _nonexistent_map = { + "raise": CAssumeTimezoneNonexistent_NONEXISTENT_RAISE, + "earliest": CAssumeTimezoneNonexistent_NONEXISTENT_EARLIEST, + "latest": CAssumeTimezoneNonexistent_NONEXISTENT_LATEST, + } + + def _set_options(self, timezone, ambiguous, nonexistent): + if ambiguous not in self._ambiguous_map: + _raise_invalid_function_option(ambiguous, + "'ambiguous' timestamp handling") + if nonexistent not in self._nonexistent_map: + _raise_invalid_function_option(nonexistent, + "'nonexistent' timestamp handling") + self.wrapped.reset( + new CAssumeTimezoneOptions(tobytes(timezone), + self._ambiguous_map[ambiguous], + self._nonexistent_map[nonexistent]) + ) + + +class AssumeTimezoneOptions(_AssumeTimezoneOptions): + """ + Options for the `assume_timezone` function. + + Parameters + ---------- + timezone : str + Timezone to assume for the input. + ambiguous : str, default "raise" + How to handle timestamps that are ambiguous in the assumed timezone. + Accepted values are "raise", "earliest", "latest". + nonexistent : str, default "raise" + How to handle timestamps that don't exist in the assumed timezone. + Accepted values are "raise", "earliest", "latest". + """ + + def __init__(self, timezone, *, ambiguous="raise", nonexistent="raise"): + self._set_options(timezone, ambiguous, nonexistent) + + +cdef class _NullOptions(FunctionOptions): + def _set_options(self, nan_is_null): + self.wrapped.reset(new CNullOptions(nan_is_null)) + + +class NullOptions(_NullOptions): + """ + Options for the `is_null` function. + + Parameters + ---------- + nan_is_null : bool, default False + Whether floating-point NaN values are considered null. + """ + + def __init__(self, *, nan_is_null=False): + self._set_options(nan_is_null) + + +cdef class _VarianceOptions(FunctionOptions): + def _set_options(self, ddof, skip_nulls, min_count): + self.wrapped.reset(new CVarianceOptions(ddof, skip_nulls, min_count)) + + +class VarianceOptions(_VarianceOptions): + __doc__ = f""" + Options for the `variance` and `stddev` functions. 
+ + Parameters + ---------- + ddof : int, default 0 + Number of degrees of freedom. + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, *, ddof=0, skip_nulls=True, min_count=0): + self._set_options(ddof, skip_nulls, min_count) + + +cdef class _SplitOptions(FunctionOptions): + def _set_options(self, max_splits, reverse): + self.wrapped.reset(new CSplitOptions(max_splits, reverse)) + + +class SplitOptions(_SplitOptions): + """ + Options for splitting on whitespace. + + Parameters + ---------- + max_splits : int or None, default None + Maximum number of splits for each input value (unlimited if None). + reverse : bool, default False + Whether to start splitting from the end of each input value. + This only has an effect if `max_splits` is not None. + """ + + def __init__(self, *, max_splits=None, reverse=False): + if max_splits is None: + max_splits = -1 + self._set_options(max_splits, reverse) + + +cdef class _SplitPatternOptions(FunctionOptions): + def _set_options(self, pattern, max_splits, reverse): + self.wrapped.reset( + new CSplitPatternOptions(tobytes(pattern), max_splits, reverse) + ) + + +class SplitPatternOptions(_SplitPatternOptions): + """ + Options for splitting on a string pattern. + + Parameters + ---------- + pattern : str + String pattern to split on. + max_splits : int or None, default None + Maximum number of splits for each input value (unlimited if None). + reverse : bool, default False + Whether to start splitting from the end of each input value. + This only has an effect if `max_splits` is not None. + """ + + def __init__(self, pattern, *, max_splits=None, reverse=False): + if max_splits is None: + max_splits = -1 + self._set_options(pattern, max_splits, reverse) + + +cdef CSortOrder unwrap_sort_order(order) except *: + if order == "ascending": + return CSortOrder_Ascending + elif order == "descending": + return CSortOrder_Descending + _raise_invalid_function_option(order, "sort order") + + +cdef CNullPlacement unwrap_null_placement(null_placement) except *: + if null_placement == "at_start": + return CNullPlacement_AtStart + elif null_placement == "at_end": + return CNullPlacement_AtEnd + _raise_invalid_function_option(null_placement, "null placement") + + +cdef class _PartitionNthOptions(FunctionOptions): + def _set_options(self, pivot, null_placement): + self.wrapped.reset(new CPartitionNthOptions( + pivot, unwrap_null_placement(null_placement))) + + +class PartitionNthOptions(_PartitionNthOptions): + """ + Options for the `partition_nth_indices` function. + + Parameters + ---------- + pivot : int + Index into the equivalent sorted array of the pivot element. + null_placement : str, default "at_end" + Where nulls in the input should be partitioned. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, pivot, *, null_placement="at_end"): + self._set_options(pivot, null_placement) + + +cdef class _CumulativeOptions(FunctionOptions): + def _set_options(self, start, skip_nulls): + if start is None: + self.wrapped.reset(new CCumulativeOptions(skip_nulls)) + elif isinstance(start, Scalar): + self.wrapped.reset(new CCumulativeOptions( + pyarrow_unwrap_scalar(start), skip_nulls)) + else: + try: + start = lib.scalar(start) + self.wrapped.reset(new CCumulativeOptions( + pyarrow_unwrap_scalar(start), skip_nulls)) + except Exception: + _raise_invalid_function_option( + start, "`start` type for CumulativeOptions", TypeError) + + +class CumulativeOptions(_CumulativeOptions): + """ + Options for `cumulative_*` functions. 
+ + - cumulative_sum + - cumulative_sum_checked + - cumulative_prod + - cumulative_prod_checked + - cumulative_max + - cumulative_min + + Parameters + ---------- + start : Scalar, default None + Starting value for the cumulative operation. If none is given, + a default value depending on the operation and input type is used. + skip_nulls : bool, default False + When false, the first encountered null is propagated. + """ + + def __init__(self, start=None, *, skip_nulls=False): + self._set_options(start, skip_nulls) + + +class CumulativeSumOptions(_CumulativeOptions): + """ + Options for `cumulative_sum` function. + + Parameters + ---------- + start : Scalar, default None + Starting value for sum computation + skip_nulls : bool, default False + When false, the first encountered null is propagated. + """ + + def __init__(self, start=None, *, skip_nulls=False): + warnings.warn( + _DEPR_MSG.format("CumulativeSumOptions", "14.0", "CumulativeOptions"), + FutureWarning, + stacklevel=2 + ) + self._set_options(start, skip_nulls) + + +cdef class _PairwiseOptions(FunctionOptions): + def _set_options(self, period): + self.wrapped.reset(new CPairwiseOptions(period)) + + +class PairwiseOptions(_PairwiseOptions): + """ + Options for `pairwise` functions. + + Parameters + ---------- + period : int, default 1 + Period for applying the period function. + """ + + def __init__(self, period=1): + self._set_options(period) + + +cdef class _ArraySortOptions(FunctionOptions): + def _set_options(self, order, null_placement): + self.wrapped.reset(new CArraySortOptions( + unwrap_sort_order(order), unwrap_null_placement(null_placement))) + + +class ArraySortOptions(_ArraySortOptions): + """ + Options for the `array_sort_indices` function. + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + null_placement : str, default "at_end" + Where nulls in the input should be sorted. + Accepted values are "at_start", "at_end". + """ + + def __init__(self, order="ascending", *, null_placement="at_end"): + self._set_options(order, null_placement) + + +cdef class _SortOptions(FunctionOptions): + def _set_options(self, sort_keys, null_placement): + cdef vector[CSortKey] c_sort_keys + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + self.wrapped.reset(new CSortOptions( + c_sort_keys, unwrap_null_placement(null_placement))) + + +class SortOptions(_SortOptions): + """ + Options for the `sort_indices` function. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + null_placement : str, default "at_end" + Where nulls in input should be sorted, only applying to + columns/fields mentioned in `sort_keys`. + Accepted values are "at_start", "at_end". 
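+
+    A minimal usage sketch (column name and values below are purely
+    illustrative)::
+
+        import pyarrow as pa
+        import pyarrow.compute as pc
+
+        tab = pa.table({"a": [3, 1, 2]})
+        opts = pc.SortOptions(sort_keys=[("a", "ascending")])
+        # sort_indices returns the sorting permutation; take() applies it
+        tab = tab.take(pc.sort_indices(tab, options=opts))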
+ """ + + def __init__(self, sort_keys=(), *, null_placement="at_end"): + self._set_options(sort_keys, null_placement) + + +cdef class _SelectKOptions(FunctionOptions): + def _set_options(self, k, sort_keys): + cdef vector[CSortKey] c_sort_keys + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + self.wrapped.reset(new CSelectKOptions(k, c_sort_keys)) + + +class SelectKOptions(_SelectKOptions): + """ + Options for top/bottom k-selection. + + Parameters + ---------- + k : int + Number of leading values to select in sorted order + (i.e. the largest values if sort order is "descending", + the smallest otherwise). + sort_keys : sequence of (name, order) tuples + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + """ + + def __init__(self, k, sort_keys): + self._set_options(k, sort_keys) + + +cdef class _QuantileOptions(FunctionOptions): + _interp_map = { + "linear": CQuantileInterp_LINEAR, + "lower": CQuantileInterp_LOWER, + "higher": CQuantileInterp_HIGHER, + "nearest": CQuantileInterp_NEAREST, + "midpoint": CQuantileInterp_MIDPOINT, + } + + def _set_options(self, quantiles, interp, skip_nulls, min_count): + try: + self.wrapped.reset( + new CQuantileOptions(quantiles, self._interp_map[interp], + skip_nulls, min_count) + ) + except KeyError: + _raise_invalid_function_option(interp, "quantile interpolation") + + +class QuantileOptions(_QuantileOptions): + __doc__ = f""" + Options for the `quantile` function. + + Parameters + ---------- + q : double or sequence of double, default 0.5 + Probability levels of the quantiles to compute. All values must be in + [0, 1]. + interpolation : str, default "linear" + How to break ties between competing data points for a given quantile. + Accepted values are: + + - "linear": compute an interpolation + - "lower": always use the smallest of the two data points + - "higher": always use the largest of the two data points + - "nearest": select the data point that is closest to the quantile + - "midpoint": compute the (unweighted) mean of the two data points + {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, q=0.5, *, interpolation="linear", skip_nulls=True, + min_count=0): + if not isinstance(q, (list, tuple, np.ndarray)): + q = [q] + self._set_options(q, interpolation, skip_nulls, min_count) + + +cdef class _TDigestOptions(FunctionOptions): + def _set_options(self, quantiles, delta, buffer_size, skip_nulls, + min_count): + self.wrapped.reset( + new CTDigestOptions(quantiles, delta, buffer_size, skip_nulls, + min_count) + ) + + +class TDigestOptions(_TDigestOptions): + __doc__ = f""" + Options for the `tdigest` function. + + Parameters + ---------- + q : double or sequence of double, default 0.5 + Probability levels of the quantiles to approximate. All values must be + in [0, 1]. + delta : int, default 100 + Compression parameter for the T-digest algorithm. + buffer_size : int, default 500 + Buffer size for the T-digest algorithm. 
+ {_skip_nulls_doc()} + {_min_count_doc(default=0)} + """ + + def __init__(self, q=0.5, *, delta=100, buffer_size=500, skip_nulls=True, + min_count=0): + if not isinstance(q, (list, tuple, np.ndarray)): + q = [q] + self._set_options(q, delta, buffer_size, skip_nulls, min_count) + + +cdef class _Utf8NormalizeOptions(FunctionOptions): + _form_map = { + "NFC": CUtf8NormalizeForm_NFC, + "NFKC": CUtf8NormalizeForm_NFKC, + "NFD": CUtf8NormalizeForm_NFD, + "NFKD": CUtf8NormalizeForm_NFKD, + } + + def _set_options(self, form): + try: + self.wrapped.reset( + new CUtf8NormalizeOptions(self._form_map[form]) + ) + except KeyError: + _raise_invalid_function_option(form, + "Unicode normalization form") + + +class Utf8NormalizeOptions(_Utf8NormalizeOptions): + """ + Options for the `utf8_normalize` function. + + Parameters + ---------- + form : str + Unicode normalization form. + Accepted values are "NFC", "NFKC", "NFD", NFKD". + """ + + def __init__(self, form): + self._set_options(form) + + +cdef class _RandomOptions(FunctionOptions): + def _set_options(self, initializer): + if initializer == 'system': + self.wrapped.reset(new CRandomOptions( + CRandomOptions.FromSystemRandom())) + return + + if not isinstance(initializer, int): + try: + initializer = hash(initializer) + except TypeError: + raise TypeError( + f"initializer should be 'system', an integer, " + f"or a hashable object; got {initializer!r}") + + if initializer < 0: + initializer += 2**64 + self.wrapped.reset(new CRandomOptions( + CRandomOptions.FromSeed(initializer))) + + +class RandomOptions(_RandomOptions): + """ + Options for random generation. + + Parameters + ---------- + initializer : int or str + How to initialize the underlying random generator. + If an integer is given, it is used as a seed. + If "system" is given, the random generator is initialized with + a system-specific source of (hopefully true) randomness. + Other values are invalid. + """ + + def __init__(self, *, initializer='system'): + self._set_options(initializer) + + +cdef class _RankOptions(FunctionOptions): + + _tiebreaker_map = { + "min": CRankOptionsTiebreaker_Min, + "max": CRankOptionsTiebreaker_Max, + "first": CRankOptionsTiebreaker_First, + "dense": CRankOptionsTiebreaker_Dense, + } + + def _set_options(self, sort_keys, null_placement, tiebreaker): + cdef vector[CSortKey] c_sort_keys + if isinstance(sort_keys, str): + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(""), unwrap_sort_order(sort_keys)) + ) + else: + for name, order in sort_keys: + c_sort_keys.push_back( + CSortKey(_ensure_field_ref(name), unwrap_sort_order(order)) + ) + try: + self.wrapped.reset( + new CRankOptions(c_sort_keys, + unwrap_null_placement(null_placement), + self._tiebreaker_map[tiebreaker]) + ) + except KeyError: + _raise_invalid_function_option(tiebreaker, "tiebreaker") + + +class RankOptions(_RankOptions): + """ + Options for the `rank` function. + + Parameters + ---------- + sort_keys : sequence of (name, order) tuples or str, default "ascending" + Names of field/column keys to sort the input on, + along with the order each field/column is sorted in. + Accepted values for `order` are "ascending", "descending". + The field name can be a string column name or expression. + Alternatively, one can simply pass "ascending" or "descending" as a string + if the input is array-like. + null_placement : str, default "at_end" + Where nulls in input should be sorted. + Accepted values are "at_start", "at_end". 
+ tiebreaker : str, default "first" + Configure how ties between equal values are handled. + Accepted values are: + + - "min": Ties get the smallest possible rank in sorted order. + - "max": Ties get the largest possible rank in sorted order. + - "first": Ranks are assigned in order of when ties appear in the + input. This ensures the ranks are a stable permutation + of the input. + - "dense": The ranks span a dense [1, M] interval where M is the + number of distinct values in the input. + """ + + def __init__(self, sort_keys="ascending", *, null_placement="at_end", tiebreaker="first"): + self._set_options(sort_keys, null_placement, tiebreaker) + + +cdef class Expression(_Weakrefable): + """ + A logical expression to be evaluated against some input. + + To create an expression: + + - Use the factory function ``pyarrow.compute.scalar()`` to create a + scalar (not necessary when combined, see example below). + - Use the factory function ``pyarrow.compute.field()`` to reference + a field (column in table). + - Compare fields and scalars with ``<``, ``<=``, ``==``, ``>=``, ``>``. + - Combine expressions using python operators ``&`` (logical and), + ``|`` (logical or) and ``~`` (logical not). + Note: python keywords ``and``, ``or`` and ``not`` cannot be used + to combine expressions. + - Create expression predicates using Expression methods such as + ``pyarrow.compute.Expression.isin()``. + + Examples + -------- + + >>> import pyarrow.compute as pc + >>> (pc.field("a") < pc.scalar(3)) | (pc.field("b") > 7) + 7))> + >>> pc.field('a') != 3 + + >>> pc.field('a').isin([1, 2, 3]) + + """ + + def __init__(self): + msg = 'Expression is an abstract class thus cannot be initialized.' + raise TypeError(msg) + + cdef void init(self, const CExpression& sp): + self.expr = sp + + @staticmethod + cdef wrap(const CExpression& sp): + cdef Expression self = Expression.__new__(Expression) + self.init(sp) + return self + + cdef inline CExpression unwrap(self): + return self.expr + + def equals(self, Expression other): + """ + Parameters + ---------- + other : pyarrow.dataset.Expression + + Returns + ------- + bool + """ + return self.expr.Equals(other.unwrap()) + + def __str__(self): + return frombytes(self.expr.ToString()) + + def __repr__(self): + return "".format( + self.__class__.__name__, str(self) + ) + + @staticmethod + def from_substrait(object buffer not None): + """ + Deserialize an expression from Substrait + + The serialized message must be an ExtendedExpression message that has + only a single expression. The name of the expression and the schema + the expression was bound to will be ignored. Use + pyarrow.substrait.deserialize_expressions if this information is needed + or if the message might contain multiple expressions. + + Parameters + ---------- + buffer : bytes or Buffer + The Substrait message to deserialize + + Returns + ------- + Expression + The deserialized expression + """ + expressions = _pas().deserialize_expressions(buffer).expressions + if len(expressions) == 0: + raise ValueError("Substrait message did not contain any expressions") + if len(expressions) > 1: + raise ValueError( + "Substrait message contained multiple expressions. 
Use pyarrow.substrait.deserialize_expressions instead") + return next(iter(expressions.values())) + + def to_substrait(self, Schema schema not None, c_bool allow_arrow_extensions=False): + """ + Serialize the expression using Substrait + + The expression will be serialized as an ExtendedExpression message that has a + single expression named "expression" + + Parameters + ---------- + schema : Schema + The input schema the expression will be bound to + allow_arrow_extensions : bool, default False + If False then only functions that are part of the core Substrait function + definitions will be allowed. Set this to True to allow pyarrow-specific functions + but the result may not be accepted by other compute libraries. + + Returns + ------- + Buffer + A buffer containing the serialized Protobuf plan. + """ + return _pas().serialize_expressions([self], ["expression"], schema, allow_arrow_extensions=allow_arrow_extensions) + + @staticmethod + def _deserialize(Buffer buffer not None): + return Expression.wrap(GetResultValue(CDeserializeExpression( + pyarrow_unwrap_buffer(buffer)))) + + def __reduce__(self): + buffer = pyarrow_wrap_buffer(GetResultValue( + CSerializeExpression(self.expr))) + return Expression._deserialize, (buffer,) + + @staticmethod + cdef Expression _expr_or_scalar(object expr): + if isinstance(expr, Expression): + return ( expr) + return ( Expression._scalar(expr)) + + @staticmethod + def _call(str function_name, list arguments, FunctionOptions options=None): + cdef: + vector[CExpression] c_arguments + shared_ptr[CFunctionOptions] c_options + + for argument in arguments: + if not isinstance(argument, Expression): + # Attempt to help convert this to an expression + try: + argument = Expression._scalar(argument) + except ArrowInvalid: + raise TypeError( + "only other expressions allowed as arguments") + c_arguments.push_back(( argument).expr) + + if options is not None: + c_options = options.unwrap() + + return Expression.wrap(CMakeCallExpression( + tobytes(function_name), move(c_arguments), c_options)) + + def __richcmp__(self, other, int op): + other = Expression._expr_or_scalar(other) + return Expression._call({ + Py_EQ: "equal", + Py_NE: "not_equal", + Py_GT: "greater", + Py_GE: "greater_equal", + Py_LT: "less", + Py_LE: "less_equal", + }[op], [self, other]) + + def __bool__(self): + raise ValueError( + "An Expression cannot be evaluated to python True or False. " + "If you are using the 'and', 'or' or 'not' operators, use '&', " + "'|' or '~' instead." + ) + + def __invert__(self): + return Expression._call("invert", [self]) + + def __and__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("and_kleene", [self, other]) + + def __or__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("or_kleene", [self, other]) + + def __add__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("add_checked", [self, other]) + + def __mul__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("multiply_checked", [self, other]) + + def __sub__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("subtract_checked", [self, other]) + + def __truediv__(Expression self, other): + other = Expression._expr_or_scalar(other) + return Expression._call("divide_checked", [self, other]) + + def is_valid(self): + """ + Check whether the expression is not-null (valid). 
+ + This creates a new expression equivalent to calling the + `is_valid` compute function on this expression. + + Returns + ------- + is_valid : Expression + """ + return Expression._call("is_valid", [self]) + + def is_null(self, bint nan_is_null=False): + """ + Check whether the expression is null. + + This creates a new expression equivalent to calling the + `is_null` compute function on this expression. + + Parameters + ---------- + nan_is_null : boolean, default False + Whether floating-point NaNs are considered null. + + Returns + ------- + is_null : Expression + """ + options = NullOptions(nan_is_null=nan_is_null) + return Expression._call("is_null", [self], options) + + def is_nan(self): + """ + Check whether the expression is NaN. + + This creates a new expression equivalent to calling the + `is_nan` compute function on this expression. + + Returns + ------- + is_nan : Expression + """ + return Expression._call("is_nan", [self]) + + def cast(self, type=None, safe=None, options=None): + """ + Explicitly set or change the expression's data type. + + This creates a new expression equivalent to calling the + `cast` compute function on this expression. + + Parameters + ---------- + type : DataType, default None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + cast : Expression + """ + safe_vars_passed = (safe is not None) or (type is not None) + + if safe_vars_passed and (options is not None): + raise ValueError("Must either pass values for 'type' and 'safe' or pass a " + "value for 'options'") + + if options is None: + type = ensure_type(type, allow_none=False) + if safe is False: + options = CastOptions.unsafe(type) + else: + options = CastOptions.safe(type) + return Expression._call("cast", [self], options) + + def isin(self, values): + """ + Check whether the expression is contained in values. + + This creates a new expression equivalent to calling the + `is_in` compute function on this expression. + + Parameters + ---------- + values : Array or iterable + The values to check for. + + Returns + ------- + isin : Expression + A new expression that, when evaluated, checks whether + this expression's value is contained in `values`. 
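+
+        A short sketch of using the resulting expression as a filter
+        (table contents are illustrative)::
+
+            import pyarrow as pa
+            import pyarrow.compute as pc
+
+            tab = pa.table({"a": [1, 2, 3, 4]})
+            # keep only the rows where "a" is 2 or 4
+            tab.filter(pc.field("a").isin([2, 4]))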
+ """ + if not isinstance(values, Array): + values = lib.array(values) + + options = SetLookupOptions(values) + return Expression._call("is_in", [self], options) + + @staticmethod + def _field(name_or_idx not None): + cdef: + CFieldRef c_field + + if isinstance(name_or_idx, int): + return Expression.wrap(CMakeFieldExpressionByIndex(name_or_idx)) + else: + c_field = CFieldRef( tobytes(name_or_idx)) + return Expression.wrap(CMakeFieldExpression(c_field)) + + @staticmethod + def _nested_field(tuple names not None): + cdef: + vector[CFieldRef] nested + + if len(names) == 0: + raise ValueError("nested field reference should be non-empty") + nested.reserve(len(names)) + for name in names: + if isinstance(name, int): + nested.push_back(CFieldRef(name)) + else: + nested.push_back(CFieldRef( tobytes(name))) + return Expression.wrap(CMakeFieldExpression(CFieldRef(move(nested)))) + + @staticmethod + def _scalar(value): + cdef: + Scalar scalar + + if isinstance(value, Scalar): + scalar = value + else: + scalar = lib.scalar(value) + + return Expression.wrap(CMakeScalarExpression(scalar.unwrap())) + + +_deserialize = Expression._deserialize +cdef CExpression _true = CMakeScalarExpression( + make_shared[CBooleanScalar](True) +) + + +cdef CExpression _bind(Expression filter, Schema schema) except *: + assert schema is not None + + if filter is None: + return _true + + return GetResultValue(filter.unwrap().Bind( + deref(pyarrow_unwrap_schema(schema).get()))) + + +cdef class UdfContext: + """ + Per-invocation function context/state. + + This object will always be the first argument to a user-defined + function. It should not be used outside of a call to the function. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly" + .format(self.__class__.__name__)) + + cdef void init(self, const CUdfContext &c_context): + self.c_context = c_context + + @property + def batch_length(self): + """ + The common length of all input arguments (int). + + In the case that all arguments are scalars, this value + is used to pass the "actual length" of the arguments, + e.g. because the scalar values are encoding a column + with a constant value. + """ + return self.c_context.batch_length + + @property + def memory_pool(self): + """ + A memory pool for allocations (:class:`MemoryPool`). + + This is the memory pool supplied by the user when they invoked + the function and it should be used in any calls to arrow that the + UDF makes if that call accepts a memory_pool. + """ + return box_memory_pool(self.c_context.pool) + + +cdef inline CFunctionDoc _make_function_doc(dict func_doc) except *: + """ + Helper function to generate the FunctionDoc + This function accepts a dictionary and expects the + summary(str), description(str) and arg_names(List[str]) keys. 
+ """ + cdef: + CFunctionDoc f_doc + vector[c_string] c_arg_names + + f_doc.summary = tobytes(func_doc["summary"]) + f_doc.description = tobytes(func_doc["description"]) + for arg_name in func_doc["arg_names"]: + c_arg_names.push_back(tobytes(arg_name)) + f_doc.arg_names = c_arg_names + # UDFOptions integration: + # TODO: https://issues.apache.org/jira/browse/ARROW-16041 + f_doc.options_class = b"" + f_doc.options_required = False + return f_doc + + +cdef object box_udf_context(const CUdfContext& c_context): + cdef UdfContext context = UdfContext.__new__(UdfContext) + context.init(c_context) + return context + + +cdef _udf_callback(user_function, const CUdfContext& c_context, inputs): + """ + Helper callback function used to wrap the UdfContext from Python to C++ + execution. + """ + context = box_udf_context(c_context) + return user_function(context, *inputs) + + +def _get_udf_context(memory_pool, batch_length): + cdef CUdfContext c_context + c_context.pool = maybe_unbox_memory_pool(memory_pool) + c_context.batch_length = batch_length + context = box_udf_context(c_context) + return context + + +ctypedef CStatus (*CRegisterUdf)(PyObject* function, function[CallbackUdf] wrapper, + const CUdfOptions& options, CFunctionRegistry* registry) + +cdef class RegisterUdf(_Weakrefable): + cdef CRegisterUdf register_func + + cdef void init(self, const CRegisterUdf register_func): + self.register_func = register_func + + +cdef get_register_scalar_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterScalarFunction + return reg + + +cdef get_register_tabular_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterTabularFunction + return reg + + +cdef get_register_aggregate_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterAggregateFunction + return reg + +cdef get_register_vector_function(): + cdef RegisterUdf reg = RegisterUdf.__new__(RegisterUdf) + reg.register_func = RegisterVectorFunction + return reg + + +def register_scalar_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined scalar function. + + This API is EXPERIMENTAL. + + A scalar function is a function that executes elementwise + operations on arrays or scalars, i.e. a scalar function must + be computed row-by-row with no state where each output row + is computed only from its corresponding input row. + In other words, all argument arrays have the same length, + and the output array is of the same length as the arguments. + Scalar functions are the only functions allowed in query engine + expressions. + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return an Array or Scalar + matching the out_type. It must return a Scalar if + all arguments are scalar, else it must return an Array. + + To define a varargs function, pass a callable that takes + *args. The last in_type will be the type of all varargs + arguments. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). 
+ in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "simple udf" + >>> func_doc["description"] = "add a constant to a scalar" + >>> + >>> def add_constant(ctx, array): + ... return pc.add(array, 1, memory_pool=ctx.memory_pool) + >>> + >>> func_name = "py_add_func" + >>> in_types = {"array": pa.int64()} + >>> out_type = pa.int64() + >>> pc.register_scalar_function(add_constant, func_name, func_doc, + ... in_types, out_type) + >>> + >>> func = pc.get_function(func_name) + >>> func.name + 'py_add_func' + >>> answer = pc.call_function(func_name, [pa.array([20])]) + >>> answer + + [ + 21 + ] + """ + return _register_user_defined_function(get_register_scalar_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_vector_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined vector function. + + This API is EXPERIMENTAL. + + A vector function is a function that executes vector + operations on arrays. Vector function is often used + when compute doesn't fit other more specific types of + functions (e.g., scalar and aggregate). + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return an Array or Scalar + matching the out_type. It must return a Scalar if + all arguments are scalar, else it must return an Array. + + To define a varargs function, pass a callable that takes + *args. The last in_type will be the type of all varargs + arguments. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "percent rank" + >>> func_doc["description"] = "compute percent rank" + >>> + >>> def list_flatten_udf(ctx, x): + ... return pc.list_flatten(x) + >>> + >>> func_name = "list_flatten_udf" + >>> in_types = {"array": pa.list_(pa.int64())} + >>> out_type = pa.int64() + >>> pc.register_vector_function(list_flatten_udf, func_name, func_doc, + ... 
in_types, out_type) + >>> + >>> answer = pc.call_function(func_name, [pa.array([[1, 2], [3, 4]])]) + >>> answer + + [ + 1, + 2, + 3, + 4 + ] + """ + return _register_user_defined_function(get_register_vector_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_aggregate_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined non-decomposable aggregate function. + + This API is EXPERIMENTAL. + + A non-decomposable aggregation function is a function that executes + aggregate operations on the whole data that it is aggregating. + In other words, non-decomposable aggregate function cannot be + split into consume/merge/finalize steps. + + This is often used with ordered or segmented aggregation where groups + can be emit before accumulating all of the input data. + + Note that currently the size of any input column cannot exceed 2 GB + for a single segment (all groups combined). + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The first argument is the context argument of type + UdfContext. + Then, it must take arguments equal to the number of + in_types defined. It must return a Scalar matching the + out_type. + To define a varargs function, pass a callable that takes + *args. The in_type needs to match in type of inputs when + the function gets called. + function_name : str + Name of the function. This name must be unique, i.e., + there should only be one function registered with + this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + The argument names will be used to generate + documentation for the function. The number of + arguments specified here determines the function + arity. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + + Examples + -------- + >>> import numpy as np + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> + >>> func_doc = {} + >>> func_doc["summary"] = "simple median udf" + >>> func_doc["description"] = "compute median" + >>> + >>> def compute_median(ctx, array): + ... return pa.scalar(np.median(array)) + >>> + >>> func_name = "py_compute_median" + >>> in_types = {"array": pa.int64()} + >>> out_type = pa.float64() + >>> pc.register_aggregate_function(compute_median, func_name, func_doc, + ... in_types, out_type) + >>> + >>> func = pc.get_function(func_name) + >>> func.name + 'py_compute_median' + >>> answer = pc.call_function(func_name, [pa.array([20, 40])]) + >>> answer + + >>> table = pa.table([pa.array([1, 1, 2, 2]), pa.array([10, 20, 30, 40])], names=['k', 'v']) + >>> result = table.group_by('k').aggregate([('v', 'py_compute_median')]) + >>> result + pyarrow.Table + k: int64 + v_py_compute_median: double + ---- + k: [[1,2]] + v_py_compute_median: [[15,35]] + """ + return _register_user_defined_function(get_register_aggregate_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def register_tabular_function(func, function_name, function_doc, in_types, out_type, + func_registry=None): + """ + Register a user-defined tabular function. + + This API is EXPERIMENTAL. 
+ + A tabular function is one accepting a context argument of type + UdfContext and returning a generator of struct arrays. + The in_types argument must be empty and the out_type argument + specifies a schema. Each struct array must have field types + corresponding to the schema. + + Parameters + ---------- + func : callable + A callable implementing the user-defined function. + The only argument is the context argument of type + UdfContext. It must return a callable that + returns on each invocation a StructArray matching + the out_type, where an empty array indicates end. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + Must be an empty dictionary (reserved for future use). + out_type : Union[Schema, DataType] + Schema of the function's output, or a corresponding flat struct type. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + """ + cdef: + shared_ptr[CSchema] c_schema + shared_ptr[CDataType] c_type + + if isinstance(out_type, Schema): + c_schema = pyarrow_unwrap_schema(out_type) + with nogil: + c_type = make_shared[CStructType](deref(c_schema).fields()) + out_type = pyarrow_wrap_data_type(c_type) + return _register_user_defined_function(get_register_tabular_function(), + func, function_name, function_doc, in_types, + out_type, func_registry) + + +def _register_user_defined_function(register_func, func, function_name, function_doc, in_types, + out_type, func_registry=None): + """ + Register a user-defined function. + + This method itself doesn't care about the type of the UDF + (i.e., scalar vs tabular vs aggregate) + + Parameters + ---------- + register_func: object + An object holding a CRegisterUdf in a "register_func" attribute. + func : callable + A callable implementing the user-defined function. + function_name : str + Name of the function. There should only be one function + registered with this name in the function registry. + function_doc : dict + A dictionary object with keys "summary" (str), + and "description" (str). + in_types : Dict[str, DataType] + A dictionary mapping function argument names to + their respective DataType. + out_type : DataType + Output type of the function. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. 
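+
+    For example, ``register_scalar_function`` above simply forwards to this
+    helper as::
+
+        _register_user_defined_function(get_register_scalar_function(),
+                                        func, function_name, function_doc,
+                                        in_types, out_type, func_registry)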
+ """ + cdef: + CRegisterUdf c_register_func + c_string c_func_name + CArity c_arity + CFunctionDoc c_func_doc + vector[shared_ptr[CDataType]] c_in_types + PyObject* c_function + shared_ptr[CDataType] c_out_type + CUdfOptions c_options + CFunctionRegistry* c_func_registry + + if callable(func): + c_function = func + else: + raise TypeError("func must be a callable") + + c_func_name = tobytes(function_name) + + func_spec = inspect.getfullargspec(func) + num_args = -1 + if isinstance(in_types, dict): + for in_type in in_types.values(): + c_in_types.push_back( + pyarrow_unwrap_data_type(ensure_type(in_type))) + function_doc["arg_names"] = in_types.keys() + num_args = len(in_types) + else: + raise TypeError( + "in_types must be a dictionary of DataType") + + c_arity = CArity( num_args, func_spec.varargs) + + if "summary" not in function_doc: + raise ValueError("Function doc must contain a summary") + + if "description" not in function_doc: + raise ValueError("Function doc must contain a description") + + if "arg_names" not in function_doc: + raise ValueError("Function doc must contain arg_names") + + c_func_doc = _make_function_doc(function_doc) + + c_out_type = pyarrow_unwrap_data_type(ensure_type(out_type)) + + c_options.func_name = c_func_name + c_options.arity = c_arity + c_options.func_doc = c_func_doc + c_options.input_types = c_in_types + c_options.output_type = c_out_type + + if func_registry is None: + c_func_registry = NULL + else: + c_func_registry = (func_registry).registry + + c_register_func = (register_func).register_func + + check_status(c_register_func(c_function, + &_udf_callback, + c_options, c_func_registry)) + + +def call_tabular_function(function_name, args=None, func_registry=None): + """ + Get a record batch iterator from a tabular function. + + Parameters + ---------- + function_name : str + Name of the function. + args : iterable + The arguments to pass to the function. Accepted types depend + on the specific function. Currently, only an empty args is supported. + func_registry : FunctionRegistry + Optional function registry to use instead of the default global one. + """ + cdef: + c_string c_func_name + vector[CDatum] c_args + CFunctionRegistry* c_func_registry + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + + c_func_name = tobytes(function_name) + if func_registry is None: + c_func_registry = NULL + else: + c_func_registry = (func_registry).registry + if args is None: + args = [] + _pack_compute_args(args, &c_args) + + with nogil: + c_reader = GetResultValue(CallTabularFunction( + c_func_name, c_args, c_func_registry)) + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return RecordBatchReader.from_batches(pyarrow_wrap_schema(deref(c_reader).schema()), reader) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dcc562a41c795896d12fc7cdd3baebf0122bedc9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_csv.pxd @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable + + +cdef class ConvertOptions(_Weakrefable): + cdef: + unique_ptr[CCSVConvertOptions] options + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options) + + +cdef class ParseOptions(_Weakrefable): + cdef: + unique_ptr[CCSVParseOptions] options + object _invalid_row_handler + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options) + + +cdef class ReadOptions(_Weakrefable): + cdef: + unique_ptr[CCSVReadOptions] options + public object encoding + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options) + + +cdef class WriteOptions(_Weakrefable): + cdef: + unique_ptr[CCSVWriteOptions] options + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pyx new file mode 100644 index 0000000000000000000000000000000000000000..ba799a105e7e15ab8414988cdefdaa4dc315cad8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_cuda.pyx @@ -0,0 +1,1058 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from pyarrow.lib cimport * +from pyarrow.includes.libarrow_cuda cimport * +from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError +from pyarrow.util import get_contiguous_span +cimport cpython as cp + + +cdef class Context(_Weakrefable): + """ + CUDA driver context. + """ + + def __init__(self, *args, **kwargs): + """ + Create a CUDA driver context for a particular device. + + If a CUDA context handle is passed, it is wrapped, otherwise + a default CUDA context for the given device is requested. + + Parameters + ---------- + device_number : int (default 0) + Specify the GPU device for which the CUDA driver context is + requested. + handle : int, optional + Specify CUDA handle for a shared context that has been created + by another library. 
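+
+        A minimal sketch (assumes a CUDA-enabled pyarrow build and at
+        least one visible GPU)::
+
+            from pyarrow import cuda
+
+            ctx = cuda.Context(0)           # default context on device 0
+            cuda.Context.get_num_devices()  # number of GPUs seen by Arrow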
+ """ + # This method exposed because autodoc doesn't pick __cinit__ + + def __cinit__(self, int device_number=0, uintptr_t handle=0): + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + cdef int n = manager.num_devices() + if device_number >= n or device_number < 0: + self.context.reset() + raise ValueError('device_number argument must be ' + 'non-negative less than %s' % (n)) + if handle == 0: + self.context = GetResultValue(manager.GetContext(device_number)) + else: + self.context = GetResultValue(manager.GetSharedContext( + device_number, handle)) + self.device_number = device_number + + @staticmethod + def from_numba(context=None): + """ + Create a Context instance from a Numba CUDA context. + + Parameters + ---------- + context : {numba.cuda.cudadrv.driver.Context, None} + A Numba CUDA context instance. + If None, the current Numba context is used. + + Returns + ------- + shared_context : pyarrow.cuda.Context + Context instance. + """ + if context is None: + import numba.cuda + context = numba.cuda.current_context() + return Context(device_number=context.device.id, + handle=context.handle.value) + + def to_numba(self): + """ + Convert Context to a Numba CUDA context. + + Returns + ------- + context : numba.cuda.cudadrv.driver.Context + Numba CUDA context instance. + """ + import ctypes + import numba.cuda + device = numba.cuda.gpus[self.device_number] + handle = ctypes.c_void_p(self.handle) + context = numba.cuda.cudadrv.driver.Context(device, handle) + + class DummyPendingDeallocs(object): + # Context is managed by pyarrow + def add_item(self, *args, **kwargs): + pass + + context.deallocations = DummyPendingDeallocs() + return context + + @staticmethod + def get_num_devices(): + """ Return the number of GPU devices. + """ + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + return manager.num_devices() + + @property + def device_number(self): + """ Return context device number. + """ + return self.device_number + + @property + def handle(self): + """ Return pointer to context handle. + """ + return self.context.get().handle() + + cdef void init(self, const shared_ptr[CCudaContext]& ctx): + self.context = ctx + + def synchronize(self): + """Blocks until the device has completed all preceding requested + tasks. + """ + check_status(self.context.get().Synchronize()) + + @property + def bytes_allocated(self): + """Return the number of allocated bytes. + """ + return self.context.get().bytes_allocated() + + def get_device_address(self, uintptr_t address): + """Return the device address that is reachable from kernels running in + the context + + Parameters + ---------- + address : int + Specify memory address value + + Returns + ------- + device_address : int + Device address accessible from device context + + Notes + ----- + The device address is defined as a memory address accessible + by device. While it is often a device memory address but it + can be also a host memory address, for instance, when the + memory is allocated as host memory (using cudaMallocHost or + cudaHostAlloc) or as managed memory (using cudaMallocManaged) + or the host memory is page-locked (using cudaHostRegister). + """ + return GetResultValue(self.context.get().GetDeviceAddress(address)) + + def new_buffer(self, int64_t nbytes): + """Return new device buffer. + + Parameters + ---------- + nbytes : int + Specify the number of bytes to be allocated. + + Returns + ------- + buf : CudaBuffer + Allocated buffer. 
+ """ + cdef: + shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue(self.context.get().Allocate(nbytes)) + return pyarrow_wrap_cudabuffer(cudabuf) + + def foreign_buffer(self, address, size, base=None): + """ + Create device buffer from address and size as a view. + + The caller is responsible for allocating and freeing the + memory. When `address==size==0` then a new zero-sized buffer + is returned. + + Parameters + ---------- + address : int + Specify the starting address of the buffer. The address can + refer to both device or host memory but it must be + accessible from device after mapping it with + `get_device_address` method. + size : int + Specify the size of device buffer in bytes. + base : {None, object} + Specify object that owns the referenced memory. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device reachable memory. + + """ + if not address and size == 0: + return self.new_buffer(0) + cdef: + uintptr_t c_addr = self.get_device_address(address) + int64_t c_size = size + shared_ptr[CCudaBuffer] cudabuf + + cudabuf = GetResultValue(self.context.get().View( + c_addr, c_size)) + return pyarrow_wrap_cudabuffer_base(cudabuf, base) + + def open_ipc_buffer(self, ipc_handle): + """ Open existing CUDA IPC memory handle + + Parameters + ---------- + ipc_handle : IpcMemHandle + Specify opaque pointer to CUipcMemHandle (driver API). + + Returns + ------- + buf : CudaBuffer + referencing device buffer + """ + handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle) + cdef shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue( + self.context.get().OpenIpcBuffer(handle.get()[0])) + return pyarrow_wrap_cudabuffer(cudabuf) + + def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1): + """Create device buffer and initialize with data. + + Parameters + ---------- + data : {CudaBuffer, HostBuffer, Buffer, array-like} + Specify data to be copied to device buffer. + offset : int + Specify the offset of input buffer for device data + buffering. Default: 0. + size : int + Specify the size of device buffer in bytes. Default: all + (starting from input offset) + + Returns + ------- + cbuf : CudaBuffer + Device buffer with copied data. + """ + is_host_data = not pyarrow_is_cudabuffer(data) + buf = as_buffer(data) if is_host_data else data + + bsize = buf.size + if offset < 0 or (bsize and offset >= bsize): + raise ValueError('offset argument is out-of-range') + if size < 0: + size = bsize - offset + elif offset + size > bsize: + raise ValueError( + 'requested larger slice than available in device buffer') + + if offset != 0 or size != bsize: + buf = buf.slice(offset, size) + + result = self.new_buffer(size) + if is_host_data: + result.copy_from_host(buf, position=0, nbytes=size) + else: + result.copy_from_device(buf, position=0, nbytes=size) + return result + + def buffer_from_object(self, obj): + """Create device buffer view of arbitrary object that references + device accessible memory. + + When the object contains a non-contiguous view of device + accessible memory then the returned device buffer will contain + contiguous view of the memory, that is, including the + intermediate data that is otherwise invisible to the input + object. + + Parameters + ---------- + obj : {object, Buffer, HostBuffer, CudaBuffer, ...} + Specify an object that holds (device or host) address that + can be accessed from device. 
This includes objects with + types defined in pyarrow.cuda as well as arbitrary objects + that implement the CUDA array interface as defined by numba. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device accessible memory. + + """ + if isinstance(obj, HostBuffer): + return self.foreign_buffer(obj.address, obj.size, base=obj) + elif isinstance(obj, Buffer): + return CudaBuffer.from_buffer(obj) + elif isinstance(obj, CudaBuffer): + return obj + elif hasattr(obj, '__cuda_array_interface__'): + desc = obj.__cuda_array_interface__ + addr = desc['data'][0] + if addr is None: + return self.new_buffer(0) + import numpy as np + start, end = get_contiguous_span( + desc['shape'], desc.get('strides'), + np.dtype(desc['typestr']).itemsize) + return self.foreign_buffer(addr + start, end - start, base=obj) + raise ArrowTypeError('cannot create device buffer view from' + ' `%s` object' % (type(obj))) + + +cdef class IpcMemHandle(_Weakrefable): + """A serializable container for a CUDA IPC handle. + """ + cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h): + self.handle = h + + @staticmethod + def from_buffer(Buffer opaque_handle): + """Create IpcMemHandle from opaque buffer (e.g. from another + process) + + Parameters + ---------- + opaque_handle : + a CUipcMemHandle as a const void* + + Returns + ------- + ipc_handle : IpcMemHandle + """ + c_buf = pyarrow_unwrap_buffer(opaque_handle) + cdef: + shared_ptr[CCudaIpcMemHandle] handle + + handle = GetResultValue( + CCudaIpcMemHandle.FromBuffer(c_buf.get().data())) + return pyarrow_wrap_cudaipcmemhandle(handle) + + def serialize(self, pool=None): + """Write IpcMemHandle to a Buffer + + Parameters + ---------- + pool : {MemoryPool, None} + Specify a pool to allocate memory from + + Returns + ------- + buf : Buffer + The serialized buffer. + """ + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + cdef shared_ptr[CBuffer] buf + cdef CCudaIpcMemHandle* h = self.handle.get() + with nogil: + buf = GetResultValue(h.Serialize(pool_)) + return pyarrow_wrap_buffer(buf) + + +cdef class CudaBuffer(Buffer): + """An Arrow buffer with data located in a GPU device. + + To create a CudaBuffer instance, use Context.device_buffer(). + + The memory allocated in a CudaBuffer is freed when the buffer object + is deleted. + """ + + def __init__(self): + raise TypeError("Do not call CudaBuffer's constructor directly, use " + "`.device_buffer`" + " method instead.") + + cdef void init_cuda(self, + const shared_ptr[CCudaBuffer]& buffer, + object base): + self.cuda_buffer = buffer + self.init( buffer) + self.base = base + + @staticmethod + def from_buffer(buf): + """ Convert back generic buffer into CudaBuffer + + Parameters + ---------- + buf : Buffer + Specify buffer containing CudaBuffer + + Returns + ------- + dbuf : CudaBuffer + Resulting device buffer. + """ + c_buf = pyarrow_unwrap_buffer(buf) + cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf)) + return pyarrow_wrap_cudabuffer(cuda_buffer) + + @staticmethod + def from_numba(mem): + """Create a CudaBuffer view from numba MemoryPointer instance. + + Parameters + ---------- + mem : numba.cuda.cudadrv.driver.MemoryPointer + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of numba MemoryPointer. 
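+
+        A rough sketch, assuming numba is installed and that
+        ``darr.gpu_data`` exposes the device array's underlying
+        ``MemoryPointer``::
+
+            import numpy as np
+            from numba import cuda as nb_cuda
+            from pyarrow import cuda
+
+            darr = nb_cuda.to_device(np.arange(4, dtype=np.int64))
+            cbuf = cuda.CudaBuffer.from_numba(darr.gpu_data)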
+ """ + ctx = Context.from_numba(mem.context) + if mem.device_pointer.value is None and mem.size==0: + return ctx.new_buffer(0) + return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem) + + def to_numba(self): + """Return numba memory pointer of CudaBuffer instance. + """ + import ctypes + from numba.cuda.cudadrv.driver import MemoryPointer + return MemoryPointer(self.context.to_numba(), + pointer=ctypes.c_void_p(self.address), + size=self.size) + + cdef getitem(self, int64_t i): + return self.copy_to_host(position=i, nbytes=1)[0] + + def copy_to_host(self, int64_t position=0, int64_t nbytes=-1, + Buffer buf=None, + MemoryPool memory_pool=None, c_bool resizable=False): + """Copy memory from GPU device to CPU host + + Caller is responsible for ensuring that all tasks affecting + the memory are finished. Use + + `.context.synchronize()` + + when needed. + + Parameters + ---------- + position : int + Specify the starting position of the source data in GPU + device buffer. Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + the position until host buffer is full). + buf : Buffer + Specify a pre-allocated output buffer in host. Default: None + (allocate new output buffer). + memory_pool : MemoryPool + resizable : bool + Specify extra arguments to allocate_buffer. Used only when + buf is None. + + Returns + ------- + buf : Buffer + Output buffer in host. + + """ + if position < 0 or (self.size and position > self.size) \ + or (self.size == 0 and position != 0): + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + if buf is None: + if nbytes < 0: + # copy all starting from position to new host buffer + c_nbytes = self.size - position + else: + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available from ' + 'device buffer') + # copy nbytes starting from position to new host buffer + c_nbytes = nbytes + buf = allocate_buffer(c_nbytes, memory_pool=memory_pool, + resizable=resizable) + else: + if nbytes < 0: + # copy all from position until given host buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested copy does not fit into host buffer') + # copy nbytes from position to given host buffer + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get() + .CopyToHost(c_position, c_nbytes, + c_buf.get().mutable_data())) + return buf + + def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1): + """Copy data from host to device. + + The device buffer must be pre-allocated. + + Parameters + ---------- + data : {Buffer, array-like} + Specify data in host. It can be array-like that is valid + argument to py_buffer + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. 
+ """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + buf = as_buffer(data) + + if nbytes < 0: + # copy from host buffer to device buffer starting from + # position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from host buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from host buffer to device buffer starting + # from position + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromHost(c_position, c_buf.get().data(), + c_nbytes)) + return c_nbytes + + def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1): + """Copy data from device to device. + + Parameters + ---------- + buf : CudaBuffer + Specify source device buffer. + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. + + """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + + if nbytes < 0: + # copy from source device buffer to device buffer starting + # from position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from device buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from source device buffer to device buffer + # starting from position + c_nbytes = nbytes + + cdef: + shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf) + int64_t c_position = position + shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext( + buf.context) + void* c_source_data = (c_buf.get().address()) + + if self.context.handle != buf.context.handle: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromAnotherDevice(c_src_ctx, c_position, + c_source_data, c_nbytes)) + else: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromDevice(c_position, c_source_data, + c_nbytes)) + return c_nbytes + + def export_for_ipc(self): + """ + Expose this device buffer as IPC memory which can be used in other + processes. + + After calling this function, this device memory will not be + freed when the CudaBuffer is destructed. + + Returns + ------- + ipc_handle : IpcMemHandle + The exported IPC handle + + """ + cdef shared_ptr[CCudaIpcMemHandle] handle + with nogil: + handle = GetResultValue(self.cuda_buffer.get().ExportForIpc()) + return pyarrow_wrap_cudaipcmemhandle(handle) + + @property + def context(self): + """Returns the CUDA driver context of this buffer. + """ + return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context()) + + def slice(self, offset=0, length=None): + """Return slice of device buffer + + Parameters + ---------- + offset : int, default 0 + Specify offset from the start of device buffer to slice + length : int, default None + Specify the length of slice (default is until end of device + buffer starting from offset). 
If the length is larger than + the data available, the returned slice will have a size of + the available data starting from the offset. + + Returns + ------- + sliced : CudaBuffer + Zero-copy slice of device buffer. + + """ + if offset < 0 or (self.size and offset >= self.size): + raise ValueError('offset argument is out-of-range') + cdef int64_t offset_ = offset + cdef int64_t size + if length is None: + size = self.size - offset_ + elif offset + length <= self.size: + size = length + else: + size = self.size - offset + parent = pyarrow_unwrap_cudabuffer(self) + return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent, + offset_, size)) + + def to_pybytes(self): + """Return device buffer content as Python bytes. + """ + return self.copy_to_host().to_pybytes() + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + # Device buffer contains data pointers on the device. Hence, + # cannot support buffer protocol PEP-3118 for CudaBuffer. + raise BufferError('buffer protocol for device buffer not supported') + + +cdef class HostBuffer(Buffer): + """Device-accessible CPU memory created using cudaHostAlloc. + + To create a HostBuffer instance, use + + cuda.new_host_buffer() + """ + + def __init__(self): + raise TypeError("Do not call HostBuffer's constructor directly," + " use `cuda.new_host_buffer` function instead.") + + cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer): + self.host_buffer = buffer + self.init( buffer) + + @property + def size(self): + return self.host_buffer.get().size() + + +cdef class BufferReader(NativeFile): + """File interface for zero-copy read from CUDA buffers. + + Note: Read methods return pointers to device memory. This means + you must be careful using this interface with any Arrow code which + may expect to be able to do anything other than pointer arithmetic + on the returned buffers. + """ + + def __cinit__(self, CudaBuffer obj): + self.buffer = obj + self.reader = new CCudaBufferReader(self.buffer.buffer) + self.set_random_access_file( + shared_ptr[CRandomAccessFile](self.reader)) + self.is_readable = True + + def read_buffer(self, nbytes=None): + """Return a slice view of the underlying device buffer. + + The slice will start at the current reader position and will + have specified size in bytes. + + Parameters + ---------- + nbytes : int, default None + Specify the number of bytes to read. Default: None (read all + remaining bytes). + + Returns + ------- + cbuf : CudaBuffer + New device buffer. + + """ + cdef: + int64_t c_nbytes + shared_ptr[CCudaBuffer] output + + if nbytes is None: + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + with nogil: + output = static_pointer_cast[CCudaBuffer, CBuffer]( + GetResultValue(self.reader.Read(c_nbytes))) + + return pyarrow_wrap_cudabuffer(output) + + +cdef class BufferWriter(NativeFile): + """File interface for writing to CUDA buffers. + + By default writes are unbuffered. Use set_buffer_size to enable + buffering. + """ + + def __cinit__(self, CudaBuffer buffer): + self.buffer = buffer + self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer) + self.set_output_stream(shared_ptr[COutputStream](self.writer)) + self.is_writable = True + + def writeat(self, int64_t position, object data): + """Write data to buffer starting from position. + + Parameters + ---------- + position : int + Specify device buffer position where the data will be + written. + data : array-like + Specify data, the data instance must implement buffer + protocol. 
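+
+        Examples
+        --------
+        A minimal sketch of writing into a pre-allocated device buffer at
+        an offset; it assumes a CUDA-capable device, and ``ctx.synchronize()``
+        may be needed before reading the data back:
+
+        >>> from pyarrow import cuda  # doctest: +SKIP
+        >>> ctx = cuda.Context(0)  # doctest: +SKIP
+        >>> cbuf = ctx.new_buffer(8)  # doctest: +SKIP
+        >>> writer = cuda.BufferWriter(cbuf)  # doctest: +SKIP
+        >>> writer.writeat(4, b'data')  # doctest: +SKIP
+        >>> cbuf.copy_to_host(position=4, nbytes=4).to_pybytes()  # doctest: +SKIP
+        b'data'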
+ """ + cdef: + Buffer buf = as_buffer(data) + const uint8_t* c_data = buf.buffer.get().data() + int64_t c_size = buf.buffer.get().size() + + with nogil: + check_status(self.writer.WriteAt(position, c_data, c_size)) + + def flush(self): + """ Flush the buffer stream """ + with nogil: + check_status(self.writer.Flush()) + + def seek(self, int64_t position, int whence=0): + # TODO: remove this method after NativeFile.seek supports + # writable files. + cdef int64_t offset + + with nogil: + if whence == 0: + offset = position + elif whence == 1: + offset = GetResultValue(self.writer.Tell()) + offset = offset + position + else: + with gil: + raise ValueError("Invalid value of whence: {0}" + .format(whence)) + check_status(self.writer.Seek(offset)) + return self.tell() + + @property + def buffer_size(self): + """Returns size of host (CPU) buffer, 0 for unbuffered + """ + return self.writer.buffer_size() + + @buffer_size.setter + def buffer_size(self, int64_t buffer_size): + """Set CPU buffer size to limit calls to cudaMemcpy + + Parameters + ---------- + buffer_size : int + Specify the size of CPU buffer to allocate in bytes. + """ + with nogil: + check_status(self.writer.SetBufferSize(buffer_size)) + + @property + def num_bytes_buffered(self): + """Returns number of bytes buffered on host + """ + return self.writer.num_bytes_buffered() + +# Functions + + +def new_host_buffer(const int64_t size, int device=0): + """Return buffer with CUDA-accessible memory on CPU host + + Parameters + ---------- + size : int + Specify the number of bytes to be allocated. + device : int + Specify GPU device number. + + Returns + ------- + dbuf : HostBuffer + Allocated host buffer + """ + cdef shared_ptr[CCudaHostBuffer] buffer + with nogil: + buffer = GetResultValue(AllocateCudaHostBuffer(device, size)) + return pyarrow_wrap_cudahostbuffer(buffer) + + +def serialize_record_batch(object batch, object ctx): + """ Write record batch message to GPU device memory + + Parameters + ---------- + batch : RecordBatch + Record batch to write + ctx : Context + CUDA Context to allocate device memory from + + Returns + ------- + dbuf : CudaBuffer + device buffer which contains the record batch message + """ + cdef shared_ptr[CCudaBuffer] buffer + cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get() + cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get() + with nogil: + buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_)) + return pyarrow_wrap_cudabuffer(buffer) + + +def read_message(object source, pool=None): + """ Read Arrow IPC message located on GPU device + + Parameters + ---------- + source : {CudaBuffer, cuda.BufferReader} + Device buffer or reader of device buffer. + pool : MemoryPool (optional) + Pool to allocate CPU memory for the metadata + + Returns + ------- + message : Message + The deserialized message, body still on device + """ + cdef: + Message result = Message.__new__(Message) + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + if not isinstance(source, BufferReader): + reader = BufferReader(source) + with nogil: + result.message = move( + GetResultValue(ReadMessage(reader.reader, pool_))) + return result + + +def read_record_batch(object buffer, object schema, *, + DictionaryMemo dictionary_memo=None, pool=None): + """Construct RecordBatch referencing IPC message located on CUDA device. + + While the metadata is copied to host memory for deserialization, + the record batch data remains on the device. 
+ + Parameters + ---------- + buffer : + Device buffer containing the complete IPC message + schema : Schema + The schema for the record batch + dictionary_memo : DictionaryMemo, optional + If message contains dictionaries, must pass a populated + DictionaryMemo + pool : MemoryPool (optional) + Pool to allocate metadata from + + Returns + ------- + batch : RecordBatch + Reconstructed record batch, with device pointers + + """ + cdef: + shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema) + shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer) + CDictionaryMemo temp_memo + CDictionaryMemo* arg_dict_memo + CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + shared_ptr[CRecordBatch] batch + + if dictionary_memo is not None: + arg_dict_memo = dictionary_memo.memo + else: + arg_dict_memo = &temp_memo + + with nogil: + batch = GetResultValue(CudaReadRecordBatch( + schema_, arg_dict_memo, buffer_, pool_)) + return pyarrow_wrap_batch(batch) + + +# Public API + + +cdef public api bint pyarrow_is_buffer(object buffer): + return isinstance(buffer, Buffer) + +# cudabuffer + +cdef public api bint pyarrow_is_cudabuffer(object buffer): + return isinstance(buffer, CudaBuffer) + + +cdef public api object \ + pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, base) + return result + + +cdef public api object \ + pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, None) + return result + + +cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj): + if pyarrow_is_cudabuffer(obj): + return (obj).cuda_buffer + raise TypeError('expected CudaBuffer instance, got %s' + % (type(obj).__name__)) + +# cudahostbuffer + +cdef public api bint pyarrow_is_cudahostbuffer(object buffer): + return isinstance(buffer, HostBuffer) + + +cdef public api object \ + pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf): + cdef HostBuffer result = HostBuffer.__new__(HostBuffer) + result.init_host(buf) + return result + + +cdef public api shared_ptr[CCudaHostBuffer] \ + pyarrow_unwrap_cudahostbuffer(object obj): + if pyarrow_is_cudahostbuffer(obj): + return (obj).host_buffer + raise TypeError('expected HostBuffer instance, got %s' + % (type(obj).__name__)) + +# cudacontext + +cdef public api bint pyarrow_is_cudacontext(object ctx): + return isinstance(ctx, Context) + + +cdef public api object \ + pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx): + cdef Context result = Context.__new__(Context) + result.init(ctx) + return result + + +cdef public api shared_ptr[CCudaContext] \ + pyarrow_unwrap_cudacontext(object obj): + if pyarrow_is_cudacontext(obj): + return (obj).context + raise TypeError('expected Context instance, got %s' + % (type(obj).__name__)) + +# cudaipcmemhandle + +cdef public api bint pyarrow_is_cudaipcmemhandle(object handle): + return isinstance(handle, IpcMemHandle) + + +cdef public api object \ + pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h): + cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle) + result.init(h) + return result + + +cdef public api shared_ptr[CCudaIpcMemHandle] \ + pyarrow_unwrap_cudaipcmemhandle(object obj): + if pyarrow_is_cudaipcmemhandle(obj): + return (obj).handle + raise TypeError('expected IpcMemHandle instance, got %s' + % (type(obj).__name__)) diff --git 
a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..220ab6b19affe6b520db3a3501fad2772919f5e4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pxd @@ -0,0 +1,183 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. APIs subject to change without notice.""" + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.lib cimport * +from pyarrow._fs cimport FileSystem, FileInfo + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=*, object file_size=*) + +cdef class DatasetFactory(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDatasetFactory] wrapped + CDatasetFactory* factory + + cdef init(self, const shared_ptr[CDatasetFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp) + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil + + +cdef class Dataset(_Weakrefable): + + cdef: + SharedPtrNoGIL[CDataset] wrapped + CDataset* dataset + public dict _scan_options + + cdef void init(self, const shared_ptr[CDataset]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp) + + cdef shared_ptr[CDataset] unwrap(self) nogil + + +cdef class Scanner(_Weakrefable): + cdef: + SharedPtrNoGIL[CScanner] wrapped + CScanner* scanner + + cdef void init(self, const shared_ptr[CScanner]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp) + + cdef shared_ptr[CScanner] unwrap(self) + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except * + + +cdef class FragmentScanOptions(_Weakrefable): + + cdef: + shared_ptr[CFragmentScanOptions] wrapped + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp) + + +cdef class FileFormat(_Weakrefable): + + cdef: + shared_ptr[CFileFormat] wrapped + CFileFormat* format + + cdef void init(self, const shared_ptr[CFileFormat]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp) + + cdef inline shared_ptr[CFileFormat] unwrap(self) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options) + + # Return a WrittenFile after a file was written. + # May be overridden by subclasses, e.g. to add metadata. 
+ cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer) + + +cdef class FileWriteOptions(_Weakrefable): + + cdef: + shared_ptr[CFileWriteOptions] wrapped + CFileWriteOptions* c_options + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self) + + +cdef class Fragment(_Weakrefable): + + cdef: + SharedPtrNoGIL[CFragment] wrapped + CFragment* fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp) + + cdef inline shared_ptr[CFragment] unwrap(self) + + +cdef class FileFragment(Fragment): + + cdef: + CFileFragment* file_fragment + + cdef void init(self, const shared_ptr[CFragment]& sp) + + +cdef class Partitioning(_Weakrefable): + + cdef: + shared_ptr[CPartitioning] wrapped + CPartitioning* partitioning + + cdef init(self, const shared_ptr[CPartitioning]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp) + + cdef inline shared_ptr[CPartitioning] unwrap(self) + + +cdef class PartitioningFactory(_Weakrefable): + + cdef: + shared_ptr[CPartitioningFactory] wrapped + CPartitioningFactory* factory + object constructor + object options + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp) + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options) + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self) + + +cdef class WrittenFile(_Weakrefable): + + # The full path to the created file + cdef public str path + # Optional Parquet metadata + # This metadata will have the file path attribute set to the path of + # the written file. + cdef public object metadata + # The size of the file in bytes + cdef public int64_t size diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pyx new file mode 100644 index 0000000000000000000000000000000000000000..b93f71969e8d3bbe214aa3b53dc7df41fac53731 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.pyx @@ -0,0 +1,3970 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset is currently unstable. 
APIs subject to change without notice.""" + +from cython.operator cimport dereference as deref + +import codecs +import collections +from libcpp cimport bool + +import pyarrow as pa +from pyarrow.lib cimport * +from pyarrow.lib import ArrowTypeError, frombytes, tobytes, _pac +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow._acero cimport ExecNodeOptions +from pyarrow._compute cimport Expression, _bind +from pyarrow._compute import _forbid_instantiation +from pyarrow._fs cimport FileSystem, FileSelector, FileInfo +from pyarrow._csv cimport ( + ConvertOptions, ParseOptions, ReadOptions, WriteOptions) +from pyarrow.util import _is_iterable, _is_path_like, _stringify_path +from pyarrow._json cimport ParseOptions as JsonParseOptions +from pyarrow._json cimport ReadOptions as JsonReadOptions + + +_DEFAULT_BATCH_SIZE = 2**17 +_DEFAULT_BATCH_READAHEAD = 16 +_DEFAULT_FRAGMENT_READAHEAD = 4 + + +# Initialise support for Datasets in ExecPlan +Initialize() + + +_orc_fileformat = None +_orc_imported = False + + +def _get_orc_fileformat(): + """ + Import OrcFileFormat on first usage (to avoid circular import issue + when `pyarrow._dataset_orc` would be imported first) + """ + global _orc_fileformat + global _orc_imported + if not _orc_imported: + try: + from pyarrow._dataset_orc import OrcFileFormat + _orc_fileformat = OrcFileFormat + except ImportError as e: + _orc_fileformat = None + finally: + _orc_imported = True + return _orc_fileformat + + +_dataset_pq = False + + +def _get_parquet_classes(): + """ + Import Parquet class files on first usage (to avoid circular import issue + when `pyarrow._dataset_parquet` would be imported first) + """ + global _dataset_pq + if _dataset_pq is False: + try: + import pyarrow._dataset_parquet as _dataset_pq + except ImportError: + _dataset_pq = None + + +def _get_parquet_symbol(name): + """ + Get a symbol from pyarrow.parquet if the latter is importable, otherwise + return None. 
+ """ + _get_parquet_classes() + return _dataset_pq and getattr(_dataset_pq, name) + + +cdef CFileSource _make_file_source(object file, FileSystem filesystem=None, object file_size=None): + + cdef: + CFileSource c_source + shared_ptr[CFileSystem] c_filesystem + CFileInfo c_info + c_string c_path + shared_ptr[CRandomAccessFile] c_file + shared_ptr[CBuffer] c_buffer + int64_t c_size + + if isinstance(file, Buffer): + c_buffer = pyarrow_unwrap_buffer(file) + c_source = CFileSource(move(c_buffer)) + elif _is_path_like(file): + if filesystem is None: + raise ValueError("cannot construct a FileSource from " + "a path without a FileSystem") + c_filesystem = filesystem.unwrap() + c_path = tobytes(_stringify_path(file)) + + if file_size is not None: + c_size = file_size + c_info = FileInfo(c_path, size=c_size).unwrap() + c_source = CFileSource(move(c_info), move(c_filesystem)) + else: + c_source = CFileSource(move(c_path), move(c_filesystem)) + elif hasattr(file, 'read'): + # Optimistically hope this is file-like + c_file = get_native_file(file, False).get_random_access_file() + c_source = CFileSource(move(c_file)) + + else: + raise TypeError("cannot construct a FileSource " + "from " + str(file)) + + return c_source + + +cdef CSegmentEncoding _get_segment_encoding(str segment_encoding): + if segment_encoding == "none": + return CSegmentEncoding_None + elif segment_encoding == "uri": + return CSegmentEncoding_Uri + raise ValueError(f"Unknown segment encoding: {segment_encoding}") + + +cdef str _wrap_segment_encoding(CSegmentEncoding segment_encoding): + if segment_encoding == CSegmentEncoding_None: + return "none" + elif segment_encoding == CSegmentEncoding_Uri: + return "uri" + raise ValueError("Unknown segment encoding") + + +cdef Expression _true = Expression._scalar(True) + + +cdef class Dataset(_Weakrefable): + """ + Collection of data fragments and potentially child datasets. + + Arrow Datasets allow you to query against data that has been split across + multiple files. This sharding of data may indicate partitioning, which + can accelerate queries that only touch some partitions (files). + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CDataset]& sp): + self.wrapped = sp + self.dataset = sp.get() + self._scan_options = dict() + + @staticmethod + cdef wrap(const shared_ptr[CDataset]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'union': UnionDataset, + 'filesystem': FileSystemDataset, + 'in-memory': InMemoryDataset, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Dataset self = class_.__new__(class_) + self.init(sp) + return self + + cdef shared_ptr[CDataset] unwrap(self) nogil: + return self.wrapped + + @property + def partition_expression(self): + """ + An Expression which evaluates to true for all data viewed by this + Dataset. + """ + return Expression.wrap(self.dataset.partition_expression()) + + def replace_schema(self, Schema schema not None): + """ + Return a copy of this Dataset with a different schema. + + The copy will view the same Fragments. If the new schema is not + compatible with the original dataset's schema then an error will + be raised. + + Parameters + ---------- + schema : Schema + The new dataset schema. + """ + cdef shared_ptr[CDataset] copy = GetResultValue( + self.dataset.ReplaceSchema(pyarrow_unwrap_schema(schema)) + ) + + d = Dataset.wrap(move(copy)) + if self._scan_options: + # Preserve scan options if set. 
+ d._scan_options = self._scan_options.copy() + return d + + def get_fragments(self, Expression filter=None): + """Returns an iterator over the fragments in this dataset. + + Parameters + ---------- + filter : Expression, default None + Return fragments matching the optional filter, either using the + partition_expression or internal information like Parquet's + statistics. + + Returns + ------- + fragments : iterator of Fragment + """ + if self._scan_options.get("filter") is not None: + # Accessing fragments of a filtered dataset is not supported. + # It would be unclear if you wanted to filter the fragments + # or the rows in those fragments. + raise ValueError( + "Retrieving fragments of a filtered or projected " + "dataset is not allowed. Remove the filtering." + ) + + return self._get_fragments(filter) + + def _get_fragments(self, Expression filter): + cdef: + CExpression c_filter + + if filter is None: + c_fragments = move(GetResultValue(self.dataset.GetFragments())) + else: + c_filter = _bind(filter, self.schema) + c_fragments = move(GetResultValue( + self.dataset.GetFragments(c_filter))) + + for maybe_fragment in c_fragments: + yield Fragment.wrap(GetResultValue(move(maybe_fragment))) + + def _scanner_options(self, options): + """Returns the default options to create a new Scanner. + + This is automatically invoked by :meth:`Dataset.scanner` + and there is no need to use it. + """ + new_options = options.copy() + + # at the moment only support filter + requested_filter = options.get("filter") + current_filter = self._scan_options.get("filter") + if requested_filter is not None and current_filter is not None: + new_options["filter"] = current_filter & requested_filter + elif current_filter is not None: + new_options["filter"] = current_filter + + return new_options + + def scanner(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the dataset. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + See the :meth:`Scanner.from_dataset` method for further information. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + scanner : Scanner + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, "dataset_scanner.parquet") + + >>> import pyarrow.dataset as ds + >>> dataset = ds.dataset("dataset_scanner.parquet") + + Selecting a subset of the columns: + + >>> dataset.scanner(columns=["year", "n_legs"]).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + ---- + year: [[2020,2022,2021,2022,2019,2021]] + n_legs: [[2,2,4,4,5,100]] + + Projecting selected columns using an expression: + + >>> dataset.scanner(columns={ + ... "n_legs_uint": ds.field("n_legs").cast("uint8"), + ... }).to_table() + pyarrow.Table + n_legs_uint: uint8 + ---- + n_legs_uint: [[2,2,4,4,5,100]] + + Filtering rows while scanning: + + >>> dataset.scanner(filter=ds.field("year") > 2020).to_table() + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2022,2021,2022,2021]] + n_legs: [[2,4,4,100]] + animal: [["Parrot","Dog","Horse","Centipede"]] + """ + return Scanner.from_dataset( + self, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset as materialized record batches. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. 
+ + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the dataset to an Arrow table. + + Note that this method reads all the selected data from the dataset + into memory. + + Parameters + ---------- + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). 
+ + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. 
+ If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. 
+ batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + table : Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).head(num_rows) + + def count_rows(self, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Count rows matching the scanner filter. + + Parameters + ---------- + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + count : int + """ + return self.scanner( + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).count_rows() + + @property + def schema(self): + """The common schema of the full Dataset""" + return pyarrow_wrap_schema(self.dataset.schema()) + + def filter(self, expression not None): + """ + Apply a row filter to the dataset. + + Parameters + ---------- + expression : Expression + The filter that should be applied to the dataset. 
+ + Returns + ------- + Dataset + """ + cdef: + Dataset filtered_dataset + + new_filter = expression + current_filter = self._scan_options.get("filter") + if current_filter is not None and new_filter is not None: + new_filter = current_filter & new_filter + + filtered_dataset = self.__class__.__new__(self.__class__) + filtered_dataset.init(self.wrapped) + filtered_dataset._scan_options = dict(filter=new_filter) + return filtered_dataset + + def sort_by(self, sorting, **kwargs): + """ + Sort the Dataset by one or multiple columns. + + Parameters + ---------- + sorting : str or list[tuple(name, order)] + Name of the column to use to sort (ascending), or + a list of multiple sorting conditions where + each entry is a tuple with column name + and sorting order ("ascending" or "descending") + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + InMemoryDataset + A new dataset sorted according to the sort keys. + """ + if isinstance(sorting, str): + sorting = [(sorting, "ascending")] + + res = _pac()._sort_source( + self, output_type=InMemoryDataset, sort_keys=sorting, **kwargs + ) + return res + + def join(self, right_dataset, keys, right_keys=None, join_type="left outer", + left_suffix=None, right_suffix=None, coalesce_keys=True, + use_threads=True): + """ + Perform a join between this dataset and another one. + + Result of the join will be a new dataset, where further + operations can be applied. + + Parameters + ---------- + right_dataset : dataset + The dataset to join to the current one, acting as the right dataset + in the join operation. + keys : str or list[str] + The columns from current dataset that should be used as keys + of the join operation left side. + right_keys : str or list[str], default None + The columns from the right_dataset that should be used as keys + on the join operation right side. + When ``None`` use the same key names as the left dataset. + join_type : str, default "left outer" + The kind of join that should be performed, one of + ("left semi", "right semi", "left anti", "right anti", + "inner", "left outer", "right outer", "full outer") + left_suffix : str, default None + Which suffix to add to right column names. This prevents confusion + when the columns in left and right datasets have colliding names. + right_suffix : str, default None + Which suffix to add to the left column names. This prevents confusion + when the columns in left and right datasets have colliding names. + coalesce_keys : bool, default True + If the duplicated keys should be omitted from one of the sides + in the join result. + use_threads : bool, default True + Whenever to use multithreading or not. + + Returns + ------- + InMemoryDataset + """ + if right_keys is None: + right_keys = keys + return _pac()._perform_join( + join_type, self, keys, right_dataset, right_keys, + left_suffix=left_suffix, right_suffix=right_suffix, + use_threads=use_threads, coalesce_keys=coalesce_keys, + output_type=InMemoryDataset + ) + + +cdef class InMemoryDataset(Dataset): + """ + A Dataset wrapping in-memory data. + + Parameters + ---------- + source : RecordBatch, Table, list, tuple + The data for this dataset. Can be a RecordBatch, Table, list of + RecordBatch/Table, iterable of RecordBatch, or a RecordBatchReader + If an iterable is provided, the schema must also be provided. 
+ schema : Schema, optional + Only required if passing an iterable as the source + """ + + cdef: + CInMemoryDataset* in_memory_dataset + + def __init__(self, source, Schema schema=None): + cdef: + shared_ptr[CInMemoryDataset] in_memory_dataset + + if isinstance(source, (pa.RecordBatch, pa.Table)): + source = [source] + + if isinstance(source, (list, tuple)): + batches = [] + for item in source: + if isinstance(item, pa.RecordBatch): + batches.append(item) + elif isinstance(item, pa.Table): + batches.extend(item.to_batches()) + else: + raise TypeError( + 'Expected a list of tables or batches. The given list ' + 'contains a ' + type(item).__name__) + if schema is None: + schema = item.schema + elif not schema.equals(item.schema): + raise ArrowTypeError( + f'Item has schema\n{item.schema}\nwhich does not ' + f'match expected schema\n{schema}') + if not batches and schema is None: + raise ValueError('Must provide schema to construct in-memory ' + 'dataset from an empty list') + table = pa.Table.from_batches(batches, schema=schema) + in_memory_dataset = make_shared[CInMemoryDataset]( + pyarrow_unwrap_table(table)) + else: + raise TypeError( + 'Expected a table, batch, or list of tables/batches ' + 'instead of the given type: ' + + type(source).__name__ + ) + + self.init( in_memory_dataset) + + cdef void init(self, const shared_ptr[CDataset]& sp): + Dataset.init(self, sp) + self.in_memory_dataset = sp.get() + + +cdef class UnionDataset(Dataset): + """ + A Dataset wrapping child datasets. + + Children's schemas must agree with the provided schema. + + Parameters + ---------- + schema : Schema + A known schema to conform to. + children : list of Dataset + One or more input children + """ + + cdef: + CUnionDataset* union_dataset + + def __init__(self, Schema schema not None, children): + cdef: + Dataset child + CDatasetVector c_children + shared_ptr[CUnionDataset] union_dataset + + for child in children: + c_children.push_back(child.wrapped) + + union_dataset = GetResultValue(CUnionDataset.Make( + pyarrow_unwrap_schema(schema), move(c_children))) + self.init( union_dataset) + + cdef void init(self, const shared_ptr[CDataset]& sp): + Dataset.init(self, sp) + self.union_dataset = sp.get() + + def __reduce__(self): + return UnionDataset, (self.schema, self.children) + + @property + def children(self): + cdef CDatasetVector children = self.union_dataset.children() + return [Dataset.wrap(children[i]) for i in range(children.size())] + + +cdef class FileSystemDataset(Dataset): + """ + A Dataset of file fragments. + + A FileSystemDataset is composed of one or more FileFragment. + + Parameters + ---------- + fragments : list[Fragments] + List of fragments to consume. + schema : Schema + The top-level schema of the Dataset. + format : FileFormat + File format of the fragments, currently only ParquetFileFormat, + IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported. + filesystem : FileSystem + FileSystem of the fragments. + root_partition : Expression, optional + The top-level partition of the DataDataset. 
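+
+    Examples
+    --------
+    A minimal sketch; the path and file names are hypothetical, and a
+    FileSystemDataset is usually obtained through the ``dataset()``
+    factory function rather than constructed directly:
+
+    >>> import pyarrow.dataset as ds  # doctest: +SKIP
+    >>> dataset = ds.dataset("/path/to/table", format="parquet")  # doctest: +SKIP
+    >>> isinstance(dataset, ds.FileSystemDataset)  # doctest: +SKIP
+    True
+    >>> dataset.files  # doctest: +SKIP
+    ['/path/to/table/part-0.parquet', ...]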
+ """ + + cdef: + CFileSystemDataset* filesystem_dataset + + def __init__(self, fragments, Schema schema, FileFormat format, + FileSystem filesystem=None, root_partition=None): + cdef: + FileFragment fragment=None + vector[shared_ptr[CFileFragment]] c_fragments + CResult[shared_ptr[CDataset]] result + shared_ptr[CFileSystem] c_filesystem + + if root_partition is None: + root_partition = _true + elif not isinstance(root_partition, Expression): + raise TypeError( + "Argument 'root_partition' has incorrect type (expected " + "Expression, got {0})".format(type(root_partition)) + ) + + for fragment in fragments: + c_fragments.push_back( + static_pointer_cast[CFileFragment, CFragment]( + fragment.unwrap())) + + if filesystem is None: + filesystem = fragment.filesystem + + if filesystem is not None: + c_filesystem = filesystem.unwrap() + + result = CFileSystemDataset.Make( + pyarrow_unwrap_schema(schema), + ( root_partition).unwrap(), + format.unwrap(), + c_filesystem, + c_fragments + ) + self.init(GetResultValue(result)) + + @property + def filesystem(self): + return FileSystem.wrap(self.filesystem_dataset.filesystem()) + + @property + def partitioning(self): + """ + The partitioning of the Dataset source, if discovered. + + If the FileSystemDataset is created using the ``dataset()`` factory + function with a partitioning specified, this will return the + finalized Partitioning object from the dataset discovery. In all + other cases, this returns None. + """ + c_partitioning = self.filesystem_dataset.partitioning() + if c_partitioning.get() == nullptr: + return None + try: + return Partitioning.wrap(c_partitioning) + except TypeError: + # e.g. type_name "default" + return None + + cdef void init(self, const shared_ptr[CDataset]& sp): + Dataset.init(self, sp) + self.filesystem_dataset = sp.get() + + def __reduce__(self): + return FileSystemDataset, ( + list(self.get_fragments()), + self.schema, + self.format, + self.filesystem, + self.partition_expression + ) + + @classmethod + def from_paths(cls, paths, schema=None, format=None, + filesystem=None, partitions=None, root_partition=None): + """ + A Dataset created from a list of paths on a particular filesystem. + + Parameters + ---------- + paths : list of str + List of file paths to create the fragments from. + schema : Schema + The top-level schema of the DataDataset. + format : FileFormat + File format to create fragments from, currently only + ParquetFileFormat, IpcFileFormat, CsvFileFormat, and JsonFileFormat are supported. + filesystem : FileSystem + The filesystem which files are from. + partitions : list[Expression], optional + Attach additional partition information for the file paths. + root_partition : Expression, optional + The top-level partition of the DataDataset. + """ + if root_partition is None: + root_partition = _true + + for arg, class_, name in [ + (schema, Schema, 'schema'), + (format, FileFormat, 'format'), + (filesystem, FileSystem, 'filesystem'), + (root_partition, Expression, 'root_partition') + ]: + if not isinstance(arg, class_): + raise TypeError( + "Argument '{0}' has incorrect type (expected {1}, " + "got {2})".format(name, class_.__name__, type(arg)) + ) + + partitions = partitions or [_true] * len(paths) + + if len(paths) != len(partitions): + raise ValueError( + 'The number of files resulting from paths_or_selector ' + 'must be equal to the number of partitions.' 
+ ) + + fragments = [ + format.make_fragment(path, filesystem, partitions[i]) + for i, path in enumerate(paths) + ] + return FileSystemDataset(fragments, schema, format, + filesystem, root_partition) + + @property + def files(self): + """List of the files""" + cdef vector[c_string] files = self.filesystem_dataset.files() + return [frombytes(f) for f in files] + + @property + def format(self): + """The FileFormat of this source.""" + return FileFormat.wrap(self.filesystem_dataset.format()) + + +cdef class FileWriteOptions(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + self.wrapped = sp + self.c_options = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileWriteOptions]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFileWriteOptions, + 'ipc': IpcFileWriteOptions, + 'parquet': _get_parquet_symbol('ParquetFileWriteOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileWriteOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def format(self): + return FileFormat.wrap(self.c_options.format()) + + cdef inline shared_ptr[CFileWriteOptions] unwrap(self): + return self.wrapped + + +cdef class FileFormat(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + self.wrapped = sp + self.format = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFileFormat]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'ipc': IpcFileFormat, + 'csv': CsvFileFormat, + 'json': JsonFileFormat, + 'parquet': _get_parquet_symbol('ParquetFileFormat'), + 'orc': _get_orc_fileformat(), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FileFormat self = class_.__new__(class_) + self.init(sp) + return self + + cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer): + parquet_metadata = None + size = GetResultValue(file_writer.GetBytesWritten()) + return WrittenFile(path, parquet_metadata, size) + + cdef inline shared_ptr[CFileFormat] unwrap(self): + return self.wrapped + + def inspect(self, file, filesystem=None): + """ + Infer the schema of a file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to infer a schema from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + + Returns + ------- + schema : Schema + The schema inferred from the file + """ + cdef: + CFileSource c_source = _make_file_source(file, filesystem, file_size=None) + CResult[shared_ptr[CSchema]] c_result + with nogil: + c_result = self.format.Inspect(c_source) + c_schema = GetResultValue(c_result) + return pyarrow_wrap_schema(move(c_schema)) + + def make_fragment(self, file, filesystem=None, + Expression partition_expression=None, + *, file_size=None): + """ + Make a FileFragment from a given file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to make a fragment from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + partition_expression : Expression, optional + An expression that is guaranteed true for all rows in the fragment. 
Allows + fragment to be potentially skipped while scanning with a filter. + file_size : int, optional + The size of the file in bytes. Can improve performance with high-latency filesystems + when file size needs to be known before reading. + + Returns + ------- + fragment : Fragment + The file fragment + """ + if partition_expression is None: + partition_expression = _true + c_source = _make_file_source(file, filesystem, file_size) + c_fragment = GetResultValue( + self.format.MakeFragment(move(c_source), + partition_expression.unwrap(), + nullptr)) + return Fragment.wrap(move(c_fragment)) + + def make_write_options(self): + sp_write_options = self.format.DefaultWriteOptions() + if sp_write_options.get() == nullptr: + # DefaultWriteOptions() may return `nullptr` which means that + # the format does not yet support writing datasets. + raise NotImplementedError( + "Writing datasets not yet implemented for this file format." + ) + return FileWriteOptions.wrap(sp_write_options) + + @property + def default_extname(self): + return frombytes(self.format.type_name()) + + @property + def default_fragment_scan_options(self): + dfso = FragmentScanOptions.wrap( + self.wrapped.get().default_fragment_scan_options) + # CsvFileFormat stores a Python-specific encoding field that needs + # to be restored because it does not exist in the C++ struct + if isinstance(self, CsvFileFormat): + if self._read_options_py is not None: + dfso.read_options = self._read_options_py + return dfso + + @default_fragment_scan_options.setter + def default_fragment_scan_options(self, FragmentScanOptions options): + if options is None: + self.wrapped.get().default_fragment_scan_options =\ + nullptr + else: + self._set_default_fragment_scan_options(options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + raise ValueError(f"Cannot set fragment scan options for " + f"'{options.type_name}' on {self.__class__.__name__}") + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class Fragment(_Weakrefable): + """Fragment of data from a Dataset.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragment]& sp): + self.wrapped = sp + self.fragment = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CFragment]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + # IpcFileFormat, CsvFileFormat, JsonFileFormat and OrcFileFormat do not have + # corresponding subclasses of FileFragment + 'ipc': FileFragment, + 'csv': FileFragment, + 'json': FileFragment, + 'orc': FileFragment, + 'parquet': _get_parquet_symbol('ParquetFileFragment'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + class_ = Fragment + + cdef Fragment self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CFragment] unwrap(self): + return self.wrapped + + @property + def physical_schema(self): + """Return the physical schema of this Fragment. This schema can be + different from the dataset read schema.""" + cdef: + CResult[shared_ptr[CSchema]] maybe_schema + with nogil: + maybe_schema = self.fragment.ReadPhysicalSchema() + return pyarrow_wrap_schema(GetResultValue(maybe_schema)) + + @property + def partition_expression(self): + """An Expression which evaluates to true for all data viewed by this + Fragment. 
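+
+        Examples
+        --------
+        A minimal sketch; it assumes ``dataset`` is a hive-partitioned
+        FileSystemDataset with a ``year`` partition field:
+
+        >>> fragment = next(iter(dataset.get_fragments()))  # doctest: +SKIP
+        >>> fragment.partition_expression  # doctest: +SKIP
+        <pyarrow.compute.Expression (year == 2020)>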
+ """ + return Expression.wrap(self.fragment.partition_expression()) + + def scanner(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Build a scan operation against the fragment. + + Data is not loaded immediately. Instead, this produces a Scanner, + which exposes further operations (e.g. loading all data as a + table, counting rows). + + Parameters + ---------- + schema : Schema + Schema to use for scanning. This is used to unify a Fragment to + its Dataset's schema. If not specified this will use the + Fragment's physical schema which might differ for each Fragment. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + scanner : Scanner + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ) + + def to_batches(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Read the fragment as materialized record batches. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + record_batches : iterator of RecordBatch + """ + return Scanner.from_fragment( + self, + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_batches() + + def to_table(self, + Schema schema=None, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Convert this Fragment into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. + + Parameters + ---------- + schema : Schema, optional + Concrete schema to use for scanning. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + table : Table + """ + return self.scanner( + schema=schema, + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).to_table() + + def take(self, + object indices, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Select rows of data by index. + + Parameters + ---------- + indices : Array or array-like + The indices of row to select in the dataset. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).take(indices) + + def head(self, + int num_rows, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Load the first N rows of the fragment. + + Parameters + ---------- + num_rows : int + The number of rows to load. + columns : list of str, default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. 
+ + Returns + ------- + Table + """ + return self.scanner( + columns=columns, + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).head(num_rows) + + def count_rows(self, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, + MemoryPool memory_pool=None): + """ + Count rows matching the scanner filter. + + Parameters + ---------- + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + + Returns + ------- + count : int + """ + return self.scanner( + filter=filter, + batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + fragment_scan_options=fragment_scan_options, + use_threads=use_threads, + memory_pool=memory_pool + ).count_rows() + + +cdef class FileFragment(Fragment): + """A Fragment representing a data file.""" + + cdef void init(self, const shared_ptr[CFragment]& sp): + Fragment.init(self, sp) + self.file_fragment = sp.get() + + def __repr__(self): + type_name = frombytes(self.fragment.type_name()) + if type_name != "parquet": + typ = f" type={type_name}" + else: + # parquet has a subclass -> type embedded in class name + typ = "" + partition_dict = get_partition_keys(self.partition_expression) + partition = ", ".join( + [f"{key}={val}" for key, val in partition_dict.items()] + ) + if partition: + partition = f" partition=[{partition}]" + return "".format( + self.__class__.__name__, typ, self.path, partition + ) + + def __reduce__(self): + buffer = self.buffer + return self.format.make_fragment, ( + self.path if buffer is None else buffer, + self.filesystem, + self.partition_expression + ) + + def open(self): + """ + Open a NativeFile of the buffer or file viewed by this fragment. 
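+
+        A rough usage sketch; ``fragment`` here stands for a hypothetical
+        existing ``FileFragment``:
+
+        >>> source = fragment.open()  # doctest: +SKIP
+        >>> header = source.read(4)  # doctest: +SKIP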
+ """ + cdef: + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CRandomAccessFile] opened + c_string c_path + NativeFile out = NativeFile() + + if self.buffer is not None: + return pa.BufferReader(self.buffer) + + c_path = tobytes(self.file_fragment.source().path()) + with nogil: + c_filesystem = self.file_fragment.source().filesystem() + opened = GetResultValue(c_filesystem.get().OpenInputFile(c_path)) + + out.set_random_access_file(opened) + out.is_readable = True + return out + + @property + def path(self): + """ + The path of the data file viewed by this fragment, if it views a + file. If instead it views a buffer, this will be "". + """ + return frombytes(self.file_fragment.source().path()) + + @property + def filesystem(self): + """ + The FileSystem containing the data file viewed by this fragment, if + it views a file. If instead it views a buffer, this will be None. + """ + cdef: + shared_ptr[CFileSystem] c_fs + c_fs = self.file_fragment.source().filesystem() + + if c_fs.get() == nullptr: + return None + + return FileSystem.wrap(c_fs) + + @property + def buffer(self): + """ + The buffer viewed by this fragment, if it views a buffer. If + instead it views a file, this will be None. + """ + cdef: + shared_ptr[CBuffer] c_buffer + c_buffer = self.file_fragment.source().buffer() + + if c_buffer.get() == nullptr: + return None + + return pyarrow_wrap_buffer(c_buffer) + + @property + def format(self): + """ + The format of the data file viewed by this fragment. + """ + return FileFormat.wrap(self.file_fragment.format()) + + +cdef class FragmentScanOptions(_Weakrefable): + """Scan options specific to a particular fragment and scan operation.""" + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + self.wrapped = sp + + @staticmethod + cdef wrap(const shared_ptr[CFragmentScanOptions]& sp): + if not sp: + return None + + type_name = frombytes(sp.get().type_name()) + + classes = { + 'csv': CsvFragmentScanOptions, + 'json': JsonFragmentScanOptions, + 'parquet': _get_parquet_symbol('ParquetFragmentScanOptions'), + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef FragmentScanOptions self = class_.__new__(class_) + self.init(sp) + return self + + @property + def type_name(self): + return frombytes(self.wrapped.get().type_name()) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class IpcFileWriteOptions(FileWriteOptions): + cdef: + CIpcFileWriteOptions* ipc_options + + def __init__(self): + _forbid_instantiation(self.__class__) + + @property + def write_options(self): + out = IpcWriteOptions() + out.c_options = CIpcWriteOptions(deref(self.ipc_options.options)) + return out + + @write_options.setter + def write_options(self, IpcWriteOptions write_options not None): + self.ipc_options.options.reset( + new CIpcWriteOptions(write_options.c_options)) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.ipc_options = sp.get() + + +cdef class IpcFileFormat(FileFormat): + + def __init__(self): + self.init(shared_ptr[CFileFormat](new CIpcFileFormat())) + + def equals(self, IpcFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.IpcFileFormat + + Returns + ------- + True + """ + return True + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.ipc.IpcWriteOptions + 
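+
+        Examples
+        --------
+        A minimal sketch; keyword arguments are forwarded to
+        ``pyarrow.ipc.IpcWriteOptions`` (codec availability depends on the
+        Arrow build):
+
+        >>> import pyarrow.dataset as ds
+        >>> options = ds.IpcFileFormat().make_write_options(compression="zstd")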
""" + cdef IpcFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = IpcWriteOptions(**kwargs) + return opts + + @property + def default_extname(self): + return "arrow" + + def __reduce__(self): + return IpcFileFormat, tuple() + + +cdef class FeatherFileFormat(IpcFileFormat): + + @property + def default_extname(self): + return "feather" + + +cdef class CsvFileFormat(FileFormat): + """ + FileFormat for CSV files. + + Parameters + ---------- + parse_options : pyarrow.csv.ParseOptions + Options regarding CSV parsing. + default_fragment_scan_options : CsvFragmentScanOptions + Default options for fragments scan. + convert_options : pyarrow.csv.ConvertOptions + Options regarding value conversion. + read_options : pyarrow.csv.ReadOptions + General read options. + """ + cdef: + CCsvFileFormat* csv_format + # The encoding field in ReadOptions does not exist in the C++ struct. + # We need to store it here and override it when reading + # default_fragment_scan_options.read_options + public ReadOptions _read_options_py + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, ParseOptions parse_options=None, + default_fragment_scan_options=None, + ConvertOptions convert_options=None, + ReadOptions read_options=None): + self.init(shared_ptr[CFileFormat](new CCsvFileFormat())) + if parse_options is not None: + self.parse_options = parse_options + if convert_options is not None or read_options is not None: + if default_fragment_scan_options: + raise ValueError('If `default_fragment_scan_options` is ' + 'given, cannot specify convert_options ' + 'or read_options') + self.default_fragment_scan_options = CsvFragmentScanOptions( + convert_options=convert_options, read_options=read_options) + elif isinstance(default_fragment_scan_options, dict): + self.default_fragment_scan_options = CsvFragmentScanOptions( + **default_fragment_scan_options) + elif isinstance(default_fragment_scan_options, CsvFragmentScanOptions): + self.default_fragment_scan_options = default_fragment_scan_options + elif default_fragment_scan_options is not None: + raise TypeError('`default_fragment_scan_options` must be either ' + 'a dictionary or an instance of ' + 'CsvFragmentScanOptions') + if read_options is not None: + self._read_options_py = read_options + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.csv_format = sp.get() + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.csv.WriteOptions + """ + cdef CsvFileWriteOptions opts = \ + FileFormat.make_write_options(self) + opts.write_options = WriteOptions(**kwargs) + return opts + + @property + def parse_options(self): + return ParseOptions.wrap(self.csv_format.parse_options) + + @parse_options.setter + def parse_options(self, ParseOptions parse_options not None): + self.csv_format.parse_options = deref(parse_options.options) + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'csv': + self.csv_format.default_fragment_scan_options = options.wrapped + self.default_fragment_scan_options.read_options = options.read_options + self._read_options_py = options.read_options + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, CsvFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.CsvFileFormat + + Returns + ------- + bool + """ + return ( + self.parse_options.equals(other.parse_options) and + 
self.default_fragment_scan_options == + other.default_fragment_scan_options) + + def __reduce__(self): + return CsvFileFormat, (self.parse_options, + self.default_fragment_scan_options) + + def __repr__(self): + return f"" + + +cdef class CsvFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for CSV fragments. + + Parameters + ---------- + convert_options : pyarrow.csv.ConvertOptions + Options regarding value conversion. + read_options : pyarrow.csv.ReadOptions + General read options. + """ + + cdef: + CCsvFragmentScanOptions* csv_options + # The encoding field in ReadOptions does not exist in the C++ struct. + # We need to store it here and override it when reading read_options + ReadOptions _read_options_py + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, ConvertOptions convert_options=None, + ReadOptions read_options=None): + self.init(shared_ptr[CFragmentScanOptions]( + new CCsvFragmentScanOptions())) + if convert_options is not None: + self.convert_options = convert_options + if read_options is not None: + self.read_options = read_options + self._read_options_py = read_options + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.csv_options = sp.get() + + @property + def convert_options(self): + return ConvertOptions.wrap(self.csv_options.convert_options) + + @convert_options.setter + def convert_options(self, ConvertOptions convert_options not None): + self.csv_options.convert_options = deref(convert_options.options) + + @property + def read_options(self): + read_options = ReadOptions.wrap(self.csv_options.read_options) + if self._read_options_py is not None: + read_options.encoding = self._read_options_py.encoding + return read_options + + @read_options.setter + def read_options(self, ReadOptions read_options not None): + self.csv_options.read_options = deref(read_options.options) + self._read_options_py = read_options + if codecs.lookup(read_options.encoding).name != 'utf-8': + self.csv_options.stream_transform_func = deref( + make_streamwrap_func(read_options.encoding, 'utf-8')) + + def equals(self, CsvFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.CsvFragmentScanOptions + + Returns + ------- + bool + """ + return ( + other and + self.convert_options.equals(other.convert_options) and + self.read_options.equals(other.read_options)) + + def __reduce__(self): + return CsvFragmentScanOptions, (self.convert_options, + self.read_options) + + +cdef class CsvFileWriteOptions(FileWriteOptions): + cdef: + CCsvFileWriteOptions* csv_options + object _properties + + def __init__(self): + _forbid_instantiation(self.__class__) + + @property + def write_options(self): + return WriteOptions.wrap(deref(self.csv_options.write_options)) + + @write_options.setter + def write_options(self, WriteOptions write_options not None): + self.csv_options.write_options.reset( + new CCSVWriteOptions(deref(write_options.options))) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.csv_options = sp.get() + + +cdef class JsonFileFormat(FileFormat): + """ + FileFormat for JSON files. + + Parameters + ---------- + default_fragment_scan_options : JsonFragmentScanOptions + Default options for fragments scan. + parse_options : pyarrow.json.ParseOptions + Options regarding json parsing. + read_options : pyarrow.json.ReadOptions + General read options. 
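+
+    Examples
+    --------
+    A minimal sketch of constructing the format with explicit read options
+    (the block size value is arbitrary):
+
+    >>> import pyarrow.dataset as ds
+    >>> import pyarrow.json as pj
+    >>> json_format = ds.JsonFileFormat(
+    ...     read_options=pj.ReadOptions(block_size=1 << 20))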
+ """ + cdef: + CJsonFileFormat* json_format + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, default_fragment_scan_options=None, + JsonParseOptions parse_options=None, + JsonReadOptions read_options=None): + self.init(shared_ptr[CFileFormat](new CJsonFileFormat())) + if parse_options is not None or read_options is not None: + if default_fragment_scan_options is not None: + raise ValueError('If `default_fragment_scan_options` is ' + 'given, cannot specify read_options') + self.default_fragment_scan_options = JsonFragmentScanOptions( + parse_options=parse_options, + read_options=read_options) + elif isinstance(default_fragment_scan_options, dict): + self.default_fragment_scan_options = JsonFragmentScanOptions( + **default_fragment_scan_options) + elif isinstance(default_fragment_scan_options, JsonFragmentScanOptions): + self.default_fragment_scan_options = default_fragment_scan_options + elif default_fragment_scan_options is not None: + raise TypeError('`default_fragment_scan_options` must be either ' + 'a dictionary or an instance of ' + 'JsonFragmentScanOptions') + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.json_format = sp.get() + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'json': + self.json_format.default_fragment_scan_options = options.wrapped + self.default_fragment_scan_options.read_options = options.read_options + self.default_fragment_scan_options.parse_options = options.parse_options + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, JsonFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.JsonFileFormat + + Returns + ------- + bool + """ + return (other and + self.default_fragment_scan_options == + other.default_fragment_scan_options) + + def __reduce__(self): + return JsonFileFormat, (self.default_fragment_scan_options,) + + def __repr__(self): + return "" + + +cdef class JsonFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for JSON fragments. + + Parameters + ---------- + parse_options : pyarrow.json.ParseOptions + Options regarding JSON parsing. + read_options : pyarrow.json.ReadOptions + General read options. 
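+
+    Examples
+    --------
+    A minimal sketch; the parse option shown is just an illustration:
+
+    >>> import pyarrow.dataset as ds
+    >>> import pyarrow.json as pj
+    >>> scan_opts = ds.JsonFragmentScanOptions(
+    ...     parse_options=pj.ParseOptions(newlines_in_values=True))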
+ """ + cdef: + CJsonFragmentScanOptions* json_options + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, JsonParseOptions parse_options=None, + JsonReadOptions read_options=None): + self.init(shared_ptr[CFragmentScanOptions]( + new CJsonFragmentScanOptions())) + if parse_options is not None: + self.parse_options = parse_options + if read_options is not None: + self.read_options = read_options + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.json_options = sp.get() + + @property + def parse_options(self): + return JsonParseOptions.wrap(self.json_options.parse_options) + + @parse_options.setter + def parse_options(self, JsonParseOptions parse_options not None): + self.json_options.parse_options = parse_options.options + + @property + def read_options(self): + return JsonReadOptions.wrap(self.json_options.read_options) + + @read_options.setter + def read_options(self, JsonReadOptions read_options not None): + self.json_options.read_options = read_options.options + + def equals(self, JsonFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.JsonFragmentScanOptions + + Returns + ------- + bool + """ + return ( + other and + self.read_options.equals(other.read_options) and + self.parse_options.equals(other.parse_options)) + + def __reduce__(self): + return JsonFragmentScanOptions, (self.parse_options, self.read_options) + + +cdef class Partitioning(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + self.wrapped = sp + self.partitioning = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioning]& sp): + type_name = frombytes(sp.get().type_name()) + + classes = { + 'directory': DirectoryPartitioning, + 'hive': HivePartitioning, + 'filename': FilenamePartitioning, + } + + class_ = classes.get(type_name, None) + if class_ is None: + raise TypeError(type_name) + + cdef Partitioning self = class_.__new__(class_) + self.init(sp) + return self + + cdef inline shared_ptr[CPartitioning] unwrap(self): + return self.wrapped + + def __eq__(self, other): + if isinstance(other, Partitioning): + return self.partitioning.Equals(deref((other).unwrap())) + return False + + def parse(self, path): + """ + Parse a path into a partition expression. 
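+
+        The returned expression can be combined with others or passed as a
+        ``filter`` when scanning a dataset, so that only matching partitions
+        are read.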
+ + Parameters + ---------- + path : str + + Returns + ------- + pyarrow.dataset.Expression + """ + cdef CResult[CExpression] result + result = self.partitioning.Parse(tobytes(path)) + return Expression.wrap(GetResultValue(result)) + + @property + def schema(self): + """The arrow Schema attached to the partitioning.""" + return pyarrow_wrap_schema(self.partitioning.schema()) + + +cdef class PartitioningFactory(_Weakrefable): + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioningFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CPartitioningFactory]& sp, + object constructor, object options): + cdef PartitioningFactory self = PartitioningFactory.__new__( + PartitioningFactory + ) + self.init(sp) + self.constructor = constructor + self.options = options + return self + + cdef inline shared_ptr[CPartitioningFactory] unwrap(self): + return self.wrapped + + def __reduce__(self): + return self.constructor, self.options + + @property + def type_name(self): + return frombytes(self.factory.type_name()) + + +cdef vector[shared_ptr[CArray]] _partitioning_dictionaries( + Schema schema, dictionaries) except *: + cdef: + vector[shared_ptr[CArray]] c_dictionaries + + dictionaries = dictionaries or {} + + for field in schema: + dictionary = dictionaries.get(field.name) + + if (isinstance(field.type, pa.DictionaryType) and + dictionary is not None): + c_dictionaries.push_back(pyarrow_unwrap_array(dictionary)) + else: + c_dictionaries.push_back( nullptr) + + return c_dictionaries + + +cdef class KeyValuePartitioning(Partitioning): + + cdef: + CKeyValuePartitioning* keyvalue_partitioning + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + Partitioning.init(self, sp) + self.keyvalue_partitioning = sp.get() + self.wrapped = sp + self.partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + return self.__class__, (self.schema, dictionaries, segment_encoding) + + @property + def dictionaries(self): + """ + The unique values for each partition field, if available. + + Those values are only available if the Partitioning object was + created through dataset discovery from a PartitioningFactory, or + if the dictionaries were manually specified in the constructor. + If no dictionary field is available, this returns an empty list. + """ + cdef vector[shared_ptr[CArray]] c_arrays + c_arrays = self.keyvalue_partitioning.dictionaries() + res = [] + for arr in c_arrays: + if arr.get() == nullptr: + # Partitioning object has not been created through + # inspected Factory + res.append(None) + else: + res.append(pyarrow_wrap_array(arr)) + return res + + +def _constructor_directory_partitioning_factory(*args): + return DirectoryPartitioning.discover(*args) + + +cdef class DirectoryPartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The DirectoryPartitioning expects one segment in the file path for each + field in the schema (all fields are required to be present). + For example given schema the path "/2009/11" would + be parsed to ("year"_ == 2009 and "month"_ == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. 
+ dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + DirectoryPartitioning + + Examples + -------- + >>> from pyarrow.dataset import DirectoryPartitioning + >>> partitioning = DirectoryPartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/2009/11/")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CDirectoryPartitioning* directory_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CDirectoryPartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CDirectoryPartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.directory_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + max_partition_dictionary_size=0, + schema=None, segment_encoding="uri"): + """ + Discover a DirectoryPartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
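+
+        Examples
+        --------
+        A minimal sketch; the field names and dataset path are hypothetical:
+
+        >>> import pyarrow.dataset as ds
+        >>> factory = ds.DirectoryPartitioning.discover(["year", "month"])
+        >>> dataset = ds.dataset("/path/to/data", partitioning=factory)  # doctest: +SKIP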
+ """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise ValueError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CDirectoryPartitioning.MakeFactory(c_field_names, c_options), + _constructor_directory_partitioning_factory, + (field_names, infer_dictionary, max_partition_dictionary_size, + schema, segment_encoding) + ) + + +def _constructor_hive_partitioning_factory(*args): + return HivePartitioning.discover(*args) + + +cdef class HivePartitioning(KeyValuePartitioning): + """ + A Partitioning for "/$key=$value/" nested directories as found in + Apache Hive. + + Multi-level, directory based partitioning scheme originating from + Apache Hive with all data files stored in the leaf directories. Data is + partitioned by static values of a particular column in the schema. + Partition keys are represented in the form $key=$value in directory names. + Field order is ignored, as are missing or unrecognized field names. + + For example, given schema, a possible + path would be "/year=2009/month=11/day=15". + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + If any field is None then this fallback will be used as a label + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + HivePartitioning + + Examples + -------- + >>> from pyarrow.dataset import HivePartitioning + >>> partitioning = HivePartitioning( + ... 
pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("/year=2009/month=11/")) + ((year == 2009) and (month == 11)) + + """ + + cdef: + CHivePartitioning* hive_partitioning + + def __init__(self, + Schema schema not None, + dictionaries=None, + null_fallback="__HIVE_DEFAULT_PARTITION__", + segment_encoding="uri"): + + cdef: + shared_ptr[CHivePartitioning] c_partitioning + CHivePartitioningOptions c_options + + c_options.null_fallback = tobytes(null_fallback) + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + c_partitioning = make_shared[CHivePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.hive_partitioning = sp.get() + + def __reduce__(self): + dictionaries = self.dictionaries + if dictionaries: + dictionaries = dict(zip(self.schema.names, dictionaries)) + segment_encoding = _wrap_segment_encoding( + deref(self.keyvalue_partitioning).segment_encoding() + ) + null_fallback = frombytes(deref(self.hive_partitioning).null_fallback()) + return HivePartitioning, ( + self.schema, dictionaries, null_fallback, segment_encoding + ) + + @staticmethod + def discover(infer_dictionary=False, + max_partition_dictionary_size=0, + null_fallback="__HIVE_DEFAULT_PARTITION__", + schema=None, + segment_encoding="uri"): + """ + Discover a HivePartitioning. + + Parameters + ---------- + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain. This can be more efficient when + materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + max_partition_dictionary_size : int, default 0 + Synonymous with infer_dictionary for backwards compatibility with + 1.0: setting this to -1 or None is equivalent to passing + infer_dictionary=True. + null_fallback : str, default "__HIVE_DEFAULT_PARTITION__" + When inferring a schema for partition fields this value will be + replaced by null. The default is set to __HIVE_DEFAULT_PARTITION__ + for compatibility with Spark + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. 
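+
+        Examples
+        --------
+        A minimal sketch; the dataset path is hypothetical:
+
+        >>> import pyarrow.dataset as ds
+        >>> factory = ds.HivePartitioning.discover(infer_dictionary=True)
+        >>> dataset = ds.dataset("/path/to/hive/data", partitioning=factory)  # doctest: +SKIP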
+ """ + cdef: + CHivePartitioningFactoryOptions c_options + + if max_partition_dictionary_size in {-1, None}: + infer_dictionary = True + elif max_partition_dictionary_size != 0: + raise NotImplementedError("max_partition_dictionary_size must be " + "0, -1, or None") + + if infer_dictionary: + c_options.infer_dictionary = True + + c_options.null_fallback = tobytes(null_fallback) + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CHivePartitioning.MakeFactory(c_options), + _constructor_hive_partitioning_factory, + (infer_dictionary, max_partition_dictionary_size, null_fallback, + schema, segment_encoding), + ) + + +def _constructor_filename_partitioning_factory(*args): + return FilenamePartitioning.discover(*args) + + +cdef class FilenamePartitioning(KeyValuePartitioning): + """ + A Partitioning based on a specified Schema. + + The FilenamePartitioning expects one segment in the file name for each + field in the schema (all fields are required to be present) separated + by '_'. For example given schema the name + ``"2009_11_"`` would be parsed to ("year" == 2009 and "month" == 11). + + Parameters + ---------- + schema : Schema + The schema that describes the partitions present in the file path. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + FilenamePartitioning + + Examples + -------- + >>> from pyarrow.dataset import FilenamePartitioning + >>> partitioning = FilenamePartitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())])) + >>> print(partitioning.parse("2009_11_data.parquet")) + ((year == 2009) and (month == 11)) + """ + + cdef: + CFilenamePartitioning* filename_partitioning + + def __init__(self, Schema schema not None, dictionaries=None, + segment_encoding="uri"): + cdef: + shared_ptr[CFilenamePartitioning] c_partitioning + CKeyValuePartitioningOptions c_options + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + c_partitioning = make_shared[CFilenamePartitioning]( + pyarrow_unwrap_schema(schema), + _partitioning_dictionaries(schema, dictionaries), + c_options, + ) + self.init( c_partitioning) + + cdef init(self, const shared_ptr[CPartitioning]& sp): + KeyValuePartitioning.init(self, sp) + self.filename_partitioning = sp.get() + + @staticmethod + def discover(field_names=None, infer_dictionary=False, + schema=None, segment_encoding="uri"): + """ + Discover a FilenamePartitioning. + + Parameters + ---------- + field_names : list of str + The names to associate with the values from the subdirectory names. + If schema is given, will be populated from the schema. + infer_dictionary : bool, default False + When inferring a schema for partition fields, yield dictionary + encoded types instead of plain types. This can be more efficient + when materializing virtual columns, and Expressions parsed by the + finished Partitioning will include dictionaries of all unique + inspected values for each field. + schema : Schema, default None + Use this schema instead of inferring a schema from partition + values. 
Partition values will be validated against this schema + before accumulation into the Partitioning's dictionary. + segment_encoding : str, default "uri" + After splitting paths into segments, decode the segments. Valid + values are "uri" (URI-decode segments) and "none" (leave as-is). + + Returns + ------- + PartitioningFactory + To be used in the FileSystemFactoryOptions. + """ + cdef: + CPartitioningFactoryOptions c_options + vector[c_string] c_field_names + + if infer_dictionary: + c_options.infer_dictionary = True + + if schema: + c_options.schema = pyarrow_unwrap_schema(schema) + c_field_names = [tobytes(f.name) for f in schema] + elif not field_names: + raise TypeError( + "Neither field_names nor schema was passed; " + "cannot infer field_names") + else: + c_field_names = [tobytes(s) for s in field_names] + + c_options.segment_encoding = _get_segment_encoding(segment_encoding) + + return PartitioningFactory.wrap( + CFilenamePartitioning.MakeFactory(c_field_names, c_options), + _constructor_filename_partitioning_factory, + (field_names, infer_dictionary, schema, segment_encoding) + ) + + +cdef class DatasetFactory(_Weakrefable): + """ + DatasetFactory is used to create a Dataset, inspect the Schema + of the fragments contained in it, and declare a partitioning. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + self.wrapped = sp + self.factory = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CDatasetFactory]& sp): + cdef DatasetFactory self = \ + DatasetFactory.__new__(DatasetFactory) + self.init(sp) + return self + + cdef inline shared_ptr[CDatasetFactory] unwrap(self) nogil: + return self.wrapped + + @property + def root_partition(self): + return Expression.wrap(self.factory.root_partition()) + + @root_partition.setter + def root_partition(self, Expression expr): + check_status(self.factory.SetRootPartition(expr.unwrap())) + + def inspect_schemas(self): + cdef CResult[vector[shared_ptr[CSchema]]] result + cdef CInspectOptions options + with nogil: + result = self.factory.InspectSchemas(options) + + schemas = [] + for s in GetResultValue(result): + schemas.append(pyarrow_wrap_schema(s)) + return schemas + + def inspect(self): + """ + Inspect all data fragments and return a common Schema. + + Returns + ------- + Schema + """ + cdef: + CInspectOptions options + CResult[shared_ptr[CSchema]] result + with nogil: + result = self.factory.Inspect(options) + return pyarrow_wrap_schema(GetResultValue(result)) + + def finish(self, Schema schema=None): + """ + Create a Dataset using the inspected schema or an explicit schema + (if given). + + Parameters + ---------- + schema : Schema, default None + The schema to conform the source to. If None, the inspected + schema is used. + + Returns + ------- + Dataset + """ + cdef: + shared_ptr[CSchema] sp_schema + CResult[shared_ptr[CDataset]] result + + if schema is not None: + sp_schema = pyarrow_unwrap_schema(schema) + with nogil: + result = self.factory.FinishWithSchema(sp_schema) + else: + with nogil: + result = self.factory.Finish() + + return Dataset.wrap(GetResultValue(result)) + + +cdef class FileSystemFactoryOptions(_Weakrefable): + """ + Influences the discovery of filesystem paths. + + Parameters + ---------- + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. 
+ The ignored files will still be part of the Dataset, but will not + have partition information. + partitioning : Partitioning/PartitioningFactory, optional + Apply the Partitioning to every discovered Fragment. See Partitioning or + PartitioningFactory documentation. + exclude_invalid_files : bool, optional (default True) + If True, invalid files will be excluded (file format specific check). + This will incur IO for each files in a serial and single threaded + fashion. Disabling this feature will skip the IO, but unsupported + files may be present in the Dataset (resulting in an error at scan + time). + selector_ignore_prefixes : list, optional + When discovering from a Selector (and not from an explicit file list), + ignore files and directories matching any of these prefixes. + By default this is ['.', '_']. + """ + + cdef: + CFileSystemFactoryOptions options + + __slots__ = () # avoid mistakingly creating attributes + + def __init__(self, partition_base_dir=None, partitioning=None, + exclude_invalid_files=None, + list selector_ignore_prefixes=None): + if isinstance(partitioning, PartitioningFactory): + self.partitioning_factory = partitioning + elif isinstance(partitioning, Partitioning): + self.partitioning = partitioning + + if partition_base_dir is not None: + self.partition_base_dir = partition_base_dir + if exclude_invalid_files is not None: + self.exclude_invalid_files = exclude_invalid_files + if selector_ignore_prefixes is not None: + self.selector_ignore_prefixes = selector_ignore_prefixes + + cdef inline CFileSystemFactoryOptions unwrap(self): + return self.options + + @property + def partitioning(self): + """Partitioning to apply to discovered files. + + NOTE: setting this property will overwrite partitioning_factory. + """ + c_partitioning = self.options.partitioning.partitioning() + if c_partitioning.get() == nullptr: + return None + return Partitioning.wrap(c_partitioning) + + @partitioning.setter + def partitioning(self, Partitioning value): + self.options.partitioning = ( value).unwrap() + + @property + def partitioning_factory(self): + """PartitioningFactory to apply to discovered files and + discover a Partitioning. + + NOTE: setting this property will overwrite partitioning. + """ + c_factory = self.options.partitioning.factory() + if c_factory.get() == nullptr: + return None + return PartitioningFactory.wrap(c_factory, None, None) + + @partitioning_factory.setter + def partitioning_factory(self, PartitioningFactory value): + self.options.partitioning = ( value).unwrap() + + @property + def partition_base_dir(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return frombytes(self.options.partition_base_dir) + + @partition_base_dir.setter + def partition_base_dir(self, value): + self.options.partition_base_dir = tobytes(value) + + @property + def exclude_invalid_files(self): + """Whether to exclude invalid files.""" + return self.options.exclude_invalid_files + + @exclude_invalid_files.setter + def exclude_invalid_files(self, bint value): + self.options.exclude_invalid_files = value + + @property + def selector_ignore_prefixes(self): + """ + List of prefixes. Files matching one of those prefixes will be + ignored by the discovery process. 
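+        For example, with the default ``['.', '_']``, hidden files and files
+        starting with an underscore are skipped.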
+ """ + return [frombytes(p) for p in self.options.selector_ignore_prefixes] + + @selector_ignore_prefixes.setter + def selector_ignore_prefixes(self, values): + self.options.selector_ignore_prefixes = [tobytes(v) for v in values] + + +cdef class FileSystemDatasetFactory(DatasetFactory): + """ + Create a DatasetFactory from a list of paths with schema inspection. + + Parameters + ---------- + filesystem : pyarrow.fs.FileSystem + Filesystem to discover. + paths_or_selector : pyarrow.fs.FileSelector or list of path-likes + Either a Selector object or a list of path-like objects. + format : FileFormat + Currently only ParquetFileFormat and IpcFileFormat are supported. + options : FileSystemFactoryOptions, optional + Various flags influencing the discovery of filesystem paths. + """ + + cdef: + CFileSystemDatasetFactory* filesystem_factory + + def __init__(self, FileSystem filesystem not None, paths_or_selector, + FileFormat format not None, + FileSystemFactoryOptions options=None): + cdef: + vector[c_string] paths + CFileSelector c_selector + CResult[shared_ptr[CDatasetFactory]] result + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CFileFormat] c_format + CFileSystemFactoryOptions c_options + + options = options or FileSystemFactoryOptions() + c_options = options.unwrap() + c_filesystem = filesystem.unwrap() + c_format = format.unwrap() + + if isinstance(paths_or_selector, FileSelector): + with nogil: + c_selector = ( paths_or_selector).selector + result = CFileSystemDatasetFactory.MakeFromSelector( + c_filesystem, + c_selector, + c_format, + c_options + ) + elif isinstance(paths_or_selector, (list, tuple)): + paths = [tobytes(s) for s in paths_or_selector] + with nogil: + result = CFileSystemDatasetFactory.MakeFromPaths( + c_filesystem, + paths, + c_format, + c_options + ) + else: + raise TypeError('Must pass either paths or a FileSelector, but ' + 'passed {}'.format(type(paths_or_selector))) + + self.init(GetResultValue(result)) + + cdef init(self, shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.filesystem_factory = sp.get() + + +cdef class UnionDatasetFactory(DatasetFactory): + """ + Provides a way to inspect/discover a Dataset's expected schema before + materialization. + + Parameters + ---------- + factories : list of DatasetFactory + """ + + cdef: + CUnionDatasetFactory* union_factory + + def __init__(self, list factories): + cdef: + DatasetFactory factory + vector[shared_ptr[CDatasetFactory]] c_factories + for factory in factories: + c_factories.push_back(factory.unwrap()) + self.init(GetResultValue(CUnionDatasetFactory.Make(c_factories))) + + cdef init(self, const shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.union_factory = sp.get() + + +cdef class RecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches.""" + cdef: + # An object that must be kept alive with the iterator. 
+ object iterator_owner + # Iterator is a non-POD type and Cython uses offsetof, leading + # to a compiler warning unless wrapped like so + SharedPtrNoGIL[CRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CRecordBatchIterator iterator): + cdef RecordBatchIterator self = \ + RecordBatchIterator.__new__(RecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CRecordBatchIterator](move(iterator)) + return self + + cdef inline shared_ptr[CRecordBatchIterator] unwrap(self) nogil: + return self.iterator + + def __iter__(self): + return self + + def __next__(self): + cdef shared_ptr[CRecordBatch] record_batch + with nogil: + record_batch = GetResultValue(move(self.iterator.get().Next())) + if record_batch == NULL: + raise StopIteration + return pyarrow_wrap_batch(record_batch) + + +class TaggedRecordBatch(collections.namedtuple( + "TaggedRecordBatch", ["record_batch", "fragment"])): + """ + A combination of a record batch and the fragment it came from. + + Parameters + ---------- + record_batch : RecordBatch + The record batch. + fragment : Fragment + Fragment of the record batch. + """ + + +cdef class TaggedRecordBatchIterator(_Weakrefable): + """An iterator over a sequence of record batches with fragments.""" + cdef: + object iterator_owner + SharedPtrNoGIL[CTaggedRecordBatchIterator] iterator + + def __init__(self): + _forbid_instantiation(self.__class__, subclasses_instead=False) + + @staticmethod + cdef wrap(object owner, CTaggedRecordBatchIterator iterator): + cdef TaggedRecordBatchIterator self = \ + TaggedRecordBatchIterator.__new__(TaggedRecordBatchIterator) + self.iterator_owner = owner + self.iterator = make_shared[CTaggedRecordBatchIterator]( + move(iterator)) + return self + + def __iter__(self): + return self + + def __next__(self): + cdef CTaggedRecordBatch batch + with nogil: + batch = GetResultValue(move(self.iterator.get().Next())) + if batch.record_batch == NULL: + raise StopIteration + return TaggedRecordBatch( + record_batch=pyarrow_wrap_batch(batch.record_batch), + fragment=Fragment.wrap(batch.fragment)) + + +cdef void _populate_builder(const shared_ptr[CScannerBuilder]& ptr, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + bint use_threads=True, MemoryPool memory_pool=None, + FragmentScanOptions fragment_scan_options=None)\ + except *: + cdef: + CScannerBuilder *builder + vector[CExpression] c_exprs + + builder = ptr.get() + + check_status(builder.Filter(_bind( + filter, pyarrow_wrap_schema(builder.schema())))) + + if columns is not None: + if isinstance(columns, dict): + for expr in columns.values(): + if not isinstance(expr, Expression): + raise TypeError( + "Expected an Expression for a 'column' dictionary " + "value, got {} instead".format(type(expr)) + ) + c_exprs.push_back(( expr).unwrap()) + + check_status( + builder.Project(c_exprs, [tobytes(c) for c in columns.keys()]) + ) + elif isinstance(columns, list): + check_status(builder.ProjectColumns([tobytes(c) for c in columns])) + else: + raise ValueError( + "Expected a list or a dict for 'columns', " + "got {} instead.".format(type(columns)) + ) + + check_status(builder.BatchSize(batch_size)) + check_status(builder.BatchReadahead(batch_readahead)) + check_status(builder.FragmentReadahead(fragment_readahead)) + 
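+    # The remaining calls below apply the threading, memory-pool and
+    # format-specific fragment scan options shared by all Scanner factories.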
check_status(builder.UseThreads(use_threads)) + check_status(builder.Pool(maybe_unbox_memory_pool(memory_pool))) + if fragment_scan_options: + check_status( + builder.FragmentScanOptions(fragment_scan_options.wrapped)) + + +cdef class Scanner(_Weakrefable): + """A materialized scan operation with context and options bound. + + A scanner is the class that glues the scan tasks, data fragments and data + sources together. + """ + + def __init__(self): + _forbid_instantiation(self.__class__) + + cdef void init(self, const shared_ptr[CScanner]& sp): + self.wrapped = sp + self.scanner = sp.get() + + @staticmethod + cdef wrap(const shared_ptr[CScanner]& sp): + cdef Scanner self = Scanner.__new__(Scanner) + self.init(sp) + return self + + cdef inline shared_ptr[CScanner] unwrap(self): + return self.wrapped + + @staticmethod + cdef shared_ptr[CScanOptions] _make_scan_options(Dataset dataset, dict py_scanoptions) except *: + cdef: + shared_ptr[CScannerBuilder] builder = make_shared[CScannerBuilder](dataset.unwrap()) + + py_scanoptions = dataset._scanner_options(py_scanoptions) + + # Need to explicitly expand the arguments as Cython doesn't support + # keyword expansion in cdef functions. + _populate_builder( + builder, + columns=py_scanoptions.get("columns"), + filter=py_scanoptions.get("filter"), + batch_size=py_scanoptions.get("batch_size", _DEFAULT_BATCH_SIZE), + batch_readahead=py_scanoptions.get( + "batch_readahead", _DEFAULT_BATCH_READAHEAD), + fragment_readahead=py_scanoptions.get( + "fragment_readahead", _DEFAULT_FRAGMENT_READAHEAD), + use_threads=py_scanoptions.get("use_threads", True), + memory_pool=py_scanoptions.get("memory_pool"), + fragment_scan_options=py_scanoptions.get("fragment_scan_options")) + + return GetResultValue(deref(builder).GetScanOptions()) + + @staticmethod + def from_dataset(Dataset dataset not None, *, + object columns=None, + Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Dataset, + + Parameters + ---------- + dataset : Dataset + Dataset to scan. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. 
+ batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + options = Scanner._make_scan_options( + dataset, + dict(columns=columns, filter=filter, batch_size=batch_size, + batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, fragment_scan_options=fragment_scan_options) + ) + builder = make_shared[CScannerBuilder](dataset.unwrap(), options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_fragment(Fragment fragment not None, *, Schema schema=None, + object columns=None, Expression filter=None, + int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create Scanner from Fragment, + + Parameters + ---------- + fragment : Fragment + fragment to scan. + schema : Schema, optional + The schema of the fragment. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. 
If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScanOptions] options = make_shared[CScanOptions]() + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + + schema = schema or fragment.physical_schema + + builder = make_shared[CScannerBuilder](pyarrow_unwrap_schema(schema), + fragment.unwrap(), options) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, + use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @staticmethod + def from_batches(source, *, Schema schema=None, object columns=None, + Expression filter=None, int batch_size=_DEFAULT_BATCH_SIZE, + int batch_readahead=_DEFAULT_BATCH_READAHEAD, + int fragment_readahead=_DEFAULT_FRAGMENT_READAHEAD, + FragmentScanOptions fragment_scan_options=None, + bint use_threads=True, MemoryPool memory_pool=None): + """ + Create a Scanner from an iterator of batches. + + This creates a scanner which can be used only once. It is + intended to support writing a dataset (which takes a scanner) + from a source which can be read only once (e.g. a + RecordBatchReader or generator). + + Parameters + ---------- + source : Iterator + The iterator of Batches. + schema : Schema + The schema of the batches. + columns : list[str] or dict[str, Expression], default None + The columns to project. This can be a list of column names to + include (order and duplicates will be preserved), or a dictionary + with {new_column_name: expression} values for more advanced + projections. + + The list of columns or expressions may use the special fields + `__batch_index` (the index of the batch within the fragment), + `__fragment_index` (the index of the fragment within the dataset), + `__last_in_fragment` (whether the batch is last in fragment), and + `__filename` (the name of the source file or a description of the + source fragment). + + The columns will be passed down to Datasets and corresponding data + fragments to avoid loading, copying, and deserializing columns + that will not be required further down the compute chain. + By default all of the available columns are projected. Raises + an exception if any of the referenced column names does not exist + in the dataset's Schema. + filter : Expression, default None + Scan will return only the rows matching the filter. + If possible the predicate will be pushed down to exploit the + partition information or internal metadata found in the data + source, e.g. Parquet statistics. 
Otherwise filters the loaded + RecordBatches before yielding them. + batch_size : int, default 131_072 + The maximum row count for scanned record batches. If scanned + record batches are overflowing memory then this method can be + called to reduce their size. + batch_readahead : int, default 16 + The number of batches to read ahead in a file. This might not work + for all file formats. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_readahead : int, default 4 + The number of files to read ahead. Increasing this number will increase + RAM usage but could also improve IO utilization. + fragment_scan_options : FragmentScanOptions, default None + Options specific to a particular scan and fragment type, which + can change between different scans of the same dataset. + use_threads : bool, default True + If enabled, then maximum parallelism will be used determined by + the number of available CPU cores. + memory_pool : MemoryPool, default None + For memory allocations, if required. If not specified, uses the + default pool. + """ + cdef: + shared_ptr[CScannerBuilder] builder + shared_ptr[CScanner] scanner + RecordBatchReader reader + if isinstance(source, pa.ipc.RecordBatchReader): + if schema: + raise ValueError('Cannot specify a schema when providing ' + 'a RecordBatchReader') + reader = source + elif _is_iterable(source): + if schema is None: + raise ValueError('Must provide schema to construct scanner ' + 'from an iterable') + reader = pa.ipc.RecordBatchReader.from_batches(schema, source) + else: + raise TypeError('Expected a RecordBatchReader or an iterable of ' + 'batches instead of the given type: ' + + type(source).__name__) + builder = CScannerBuilder.FromRecordBatchReader(reader.reader) + _populate_builder(builder, columns=columns, filter=filter, + batch_size=batch_size, batch_readahead=batch_readahead, + fragment_readahead=fragment_readahead, use_threads=use_threads, + memory_pool=memory_pool, + fragment_scan_options=fragment_scan_options) + scanner = GetResultValue(builder.get().Finish()) + return Scanner.wrap(scanner) + + @property + def dataset_schema(self): + """The schema with which batches will be read from fragments.""" + return pyarrow_wrap_schema( + self.scanner.options().get().dataset_schema) + + @property + def projected_schema(self): + """ + The materialized schema of the data, accounting for projections. + + This is the schema of any data returned from the scanner. + """ + return pyarrow_wrap_schema( + self.scanner.options().get().projected_schema) + + def to_batches(self): + """ + Consume a Scanner in record batches. + + Returns + ------- + record_batches : iterator of RecordBatch + """ + def _iterator(batch_iter): + for batch in batch_iter: + yield batch.record_batch + # Don't make ourselves a generator so errors are raised immediately + return _iterator(self.scan_batches()) + + def scan_batches(self): + """ + Consume a Scanner in record batches with corresponding fragments. + + Returns + ------- + record_batches : iterator of TaggedRecordBatch + """ + cdef CTaggedRecordBatchIterator iterator + with nogil: + iterator = move(GetResultValue(self.scanner.ScanBatches())) + # Don't make ourselves a generator so errors are raised immediately + return TaggedRecordBatchIterator.wrap(self, move(iterator)) + + def to_table(self): + """ + Convert a Scanner into a Table. + + Use this convenience utility with care. This will serially materialize + the Scan result in memory before creating the Table. 
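+
+        A sketch of the typical call chain (``ds`` and the path and column
+        below are illustrative, not part of this module)::
+
+            import pyarrow.dataset as ds
+
+            dataset = ds.dataset("data/", format="parquet")
+            table = dataset.scanner(columns=["a"]).to_table()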
+ + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + + with nogil: + result = self.scanner.ToTable() + + return pyarrow_wrap_table(GetResultValue(result)) + + def take(self, object indices): + """ + Select rows of data by index. + + Will only consume as many batches of the underlying dataset as + needed. Otherwise, this is equivalent to + ``to_table().take(indices)``. + + Parameters + ---------- + indices : Array or array-like + indices of rows to select in the dataset. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + cdef shared_ptr[CArray] c_indices + + if not isinstance(indices, pa.Array): + indices = pa.array(indices) + c_indices = pyarrow_unwrap_array(indices) + + with nogil: + result = self.scanner.TakeRows(deref(c_indices)) + return pyarrow_wrap_table(GetResultValue(result)) + + def head(self, int num_rows): + """ + Load the first N rows of the dataset. + + Parameters + ---------- + num_rows : int + The number of rows to load. + + Returns + ------- + Table + """ + cdef CResult[shared_ptr[CTable]] result + with nogil: + result = self.scanner.Head(num_rows) + return pyarrow_wrap_table(GetResultValue(result)) + + def count_rows(self): + """ + Count rows matching the scanner filter. + + Returns + ------- + count : int + """ + cdef CResult[int64_t] result + with nogil: + result = self.scanner.CountRows() + return GetResultValue(result) + + def to_reader(self): + """Consume this scanner as a RecordBatchReader. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = GetResultValue(self.scanner.ToRecordBatchReader()) + return reader + + +def get_partition_keys(Expression partition_expression): + """ + Extract partition keys (equality constraints between a field and a scalar) + from an expression as a dict mapping the field's name to its value. + + NB: All expressions yielded by a HivePartitioning or DirectoryPartitioning + will be conjunctions of equality conditions and are accessible through this + function. Other subexpressions will be ignored. + + Parameters + ---------- + partition_expression : pyarrow.dataset.Expression + + Returns + ------- + dict + + Examples + -------- + + For example, an expression of + + is converted to {'part': 'A', 'year': 2016} + """ + cdef: + CExpression expr = partition_expression.unwrap() + pair[CFieldRef, CDatum] ref_val + + out = {} + for ref_val in GetResultValue(CExtractKnownFieldValues(expr)).map: + assert ref_val.first.name() != nullptr + assert ref_val.second.kind() == DatumType_SCALAR + val = pyarrow_wrap_scalar(ref_val.second.scalar()) + out[frombytes(deref(ref_val.first.name()))] = val.as_py() + return out + + +cdef class WrittenFile(_Weakrefable): + """ + Metadata information about files written as + part of a dataset write operation + + Parameters + ---------- + path : str + Path to the file. + metadata : pyarrow.parquet.FileMetaData, optional + For Parquet files, the Parquet file metadata. + size : int + The size of the file in bytes. 
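+
+    Instances are typically received through the ``file_visitor`` callback of
+    ``pyarrow.dataset.write_dataset`` rather than constructed directly.
+    A sketch of such a callback (the name is illustrative)::
+
+        def file_visitor(written_file):
+            print(written_file.path, written_file.size)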
+ """ + + def __init__(self, path, metadata, size): + self.path = path + self.metadata = metadata + self.size = size + + +cdef void _filesystemdataset_write_visitor( + dict visit_args, + CFileWriter* file_writer): + cdef: + str path + str base_dir + WrittenFile written_file + FileFormat file_format + + path = frombytes(deref(file_writer).destination().path) + base_dir = frombytes(visit_args['base_dir']) + file_format = FileFormat.wrap(file_writer.format()) + written_file = file_format._finish_write(path, base_dir, file_writer) + visit_args['file_visitor'](written_file) + + +def _filesystemdataset_write( + Scanner data not None, + object base_dir not None, + str basename_template not None, + FileSystem filesystem not None, + Partitioning partitioning not None, + FileWriteOptions file_options not None, + int max_partitions, + object file_visitor, + str existing_data_behavior not None, + int max_open_files, + int max_rows_per_file, + int min_rows_per_group, + int max_rows_per_group, + bool create_dir +): + """ + CFileSystemDataset.Write wrapper + """ + cdef: + CFileSystemDatasetWriteOptions c_options + shared_ptr[CScanner] c_scanner + dict visit_args + + c_options.file_write_options = file_options.unwrap() + c_options.filesystem = filesystem.unwrap() + c_options.base_dir = tobytes(_stringify_path(base_dir)) + c_options.partitioning = partitioning.unwrap() + c_options.max_partitions = max_partitions + c_options.max_open_files = max_open_files + c_options.max_rows_per_file = max_rows_per_file + c_options.max_rows_per_group = max_rows_per_group + c_options.min_rows_per_group = min_rows_per_group + c_options.basename_template = tobytes(basename_template) + if existing_data_behavior == 'error': + c_options.existing_data_behavior = ExistingDataBehavior_ERROR + elif existing_data_behavior == 'overwrite_or_ignore': + c_options.existing_data_behavior =\ + ExistingDataBehavior_OVERWRITE_OR_IGNORE + elif existing_data_behavior == 'delete_matching': + c_options.existing_data_behavior = ExistingDataBehavior_DELETE_MATCHING + else: + raise ValueError( + ("existing_data_behavior must be one of 'error', ", + "'overwrite_or_ignore' or 'delete_matching'") + ) + c_options.create_dir = create_dir + + if file_visitor is not None: + visit_args = {'base_dir': c_options.base_dir, + 'file_visitor': file_visitor} + # Need to use post_finish because parquet metadata is not available + # until after Finish has been called + c_options.writer_post_finish = BindFunction[cb_writer_finish_internal]( + &_filesystemdataset_write_visitor, visit_args) + + c_scanner = data.unwrap() + with nogil: + check_status(CFileSystemDataset.Write(c_options, c_scanner)) + + +cdef class _ScanNodeOptions(ExecNodeOptions): + + def _set_options(self, Dataset dataset, dict scan_options): + cdef: + shared_ptr[CScanOptions] c_scan_options + + c_scan_options = Scanner._make_scan_options(dataset, scan_options) + + self.wrapped.reset( + new CScanNodeOptions(dataset.unwrap(), c_scan_options) + ) + + +class ScanNodeOptions(_ScanNodeOptions): + """ + A Source node which yields batches from a Dataset scan. + + This is the option class for the "scan" node factory. + + This node is capable of applying pushdown projections or filters + to the file readers which reduce the amount of data that needs to + be read (if supported by the file format). But note that this does not + construct associated filter or project nodes to perform the final + filtering or projection. 
Rather, you may supply the same filter + expression or projection to the scan node that you also supply + to the filter or project node. + + Yielded batches will be augmented with fragment/batch indices to + enable stable ordering for simple ExecPlans. + + Parameters + ---------- + dataset : pyarrow.dataset.Dataset + The table which acts as the data source. + **kwargs : dict, optional + Scan options. See `Scanner.from_dataset` for possible arguments. + """ + + def __init__(self, Dataset dataset, **kwargs): + self._set_options(dataset, kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..de7714c1642e1984f16015cff416af9c7dee9cbc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx new file mode 100644 index 0000000000000000000000000000000000000000..a8cce3362225adcfd7e70b51e521f26d43d9a102 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_orc.pyx @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: language_level = 3 + +"""Dataset support for ORC file format.""" + +from pyarrow.lib cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_dataset cimport * + +from pyarrow._dataset cimport FileFormat + + +cdef class OrcFileFormat(FileFormat): + + def __init__(self): + self.init(shared_ptr[CFileFormat](new COrcFileFormat())) + + def equals(self, OrcFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.OrcFileFormat + + Returns + ------- + True + """ + return True + + @property + def default_extname(self): + return "orc" + + def __reduce__(self): + return OrcFileFormat, tuple() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..65392f4fccae8ae5084e1ac87ab13812b2ec1501 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..d5bc172d324d533dc642bbb4a3f87439779a16af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pxd @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet file format.""" + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libarrow_dataset_parquet cimport * + +from pyarrow._dataset cimport FragmentScanOptions, FileWriteOptions + + +cdef class ParquetFragmentScanOptions(FragmentScanOptions): + cdef: + CParquetFragmentScanOptions* parquet_options + object _parquet_decryption_config + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp) + cdef CReaderProperties* reader_properties(self) + cdef ArrowReaderProperties* arrow_reader_properties(self) + + +cdef class ParquetFileWriteOptions(FileWriteOptions): + + cdef: + CParquetFileWriteOptions* parquet_options + object _properties diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx new file mode 100644 index 0000000000000000000000000000000000000000..58ef6145cf7d1be014db696d21c4ccb05bb6b3b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset_parquet.pyx @@ -0,0 +1,1019 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +"""Dataset support for Parquet file format.""" + +from cython cimport binding +from cython.operator cimport dereference as deref + +import os +import warnings + +import pyarrow as pa +from pyarrow.lib cimport * +from pyarrow.lib import frombytes, tobytes +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libarrow_dataset_parquet cimport * +from pyarrow._fs cimport FileSystem + +from pyarrow._compute cimport Expression, _bind +from pyarrow._dataset cimport ( + _make_file_source, + DatasetFactory, + FileFormat, + FileFragment, + FileWriteOptions, + Fragment, + FragmentScanOptions, + CacheOptions, + Partitioning, + PartitioningFactory, + WrittenFile +) + +from pyarrow._parquet cimport ( + _create_writer_properties, _create_arrow_writer_properties, + FileMetaData, +) + + +try: + from pyarrow._dataset_parquet_encryption import ( + set_encryption_config, set_decryption_config + ) + parquet_encryption_enabled = True +except ImportError: + parquet_encryption_enabled = False + + +cdef Expression _true = Expression._scalar(True) + +ctypedef CParquetFileWriter* _CParquetFileWriterPtr + + +cdef class ParquetFileFormat(FileFormat): + """ + FileFormat for Parquet + + Parameters + ---------- + read_options : ParquetReadOptions + Read options for the file. + default_fragment_scan_options : ParquetFragmentScanOptions + Scan Options for the file. 
+ **kwargs : dict + Additional options for read option or scan option + """ + + cdef: + CParquetFileFormat* parquet_format + + def __init__(self, read_options=None, + default_fragment_scan_options=None, + **kwargs): + cdef: + shared_ptr[CParquetFileFormat] wrapped + CParquetFileFormatReaderOptions* options + + # Read/scan options + read_options_args = {option: kwargs[option] for option in kwargs + if option in _PARQUET_READ_OPTIONS} + scan_args = {option: kwargs[option] for option in kwargs + if option not in _PARQUET_READ_OPTIONS} + if read_options and read_options_args: + duplicates = ', '.join(sorted(read_options_args)) + raise ValueError(f'If `read_options` is given, ' + f'cannot specify {duplicates}') + if default_fragment_scan_options and scan_args: + duplicates = ', '.join(sorted(scan_args)) + raise ValueError(f'If `default_fragment_scan_options` is given, ' + f'cannot specify {duplicates}') + + if read_options is None: + read_options = ParquetReadOptions(**read_options_args) + elif isinstance(read_options, dict): + # For backwards compatibility + duplicates = [] + for option, value in read_options.items(): + if option in _PARQUET_READ_OPTIONS: + read_options_args[option] = value + else: + duplicates.append(option) + scan_args[option] = value + if duplicates: + duplicates = ", ".join(duplicates) + warnings.warn(f'The scan options {duplicates} should be ' + 'specified directly as keyword arguments') + read_options = ParquetReadOptions(**read_options_args) + elif not isinstance(read_options, ParquetReadOptions): + raise TypeError('`read_options` must be either a dictionary or an ' + 'instance of ParquetReadOptions') + + if default_fragment_scan_options is None: + default_fragment_scan_options = ParquetFragmentScanOptions( + **scan_args) + elif isinstance(default_fragment_scan_options, dict): + default_fragment_scan_options = ParquetFragmentScanOptions( + **default_fragment_scan_options) + elif not isinstance(default_fragment_scan_options, + ParquetFragmentScanOptions): + raise TypeError('`default_fragment_scan_options` must be either a ' + 'dictionary or an instance of ' + 'ParquetFragmentScanOptions') + + wrapped = make_shared[CParquetFileFormat]() + + options = &(wrapped.get().reader_options) + if read_options.dictionary_columns is not None: + for column in read_options.dictionary_columns: + options.dict_columns.insert(tobytes(column)) + options.coerce_int96_timestamp_unit = \ + read_options._coerce_int96_timestamp_unit + + self.init( wrapped) + self.default_fragment_scan_options = default_fragment_scan_options + + cdef void init(self, const shared_ptr[CFileFormat]& sp): + FileFormat.init(self, sp) + self.parquet_format = sp.get() + + cdef WrittenFile _finish_write(self, path, base_dir, + CFileWriter* file_writer): + cdef: + FileMetaData parquet_metadata + CParquetFileWriter* parquet_file_writer + + parquet_metadata = None + parquet_file_writer = dynamic_cast[_CParquetFileWriterPtr](file_writer) + with nogil: + metadata = deref( + deref(parquet_file_writer).parquet_writer()).metadata() + if metadata: + parquet_metadata = FileMetaData() + parquet_metadata.init(metadata) + parquet_metadata.set_file_path(os.path.relpath(path, base_dir)) + + size = GetResultValue(file_writer.GetBytesWritten()) + + return WrittenFile(path, parquet_metadata, size) + + @property + def read_options(self): + cdef CParquetFileFormatReaderOptions* options + options = &self.parquet_format.reader_options + parquet_read_options = ParquetReadOptions( + dictionary_columns={frombytes(col) + for col in 
options.dict_columns}, + ) + # Read options getter/setter works with strings so setting + # the private property which uses the C Type + parquet_read_options._coerce_int96_timestamp_unit = \ + options.coerce_int96_timestamp_unit + return parquet_read_options + + def make_write_options(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + + Returns + ------- + pyarrow.dataset.FileWriteOptions + """ + opts = FileFormat.make_write_options(self) + ( opts).update(**kwargs) + return opts + + cdef _set_default_fragment_scan_options(self, FragmentScanOptions options): + if options.type_name == 'parquet': + self.parquet_format.default_fragment_scan_options = options.wrapped + else: + super()._set_default_fragment_scan_options(options) + + def equals(self, ParquetFileFormat other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetFileFormat + + Returns + ------- + bool + """ + return ( + self.read_options.equals(other.read_options) and + self.default_fragment_scan_options == + other.default_fragment_scan_options + ) + + @property + def default_extname(self): + return "parquet" + + def __reduce__(self): + return ParquetFileFormat, (self.read_options, + self.default_fragment_scan_options) + + def __repr__(self): + return f"" + + def make_fragment(self, file, filesystem=None, + Expression partition_expression=None, row_groups=None, *, file_size=None): + """ + Make a FileFragment from a given file. + + Parameters + ---------- + file : file-like object, path-like or str + The file or file path to make a fragment from. + filesystem : Filesystem, optional + If `filesystem` is given, `file` must be a string and specifies + the path of the file to read from the filesystem. + partition_expression : Expression, optional + An expression that is guaranteed true for all rows in the fragment. Allows + fragment to be potentially skipped while scanning with a filter. + row_groups : Iterable, optional + The indices of the row groups to include + file_size : int, optional + The size of the file in bytes. Can improve performance with high-latency filesystems + when file size needs to be known before reading. + + Returns + ------- + fragment : Fragment + The file fragment + """ + cdef: + vector[int] c_row_groups + if partition_expression is None: + partition_expression = _true + if row_groups is None: + return super().make_fragment(file, filesystem, + partition_expression, file_size=file_size) + + c_source = _make_file_source(file, filesystem, file_size) + c_row_groups = [ row_group for row_group in set(row_groups)] + + c_fragment = GetResultValue( + self.parquet_format.MakeFragment(move(c_source), + partition_expression.unwrap(), + nullptr, + move(c_row_groups))) + return Fragment.wrap(move(c_fragment)) + + +class RowGroupInfo: + """ + A wrapper class for RowGroup information + + Parameters + ---------- + id : integer + The group ID. + metadata : FileMetaData + The rowgroup metadata. + schema : Schema + Schema of the rows. 
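+
+    A sketch of how these objects are usually obtained (``fragment`` is an
+    illustrative ``ParquetFileFragment``)::
+
+        for info in fragment.row_groups:
+            print(info.id, info.num_rows, info.total_byte_size)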
+ """ + + def __init__(self, id, metadata, schema): + self.id = id + self.metadata = metadata + self.schema = schema + + @property + def num_rows(self): + return self.metadata.num_rows + + @property + def total_byte_size(self): + return self.metadata.total_byte_size + + @property + def statistics(self): + def name_stats(i): + col = self.metadata.column(i) + + stats = col.statistics + if stats is None or not stats.has_min_max: + return None, None + + name = col.path_in_schema + field_index = self.schema.get_field_index(name) + if field_index < 0: + return None, None + + typ = self.schema.field(field_index).type + return col.path_in_schema, { + 'min': pa.scalar(stats.min, type=typ).as_py(), + 'max': pa.scalar(stats.max, type=typ).as_py() + } + + return { + name: stats for name, stats + in map(name_stats, range(self.metadata.num_columns)) + if stats is not None + } + + def __repr__(self): + return "RowGroupInfo({})".format(self.id) + + def __eq__(self, other): + if isinstance(other, int): + return self.id == other + if not isinstance(other, RowGroupInfo): + return False + return self.id == other.id + + +cdef class ParquetFileFragment(FileFragment): + """A Fragment representing a parquet file.""" + + cdef: + CParquetFileFragment* parquet_file_fragment + + cdef void init(self, const shared_ptr[CFragment]& sp): + FileFragment.init(self, sp) + self.parquet_file_fragment = sp.get() + + def __reduce__(self): + buffer = self.buffer + # parquet_file_fragment.row_groups() is empty if the metadata + # information of the file is not yet populated + if not bool(self.parquet_file_fragment.row_groups()): + row_groups = None + else: + row_groups = [row_group.id for row_group in self.row_groups] + + return self.format.make_fragment, ( + self.path if buffer is None else buffer, + self.filesystem, + self.partition_expression, + row_groups + ) + + def ensure_complete_metadata(self): + """ + Ensure that all metadata (statistics, physical schema, ...) have + been read and cached in this fragment. + """ + with nogil: + check_status(self.parquet_file_fragment.EnsureCompleteMetadata()) + + @property + def row_groups(self): + metadata = self.metadata + cdef vector[int] row_groups = self.parquet_file_fragment.row_groups() + return [RowGroupInfo(i, metadata.row_group(i), self.physical_schema) + for i in row_groups] + + @property + def metadata(self): + self.ensure_complete_metadata() + cdef FileMetaData metadata = FileMetaData() + metadata.init(self.parquet_file_fragment.metadata()) + return metadata + + @property + def num_row_groups(self): + """ + Return the number of row groups viewed by this fragment (not the + number of row groups in the origin file). + """ + self.ensure_complete_metadata() + return self.parquet_file_fragment.row_groups().size() + + def split_by_row_group(self, Expression filter=None, + Schema schema=None): + """ + Split the fragment into multiple fragments. + + Yield a Fragment wrapping each row group in this ParquetFileFragment. + Row groups will be excluded whose metadata contradicts the optional + filter. + + Parameters + ---------- + filter : Expression, default None + Only include the row groups which satisfy this predicate (using + the Parquet RowGroup statistics). + schema : Schema, default None + Schema to use when filtering row groups. 
Defaults to the + Fragment's physical schema + + Returns + ------- + A list of Fragments + """ + cdef: + vector[shared_ptr[CFragment]] c_fragments + CExpression c_filter + shared_ptr[CFragment] c_fragment + + schema = schema or self.physical_schema + c_filter = _bind(filter, schema) + with nogil: + c_fragments = move(GetResultValue( + self.parquet_file_fragment.SplitByRowGroup(move(c_filter)))) + + return [Fragment.wrap(c_fragment) for c_fragment in c_fragments] + + def subset(self, Expression filter=None, Schema schema=None, + object row_group_ids=None): + """ + Create a subset of the fragment (viewing a subset of the row groups). + + Subset can be specified by either a filter predicate (with optional + schema) or by a list of row group IDs. Note that when using a filter, + the resulting fragment can be empty (viewing no row groups). + + Parameters + ---------- + filter : Expression, default None + Only include the row groups which satisfy this predicate (using + the Parquet RowGroup statistics). + schema : Schema, default None + Schema to use when filtering row groups. Defaults to the + Fragment's physical schema + row_group_ids : list of ints + The row group IDs to include in the subset. Can only be specified + if `filter` is None. + + Returns + ------- + ParquetFileFragment + """ + cdef: + CExpression c_filter + vector[int] c_row_group_ids + shared_ptr[CFragment] c_fragment + + if filter is not None and row_group_ids is not None: + raise ValueError( + "Cannot specify both 'filter' and 'row_group_ids'." + ) + + if filter is not None: + schema = schema or self.physical_schema + c_filter = _bind(filter, schema) + with nogil: + c_fragment = move(GetResultValue( + self.parquet_file_fragment.SubsetWithFilter( + move(c_filter)))) + elif row_group_ids is not None: + c_row_group_ids = [ + row_group for row_group in sorted(set(row_group_ids)) + ] + with nogil: + c_fragment = move(GetResultValue( + self.parquet_file_fragment.SubsetWithIds( + move(c_row_group_ids)))) + else: + raise ValueError( + "Need to specify one of 'filter' or 'row_group_ids'" + ) + + return Fragment.wrap(c_fragment) + + +cdef class ParquetReadOptions(_Weakrefable): + """ + Parquet format specific options for reading. + + Parameters + ---------- + dictionary_columns : list of string, default None + Names of columns which should be dictionary encoded as + they are read + coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). 
Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds + """ + + cdef public: + set dictionary_columns + TimeUnit _coerce_int96_timestamp_unit + + # Also see _PARQUET_READ_OPTIONS + def __init__(self, dictionary_columns=None, + coerce_int96_timestamp_unit=None): + self.dictionary_columns = set(dictionary_columns or set()) + self.coerce_int96_timestamp_unit = coerce_int96_timestamp_unit + + @property + def coerce_int96_timestamp_unit(self): + return timeunit_to_string(self._coerce_int96_timestamp_unit) + + @coerce_int96_timestamp_unit.setter + def coerce_int96_timestamp_unit(self, unit): + if unit is not None: + self._coerce_int96_timestamp_unit = string_to_timeunit(unit) + else: + self._coerce_int96_timestamp_unit = TimeUnit_NANO + + def equals(self, ParquetReadOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetReadOptions + + Returns + ------- + bool + """ + return (self.dictionary_columns == other.dictionary_columns and + self.coerce_int96_timestamp_unit == + other.coerce_int96_timestamp_unit) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + def __repr__(self): + return ( + f"" + ) + + +cdef class ParquetFileWriteOptions(FileWriteOptions): + + def update(self, **kwargs): + """ + Parameters + ---------- + **kwargs : dict + """ + arrow_fields = { + "use_deprecated_int96_timestamps", + "coerce_timestamps", + "allow_truncated_timestamps", + "use_compliant_nested_type", + } + + setters = set() + for name, value in kwargs.items(): + if name not in self._properties: + raise TypeError("unexpected parquet write option: " + name) + self._properties[name] = value + if name in arrow_fields: + setters.add(self._set_arrow_properties) + elif name == "encryption_config" and value is not None: + setters.add(self._set_encryption_config) + else: + setters.add(self._set_properties) + + for setter in setters: + setter() + + def _set_properties(self): + cdef CParquetFileWriteOptions* opts = self.parquet_options + + opts.writer_properties = _create_writer_properties( + use_dictionary=self._properties["use_dictionary"], + compression=self._properties["compression"], + version=self._properties["version"], + write_statistics=self._properties["write_statistics"], + data_page_size=self._properties["data_page_size"], + compression_level=self._properties["compression_level"], + use_byte_stream_split=( + self._properties["use_byte_stream_split"] + ), + column_encoding=self._properties["column_encoding"], + data_page_version=self._properties["data_page_version"], + encryption_properties=self._properties["encryption_properties"], + write_batch_size=self._properties["write_batch_size"], + dictionary_pagesize_limit=self._properties["dictionary_pagesize_limit"], + write_page_index=self._properties["write_page_index"], + write_page_checksum=self._properties["write_page_checksum"], + sorting_columns=self._properties["sorting_columns"], + ) + + def _set_arrow_properties(self): + cdef CParquetFileWriteOptions* opts = self.parquet_options + + opts.arrow_writer_properties = _create_arrow_writer_properties( + use_deprecated_int96_timestamps=( + self._properties["use_deprecated_int96_timestamps"] + ), + coerce_timestamps=self._properties["coerce_timestamps"], + allow_truncated_timestamps=( + self._properties["allow_truncated_timestamps"] + ), + writer_engine_version="V2", + use_compliant_nested_type=( + self._properties["use_compliant_nested_type"] + ) + ) + + def 
_set_encryption_config(self): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Encryption is not enabled in your installation of pyarrow, but an " + "encryption_config was provided." + ) + set_encryption_config(self, self._properties["encryption_config"]) + + cdef void init(self, const shared_ptr[CFileWriteOptions]& sp): + FileWriteOptions.init(self, sp) + self.parquet_options = sp.get() + self._properties = dict( + use_dictionary=True, + compression="snappy", + version="2.6", + write_statistics=None, + data_page_size=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version="1.0", + use_deprecated_int96_timestamps=False, + coerce_timestamps=None, + allow_truncated_timestamps=False, + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + write_page_index=False, + encryption_config=None, + write_page_checksum=False, + sorting_columns=None, + ) + + self._set_properties() + self._set_arrow_properties() + + def __repr__(self): + return "".format( + " ".join([f"{key}={value}" for key, value in self._properties.items()]) + ) + + +cdef set _PARQUET_READ_OPTIONS = { + 'dictionary_columns', 'coerce_int96_timestamp_unit' +} + + +cdef class ParquetFragmentScanOptions(FragmentScanOptions): + """ + Scan-specific options for Parquet fragments. + + Parameters + ---------- + use_buffered_stream : bool, default False + Read files through buffered input streams rather than loading entire + row groups at once. This may be enabled to reduce memory overhead. + Disabled by default. + buffer_size : int, default 8192 + Size of buffered stream, if enabled. Default is 8KB. + pre_buffer : bool, default True + If enabled, pre-buffer the raw Parquet data instead of issuing one + read per column chunk. This can improve performance on high-latency + filesystems (e.g. S3, GCS) by coalescing and issuing file reads in + parallel using a background I/O thread pool. + Set to False if you want to prioritize minimal memory usage + over maximum speed. + cache_options : pyarrow.CacheOptions, default None + Cache options used when pre_buffer is enabled. The default values should + be good for most use cases. You may want to adjust these for example if + you have exceptionally high latency to the file system. + thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + decryption_config : pyarrow.dataset.ParquetDecryptionConfig, default None + If not None, use the provided ParquetDecryptionConfig to decrypt the + Parquet file. + page_checksum_verification : bool, default False + If True, verify the page checksum for each page read from the file. 
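+
+    A minimal sketch of wiring these options into a dataset (the path is
+    illustrative)::
+
+        import pyarrow.dataset as ds
+
+        fmt = ds.ParquetFileFormat(
+            default_fragment_scan_options=ds.ParquetFragmentScanOptions(
+                use_buffered_stream=True, buffer_size=64 * 1024))
+        dataset = ds.dataset("data/", format=fmt)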
+ """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, bint use_buffered_stream=False, + buffer_size=8192, + bint pre_buffer=True, + cache_options=None, + thrift_string_size_limit=None, + thrift_container_size_limit=None, + decryption_config=None, + bint page_checksum_verification=False): + self.init(shared_ptr[CFragmentScanOptions]( + new CParquetFragmentScanOptions())) + self.use_buffered_stream = use_buffered_stream + self.buffer_size = buffer_size + self.pre_buffer = pre_buffer + if cache_options is not None: + self.cache_options = cache_options + if thrift_string_size_limit is not None: + self.thrift_string_size_limit = thrift_string_size_limit + if thrift_container_size_limit is not None: + self.thrift_container_size_limit = thrift_container_size_limit + if decryption_config is not None: + self.parquet_decryption_config = decryption_config + self.page_checksum_verification = page_checksum_verification + + cdef void init(self, const shared_ptr[CFragmentScanOptions]& sp): + FragmentScanOptions.init(self, sp) + self.parquet_options = sp.get() + + cdef CReaderProperties* reader_properties(self): + return self.parquet_options.reader_properties.get() + + cdef ArrowReaderProperties* arrow_reader_properties(self): + return self.parquet_options.arrow_reader_properties.get() + + @property + def use_buffered_stream(self): + return self.reader_properties().is_buffered_stream_enabled() + + @use_buffered_stream.setter + def use_buffered_stream(self, bint use_buffered_stream): + if use_buffered_stream: + self.reader_properties().enable_buffered_stream() + else: + self.reader_properties().disable_buffered_stream() + + @property + def buffer_size(self): + return self.reader_properties().buffer_size() + + @buffer_size.setter + def buffer_size(self, buffer_size): + if buffer_size <= 0: + raise ValueError("Buffer size must be larger than zero") + self.reader_properties().set_buffer_size(buffer_size) + + @property + def pre_buffer(self): + return self.arrow_reader_properties().pre_buffer() + + @pre_buffer.setter + def pre_buffer(self, bint pre_buffer): + self.arrow_reader_properties().set_pre_buffer(pre_buffer) + + @property + def cache_options(self): + return CacheOptions.wrap(self.arrow_reader_properties().cache_options()) + + @cache_options.setter + def cache_options(self, CacheOptions options): + self.arrow_reader_properties().set_cache_options(options.unwrap()) + + @property + def thrift_string_size_limit(self): + return self.reader_properties().thrift_string_size_limit() + + @thrift_string_size_limit.setter + def thrift_string_size_limit(self, size): + if size <= 0: + raise ValueError("size must be larger than zero") + self.reader_properties().set_thrift_string_size_limit(size) + + @property + def thrift_container_size_limit(self): + return self.reader_properties().thrift_container_size_limit() + + @thrift_container_size_limit.setter + def thrift_container_size_limit(self, size): + if size <= 0: + raise ValueError("size must be larger than zero") + self.reader_properties().set_thrift_container_size_limit(size) + + @property + def parquet_decryption_config(self): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Unable to access encryption features. " + "Encryption is not enabled in your installation of pyarrow." 
+ ) + return self._parquet_decryption_config + + @parquet_decryption_config.setter + def parquet_decryption_config(self, config): + if not parquet_encryption_enabled: + raise NotImplementedError( + "Encryption is not enabled in your installation of pyarrow, but a " + "decryption_config was provided." + ) + set_decryption_config(self, config) + self._parquet_decryption_config = config + + @property + def page_checksum_verification(self): + return self.reader_properties().page_checksum_verification() + + @page_checksum_verification.setter + def page_checksum_verification(self, bint page_checksum_verification): + self.reader_properties().set_page_checksum_verification(page_checksum_verification) + + def equals(self, ParquetFragmentScanOptions other): + """ + Parameters + ---------- + other : pyarrow.dataset.ParquetFragmentScanOptions + + Returns + ------- + bool + """ + attrs = ( + self.use_buffered_stream, self.buffer_size, self.pre_buffer, self.cache_options, + self.thrift_string_size_limit, self.thrift_container_size_limit, + self.page_checksum_verification) + other_attrs = ( + other.use_buffered_stream, other.buffer_size, other.pre_buffer, other.cache_options, + other.thrift_string_size_limit, + other.thrift_container_size_limit, other.page_checksum_verification) + return attrs == other_attrs + + @staticmethod + @binding(True) # Required for Cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return ParquetFragmentScanOptions(**kwargs) + + def __reduce__(self): + kwargs = dict( + use_buffered_stream=self.use_buffered_stream, + buffer_size=self.buffer_size, + pre_buffer=self.pre_buffer, + cache_options=self.cache_options, + thrift_string_size_limit=self.thrift_string_size_limit, + thrift_container_size_limit=self.thrift_container_size_limit, + page_checksum_verification=self.page_checksum_verification + ) + return ParquetFragmentScanOptions._reconstruct, (kwargs,) + + +cdef class ParquetFactoryOptions(_Weakrefable): + """ + Influences the discovery of parquet dataset. + + Parameters + ---------- + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. + The ignored files will still be part of the Dataset, but will not + have partition information. + partitioning : Partitioning, PartitioningFactory, optional + The partitioning scheme applied to fragments, see ``Partitioning``. + validate_column_chunk_paths : bool, default False + Assert that all ColumnChunk paths are consistent. The parquet spec + allows for ColumnChunk data to be stored in multiple files, but + ParquetDatasetFactory supports only a single file with all ColumnChunk + data. If this flag is set construction of a ParquetDatasetFactory will + raise an error if ColumnChunk data is not resident in a single file. 
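+
+    A sketch of passing these options to ``ParquetDatasetFactory`` (the
+    ``_metadata`` path and filesystem are illustrative)::
+
+        from pyarrow import fs
+
+        options = ParquetFactoryOptions(validate_column_chunk_paths=True)
+        factory = ParquetDatasetFactory(
+            "data/_metadata", fs.LocalFileSystem(), ParquetFileFormat(),
+            options=options)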
+ """ + + cdef: + CParquetFactoryOptions options + + __slots__ = () # avoid mistakingly creating attributes + + def __init__(self, partition_base_dir=None, partitioning=None, + validate_column_chunk_paths=False): + if isinstance(partitioning, PartitioningFactory): + self.partitioning_factory = partitioning + elif isinstance(partitioning, Partitioning): + self.partitioning = partitioning + + if partition_base_dir is not None: + self.partition_base_dir = partition_base_dir + + self.options.validate_column_chunk_paths = validate_column_chunk_paths + + cdef inline CParquetFactoryOptions unwrap(self): + return self.options + + @property + def partitioning(self): + """Partitioning to apply to discovered files. + + NOTE: setting this property will overwrite partitioning_factory. + """ + c_partitioning = self.options.partitioning.partitioning() + if c_partitioning.get() == nullptr: + return None + return Partitioning.wrap(c_partitioning) + + @partitioning.setter + def partitioning(self, Partitioning value): + self.options.partitioning = ( value).unwrap() + + @property + def partitioning_factory(self): + """PartitioningFactory to apply to discovered files and + discover a Partitioning. + + NOTE: setting this property will overwrite partitioning. + """ + c_factory = self.options.partitioning.factory() + if c_factory.get() == nullptr: + return None + return PartitioningFactory.wrap(c_factory, None, None) + + @partitioning_factory.setter + def partitioning_factory(self, PartitioningFactory value): + self.options.partitioning = ( value).unwrap() + + @property + def partition_base_dir(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return frombytes(self.options.partition_base_dir) + + @partition_base_dir.setter + def partition_base_dir(self, value): + self.options.partition_base_dir = tobytes(value) + + @property + def validate_column_chunk_paths(self): + """ + Base directory to strip paths before applying the partitioning. + """ + return self.options.validate_column_chunk_paths + + @validate_column_chunk_paths.setter + def validate_column_chunk_paths(self, value): + self.options.validate_column_chunk_paths = value + + +cdef class ParquetDatasetFactory(DatasetFactory): + """ + Create a ParquetDatasetFactory from a Parquet `_metadata` file. + + Parameters + ---------- + metadata_path : str + Path to the `_metadata` parquet metadata-only file generated with + `pyarrow.parquet.write_metadata`. + filesystem : pyarrow.fs.FileSystem + Filesystem to read the metadata_path from, and subsequent parquet + files. + format : ParquetFileFormat + Parquet format options. + options : ParquetFactoryOptions, optional + Various flags influencing the discovery of filesystem paths. 
+ """ + + cdef: + CParquetDatasetFactory* parquet_factory + + def __init__(self, metadata_path, FileSystem filesystem not None, + FileFormat format not None, + ParquetFactoryOptions options=None): + cdef: + c_string c_path + shared_ptr[CFileSystem] c_filesystem + shared_ptr[CParquetFileFormat] c_format + CResult[shared_ptr[CDatasetFactory]] result + CParquetFactoryOptions c_options + + c_path = tobytes(metadata_path) + c_filesystem = filesystem.unwrap() + c_format = static_pointer_cast[CParquetFileFormat, CFileFormat]( + format.unwrap()) + options = options or ParquetFactoryOptions() + c_options = options.unwrap() + + with nogil: + result = CParquetDatasetFactory.MakeFromMetaDataPath( + c_path, c_filesystem, c_format, c_options) + self.init(GetResultValue(result)) + + cdef init(self, shared_ptr[CDatasetFactory]& sp): + DatasetFactory.init(self, sp) + self.parquet_factory = sp.get() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8e7ade9c3849d4033127d2154b9b2f95845d9eba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.pyx new file mode 100644 index 0000000000000000000000000000000000000000..7dd61c9a986ff1044fb7b5c22a2f24725710afd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_feather.pyx @@ -0,0 +1,117 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# --------------------------------------------------------------------- +# Implement Feather file format + +# cython: profile=False +# distutils: language = c++ +# cython: language_level=3 + +from cython.operator cimport dereference as deref +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_feather cimport * +from pyarrow.lib cimport (check_status, Table, _Weakrefable, + get_writer, get_reader, pyarrow_wrap_table) +from pyarrow.lib import tobytes + + +class FeatherError(Exception): + pass + + +def write_feather(Table table, object dest, compression=None, + compression_level=None, chunksize=None, version=2): + cdef shared_ptr[COutputStream] sink + get_writer(dest, &sink) + + cdef CFeatherProperties properties + if version == 2: + properties.version = kFeatherV2Version + else: + properties.version = kFeatherV1Version + + if compression == 'zstd': + properties.compression = CCompressionType_ZSTD + elif compression == 'lz4': + properties.compression = CCompressionType_LZ4_FRAME + else: + properties.compression = CCompressionType_UNCOMPRESSED + + if chunksize is not None: + properties.chunksize = chunksize + + if compression_level is not None: + properties.compression_level = compression_level + + with nogil: + check_status(WriteFeather(deref(table.table), sink.get(), + properties)) + + +cdef class FeatherReader(_Weakrefable): + cdef: + shared_ptr[CFeatherReader] reader + + def __cinit__(self, source, c_bool use_memory_map, c_bool use_threads): + cdef: + shared_ptr[CRandomAccessFile] reader + CIpcReadOptions options = CIpcReadOptions.Defaults() + options.use_threads = use_threads + + get_reader(source, use_memory_map, &reader) + with nogil: + self.reader = GetResultValue(CFeatherReader.Open(reader, options)) + + @property + def version(self): + return self.reader.get().version() + + def read(self): + cdef shared_ptr[CTable] sp_table + with nogil: + check_status(self.reader.get() + .Read(&sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_indices(self, indices): + cdef: + shared_ptr[CTable] sp_table + vector[int] c_indices + + for index in indices: + c_indices.push_back(index) + with nogil: + check_status(self.reader.get() + .Read(c_indices, &sp_table)) + + return pyarrow_wrap_table(sp_table) + + def read_names(self, names): + cdef: + shared_ptr[CTable] sp_table + vector[c_string] c_names + + for name in names: + c_names.push_back(tobytes(name)) + with nogil: + check_status(self.reader.get() + .Read(c_names, &sp_table)) + + return pyarrow_wrap_table(sp_table) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.pyx new file mode 100644 index 0000000000000000000000000000000000000000..67ee7590560f0d6dad1b1d38977b39cfd303dd49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.pyx @@ -0,0 +1,3189 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +import collections +import enum +import re +import time +import warnings +import weakref + +from cython.operator cimport dereference as deref +from cython.operator cimport postincrement +from libcpp cimport bool as c_bool + +from pyarrow.lib cimport * +from pyarrow.lib import (ArrowCancelled, ArrowException, ArrowInvalid, + SignalStopHandler) +from pyarrow.lib import as_buffer, frombytes, tobytes +from pyarrow.includes.libarrow_flight cimport * +from pyarrow.ipc import _get_legacy_format_default, _ReadPandasMixin +import pyarrow.lib as lib + + +cdef CFlightCallOptions DEFAULT_CALL_OPTIONS + + +cdef int check_flight_status(const CStatus& status) except -1 nogil: + cdef shared_ptr[FlightStatusDetail] detail + + if status.ok(): + return 0 + + detail = FlightStatusDetail.UnwrapStatus(status) + if detail: + with gil: + message = frombytes(status.message(), safe=True) + detail_msg = detail.get().extra_info() + if detail.get().code() == CFlightStatusInternal: + raise FlightInternalError(message, detail_msg) + elif detail.get().code() == CFlightStatusFailed: + message = _munge_grpc_python_error(message) + raise FlightServerError(message, detail_msg) + elif detail.get().code() == CFlightStatusTimedOut: + raise FlightTimedOutError(message, detail_msg) + elif detail.get().code() == CFlightStatusCancelled: + raise FlightCancelledError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthenticated: + raise FlightUnauthenticatedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthorized: + raise FlightUnauthorizedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnavailable: + raise FlightUnavailableError(message, detail_msg) + + size_detail = FlightWriteSizeStatusDetail.UnwrapStatus(status) + if size_detail: + with gil: + message = frombytes(status.message(), safe=True) + raise FlightWriteSizeExceededError( + message, + size_detail.get().limit(), size_detail.get().actual()) + + return check_status(status) + + +_FLIGHT_SERVER_ERROR_REGEX = re.compile( + r'Flight RPC failed with message: (.*). Detail: ' + r'Python exception: (.*)', + re.DOTALL +) + + +def _munge_grpc_python_error(message): + m = _FLIGHT_SERVER_ERROR_REGEX.match(message) + if m: + return ('Flight RPC failed with Python exception \"{}: {}\"' + .format(m.group(2), m.group(1))) + else: + return message + + +cdef IpcWriteOptions _get_options(options): + return _get_legacy_format_default( + use_legacy_format=None, options=options) + + +cdef class FlightCallOptions(_Weakrefable): + """RPC-layer options for a Flight call.""" + + cdef: + CFlightCallOptions options + + def __init__(self, timeout=None, write_options=None, headers=None, + IpcReadOptions read_options=None): + """Create call options. + + Parameters + ---------- + timeout : float, None + A timeout for the call, in seconds. None means that the + timeout defaults to an implementation-specific value. + write_options : pyarrow.ipc.IpcWriteOptions, optional + IPC write options. The default options can be controlled + by environment variables (see pyarrow.ipc). 
+ headers : List[Tuple[str, str]], optional + A list of arbitrary headers as key, value tuples + read_options : pyarrow.ipc.IpcReadOptions, optional + Serialization options for reading IPC format. + """ + cdef IpcWriteOptions c_write_options + + if timeout is not None: + self.options.timeout = CTimeoutDuration(timeout) + if write_options is not None: + c_write_options = _get_options(write_options) + self.options.write_options = c_write_options.c_options + if read_options is not None: + if not isinstance(read_options, IpcReadOptions): + raise TypeError("expected IpcReadOptions, got {}" + .format(type(read_options))) + self.options.read_options = read_options.c_options + if headers is not None: + self.options.headers = headers + + @staticmethod + cdef CFlightCallOptions* unwrap(obj): + if not obj: + return &DEFAULT_CALL_OPTIONS + elif isinstance(obj, FlightCallOptions): + return &(( obj).options) + raise TypeError("Expected a FlightCallOptions object, not " + "'{}'".format(type(obj))) + + +_CertKeyPair = collections.namedtuple('_CertKeyPair', ['cert', 'key']) + + +class CertKeyPair(_CertKeyPair): + """A TLS certificate and key for use in Flight.""" + + +cdef class FlightError(Exception): + """ + The base class for Flight-specific errors. + + A server may raise this class or one of its subclasses to provide + a more detailed error to clients. + + Parameters + ---------- + message : str, optional + The error message. + extra_info : bytes, optional + Extra binary error details that were provided by the + server/will be sent to the client. + + Attributes + ---------- + extra_info : bytes + Extra binary error details that were provided by the + server/will be sent to the client. + """ + + cdef dict __dict__ + + def __init__(self, message='', extra_info=b''): + super().__init__(message) + self.extra_info = tobytes(extra_info) + + cdef CStatus to_status(self): + message = tobytes("Flight error: {}".format(str(self))) + return CStatus_UnknownError(message) + + +cdef class FlightInternalError(FlightError, ArrowException): + """An error internal to the Flight server occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusInternal, + tobytes(str(self)), self.extra_info) + + +cdef class FlightTimedOutError(FlightError, ArrowException): + """The Flight RPC call timed out.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusTimedOut, + tobytes(str(self)), self.extra_info) + + +cdef class FlightCancelledError(FlightError, ArrowCancelled): + """The operation was cancelled.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusCancelled, tobytes(str(self)), + self.extra_info) + + +cdef class FlightServerError(FlightError, ArrowException): + """A server error occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusFailed, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnauthenticatedError(FlightError, ArrowException): + """The client is not authenticated.""" + + cdef CStatus to_status(self): + return MakeFlightError( + CFlightStatusUnauthenticated, tobytes(str(self)), self.extra_info) + + +cdef class FlightUnauthorizedError(FlightError, ArrowException): + """The client is not authorized to perform the given operation.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusUnauthorized, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnavailableError(FlightError, ArrowException): + """The server is not reachable or available.""" + + cdef CStatus to_status(self): + 
return MakeFlightError(CFlightStatusUnavailable, tobytes(str(self)), + self.extra_info) + + +class FlightWriteSizeExceededError(ArrowInvalid): + """A write operation exceeded the client-configured limit.""" + + def __init__(self, message, limit, actual): + super().__init__(message) + self.limit = limit + self.actual = actual + + +cdef class Action(_Weakrefable): + """An action executable on a Flight service.""" + cdef: + CAction action + + def __init__(self, action_type, buf): + """Create an action from a type and a buffer. + + Parameters + ---------- + action_type : bytes or str + buf : Buffer or bytes-like object + """ + self.action.type = tobytes(action_type) + self.action.body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def type(self): + """The action type.""" + return frombytes(self.action.type) + + @property + def body(self): + """The action body (arguments for the action).""" + return pyarrow_wrap_buffer(self.action.body) + + @staticmethod + cdef CAction unwrap(action) except *: + if not isinstance(action, Action): + raise TypeError("Must provide Action, not '{}'".format( + type(action))) + return ( action).action + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.action.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef Action action = Action.__new__(Action) + action.action = GetResultValue( + CAction.Deserialize(tobytes(serialized))) + return action + + def __eq__(self, Action other): + return self.action == other.action + + def __repr__(self): + return (f"") + + +_ActionType = collections.namedtuple('_ActionType', ['type', 'description']) + + +class ActionType(_ActionType): + """A type of action that is executable on a Flight service.""" + + def make_action(self, buf): + """Create an Action with this type. + + Parameters + ---------- + buf : obj + An Arrow buffer or Python bytes or bytes-like object. + """ + return Action(self.type, buf) + + +cdef class Result(_Weakrefable): + """A result from executing an Action.""" + cdef: + unique_ptr[CFlightResult] result + + def __init__(self, buf): + """Create a new result. + + Parameters + ---------- + buf : Buffer or bytes-like object + """ + self.result.reset(new CFlightResult()) + self.result.get().body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def body(self): + """Get the Buffer containing the result.""" + return pyarrow_wrap_buffer(self.result.get().body) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef Result result = Result.__new__(Result) + result.result.reset(new CFlightResult(GetResultValue( + CFlightResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, Result other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class BasicAuth(_Weakrefable): + """A container for basic auth.""" + cdef: + unique_ptr[CBasicAuth] basic_auth + + def __init__(self, username=None, password=None): + """Create a new basic auth object. + + Parameters + ---------- + username : string + password : string + """ + self.basic_auth.reset(new CBasicAuth()) + if username: + self.basic_auth.get().username = tobytes(username) + if password: + self.basic_auth.get().password = tobytes(password) + + @property + def username(self): + """Get the username.""" + return self.basic_auth.get().username + + @property + def password(self): + """Get the password.""" + return self.basic_auth.get().password + + @staticmethod + def deserialize(serialized): + auth = BasicAuth() + auth.basic_auth.reset(new CBasicAuth(GetResultValue( + CBasicAuth.Deserialize(tobytes(serialized))))) + return auth + + def serialize(self): + return GetResultValue(self.basic_auth.get().SerializeToString()) + + def __eq__(self, BasicAuth other): + return deref(self.basic_auth.get()) == deref(other.basic_auth.get()) + + def __repr__(self): + return (f"") + + +class DescriptorType(enum.Enum): + """ + The type of a FlightDescriptor. + + Attributes + ---------- + + UNKNOWN + An unknown descriptor type. + + PATH + A Flight stream represented by a path. + + CMD + A Flight stream represented by an application-defined command. + + """ + + UNKNOWN = 0 + PATH = 1 + CMD = 2 + + +class FlightMethod(enum.Enum): + """The implemented methods in Flight.""" + + INVALID = 0 + HANDSHAKE = 1 + LIST_FLIGHTS = 2 + GET_FLIGHT_INFO = 3 + GET_SCHEMA = 4 + DO_GET = 5 + DO_PUT = 6 + DO_ACTION = 7 + LIST_ACTIONS = 8 + DO_EXCHANGE = 9 + + +cdef wrap_flight_method(CFlightMethod method): + if method == CFlightMethodHandshake: + return FlightMethod.HANDSHAKE + elif method == CFlightMethodListFlights: + return FlightMethod.LIST_FLIGHTS + elif method == CFlightMethodGetFlightInfo: + return FlightMethod.GET_FLIGHT_INFO + elif method == CFlightMethodGetSchema: + return FlightMethod.GET_SCHEMA + elif method == CFlightMethodDoGet: + return FlightMethod.DO_GET + elif method == CFlightMethodDoPut: + return FlightMethod.DO_PUT + elif method == CFlightMethodDoAction: + return FlightMethod.DO_ACTION + elif method == CFlightMethodListActions: + return FlightMethod.LIST_ACTIONS + elif method == CFlightMethodDoExchange: + return FlightMethod.DO_EXCHANGE + return FlightMethod.INVALID + + +cdef class FlightDescriptor(_Weakrefable): + """A description of a data stream available from a Flight service.""" + cdef: + CFlightDescriptor descriptor + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.flight.FlightDescriptor.for_{path,command}` " + "function instead." 
+ .format(self.__class__.__name__)) + + @staticmethod + def for_path(*path): + """Create a FlightDescriptor for a resource path.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypePath + result.descriptor.path = [tobytes(p) for p in path] + return result + + @staticmethod + def for_command(command): + """Create a FlightDescriptor for an opaque command.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypeCmd + result.descriptor.cmd = tobytes(command) + return result + + @property + def descriptor_type(self): + """Get the type of this descriptor.""" + if self.descriptor.type == CDescriptorTypeUnknown: + return DescriptorType.UNKNOWN + elif self.descriptor.type == CDescriptorTypePath: + return DescriptorType.PATH + elif self.descriptor.type == CDescriptorTypeCmd: + return DescriptorType.CMD + raise RuntimeError("Invalid descriptor type!") + + @property + def command(self): + """Get the command for this descriptor.""" + if self.descriptor_type != DescriptorType.CMD: + return None + return self.descriptor.cmd + + @property + def path(self): + """Get the path for this descriptor.""" + if self.descriptor_type != DescriptorType.PATH: + return None + return self.descriptor.path + + def __repr__(self): + if self.descriptor_type == DescriptorType.PATH: + return f"" + elif self.descriptor_type == DescriptorType.CMD: + return f"" + else: + return "" + + @staticmethod + cdef CFlightDescriptor unwrap(descriptor) except *: + if not isinstance(descriptor, FlightDescriptor): + raise TypeError("Must provide a FlightDescriptor, not '{}'".format( + type(descriptor))) + return ( descriptor).descriptor + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.descriptor.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + descriptor.descriptor = GetResultValue( + CFlightDescriptor.Deserialize(tobytes(serialized))) + return descriptor + + def __eq__(self, FlightDescriptor other): + return self.descriptor == other.descriptor + + +cdef class Ticket(_Weakrefable): + """A ticket for requesting a Flight stream.""" + + cdef: + CTicket c_ticket + + def __init__(self, ticket): + self.c_ticket.ticket = tobytes(ticket) + + @property + def ticket(self): + return self.c_ticket.ticket + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.c_ticket.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef Ticket ticket = Ticket.__new__(Ticket) + ticket.c_ticket = GetResultValue( + CTicket.Deserialize(tobytes(serialized))) + return ticket + + def __eq__(self, Ticket other): + return self.c_ticket == other.c_ticket + + def __repr__(self): + return f"" + + +cdef class Location(_Weakrefable): + """The location of a Flight service.""" + cdef: + CLocation location + + def __init__(self, uri): + check_flight_status(CLocation.Parse(tobytes(uri)).Value(&self.location)) + + def __repr__(self): + return f'' + + @property + def uri(self): + return self.location.ToString() + + def equals(self, Location other): + return self == other + + def __eq__(self, other): + if not isinstance(other, Location): + return NotImplemented + return self.location.Equals(( other).location) + + @staticmethod + def for_grpc_tcp(host, port): + """Create a Location for a TCP-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTcp(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_tls(host, port): + """Create a Location for a TLS-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTls(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_unix(path): + """Create a Location for a domain socket-based gRPC service.""" + cdef: + c_string c_path = tobytes(path) + Location result = Location.__new__(Location) + check_flight_status(CLocation.ForGrpcUnix(c_path).Value(&result.location)) + return result + + @staticmethod + cdef Location wrap(CLocation location): + cdef Location result = Location.__new__(Location) + result.location = location + return result + + @staticmethod + cdef CLocation unwrap(object location) except *: + cdef CLocation c_location + if isinstance(location, str): + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + return c_location + elif not isinstance(location, Location): + raise TypeError("Must provide a Location, not '{}'".format( + type(location))) + return ( location).location + + +cdef class FlightEndpoint(_Weakrefable): + """A Flight stream, along with the ticket and locations to access it.""" + cdef: + CFlightEndpoint endpoint + + def __init__(self, ticket, locations): + """Create a FlightEndpoint from a ticket and list of locations. + + Parameters + ---------- + ticket : Ticket or bytes + the ticket needed to access this flight + locations : list of string URIs + locations where this flight is available + + Raises + ------ + ArrowException + If one of the location URIs is not a valid URI. + """ + cdef: + CLocation c_location + + if isinstance(ticket, Ticket): + self.endpoint.ticket.ticket = tobytes(ticket.ticket) + else: + self.endpoint.ticket.ticket = tobytes(ticket) + + for location in locations: + if isinstance(location, Location): + c_location = ( location).location + else: + c_location = CLocation() + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + self.endpoint.locations.push_back(c_location) + + @property + def ticket(self): + """Get the ticket in this endpoint.""" + return Ticket(self.endpoint.ticket.ticket) + + @property + def locations(self): + return [Location.wrap(location) + for location in self.endpoint.locations] + + def serialize(self): + """Get the wire-format representation of this type. 
+ + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.endpoint.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef FlightEndpoint endpoint = FlightEndpoint.__new__(FlightEndpoint) + endpoint.endpoint = GetResultValue( + CFlightEndpoint.Deserialize(tobytes(serialized))) + return endpoint + + def __repr__(self): + return (f"") + + def __eq__(self, FlightEndpoint other): + return self.endpoint == other.endpoint + + +cdef class SchemaResult(_Weakrefable): + """The serialized schema returned from a GetSchema request.""" + cdef: + unique_ptr[CSchemaResult] result + + def __init__(self, Schema schema): + """Create a SchemaResult from a schema. + + Parameters + ---------- + schema: Schema + the schema of the data in this flight. + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + check_flight_status(CreateSchemaResult(c_schema, &self.result)) + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.result.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef SchemaResult result = SchemaResult.__new__(SchemaResult) + result.result.reset(new CSchemaResult(GetResultValue( + CSchemaResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, SchemaResult other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class FlightInfo(_Weakrefable): + """A description of a Flight stream.""" + cdef: + unique_ptr[CFlightInfo] info + + @staticmethod + cdef wrap(CFlightInfo c_info): + cdef FlightInfo obj = FlightInfo.__new__(FlightInfo) + obj.info.reset(new CFlightInfo(move(c_info))) + return obj + + def __init__(self, Schema schema, FlightDescriptor descriptor, endpoints, + total_records, total_bytes): + """Create a FlightInfo object from a schema, descriptor, and endpoints. + + Parameters + ---------- + schema : Schema + the schema of the data in this flight. + descriptor : FlightDescriptor + the descriptor for this flight. + endpoints : list of FlightEndpoint + a list of endpoints where this flight is available. 
+ total_records : int + the total records in this flight, or -1 if unknown + total_bytes : int + the total bytes in this flight, or -1 if unknown + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + vector[CFlightEndpoint] c_endpoints + + for endpoint in endpoints: + if isinstance(endpoint, FlightEndpoint): + c_endpoints.push_back(( endpoint).endpoint) + else: + raise TypeError('Endpoint {} is not instance of' + ' FlightEndpoint'.format(endpoint)) + + check_flight_status(CreateFlightInfo(c_schema, + descriptor.descriptor, + c_endpoints, + total_records, + total_bytes, &self.info)) + + @property + def total_records(self): + """The total record count of this flight, or -1 if unknown.""" + return self.info.get().total_records() + + @property + def total_bytes(self): + """The size in bytes of the data in this flight, or -1 if unknown.""" + return self.info.get().total_bytes() + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.info.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + @property + def descriptor(self): + """The descriptor of the data in this flight.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor = self.info.get().descriptor() + return result + + @property + def endpoints(self): + """The endpoints where this flight is available.""" + # TODO: get Cython to iterate over reference directly + cdef: + vector[CFlightEndpoint] endpoints = self.info.get().endpoints() + FlightEndpoint py_endpoint + + result = [] + for endpoint in endpoints: + py_endpoint = FlightEndpoint.__new__(FlightEndpoint) + py_endpoint.endpoint = endpoint + result.append(py_endpoint) + return result + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.info.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef FlightInfo info = FlightInfo.__new__(FlightInfo) + info.info = move(GetResultValue( + CFlightInfo.Deserialize(tobytes(serialized)))) + return info + + def __eq__(self, FlightInfo other): + return deref(self.info.get()) == deref(other.info.get()) + + def __repr__(self): + return (f"") + + +cdef class FlightStreamChunk(_Weakrefable): + """A RecordBatch with application metadata on the side.""" + cdef: + CFlightStreamChunk chunk + + @property + def data(self): + if self.chunk.data == NULL: + return None + return pyarrow_wrap_batch(self.chunk.data) + + @property + def app_metadata(self): + if self.chunk.app_metadata == NULL: + return None + return pyarrow_wrap_buffer(self.chunk.app_metadata) + + def __iter__(self): + return iter((self.data, self.app_metadata)) + + def __repr__(self): + return "".format( + self.chunk.data != NULL, self.chunk.app_metadata != NULL) + + +cdef class _MetadataRecordBatchReader(_Weakrefable, _ReadPandasMixin): + """A reader for Flight streams.""" + + # Needs to be separate class so the "real" class can subclass the + # pure-Python mixin class + + cdef dict __dict__ + cdef shared_ptr[CMetadataRecordBatchReader] reader + + def __iter__(self): + return self + + def __next__(self): + return self.read_chunk() + + @property + def schema(self): + """Get the schema for this reader.""" + cdef shared_ptr[CSchema] c_schema + with nogil: + check_flight_status(self.reader.get().GetSchema().Value(&c_schema)) + return pyarrow_wrap_schema(c_schema) + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + with nogil: + check_flight_status(self.reader.get().ToTable().Value(&c_table)) + return pyarrow_wrap_table(c_table) + + def read_chunk(self): + """Read the next FlightStreamChunk along with any metadata. + + Returns + ------- + chunk : FlightStreamChunk + The next FlightStreamChunk in the stream. + + Raises + ------ + StopIteration + when the stream is finished + """ + cdef: + FlightStreamChunk chunk = FlightStreamChunk() + + with nogil: + check_flight_status(self.reader.get().Next().Value(&chunk.chunk)) + + if chunk.chunk.data == NULL and chunk.chunk.app_metadata == NULL: + raise StopIteration + + return chunk + + def to_reader(self): + """Convert this reader into a regular RecordBatchReader. + + This may fail if the schema cannot be read from the remote end. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + with nogil: + reader.reader = GetResultValue(MakeRecordBatchReader(self.reader)) + + return reader + + +cdef class MetadataRecordBatchReader(_MetadataRecordBatchReader): + """The base class for readers for Flight streams. + + See Also + -------- + FlightStreamReader + """ + + +cdef class FlightStreamReader(MetadataRecordBatchReader): + """A reader that can also be canceled.""" + + def cancel(self): + """Cancel the read operation.""" + with nogil: + ( self.reader.get()).Cancel() + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + CStopToken stop_token + with SignalStopHandler() as stop_handler: + stop_token = ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + ( self.reader.get()) + .ToTableWithStopToken(stop_token).Value(&c_table)) + return pyarrow_wrap_table(c_table) + + +cdef class MetadataRecordBatchWriter(_CRecordBatchWriter): + """A RecordBatchWriter that also allows writing application metadata. 
+ + This class is a context manager; on exit, close() will be called. + """ + + cdef CMetadataRecordBatchWriter* _writer(self) nogil: + return self.writer.get() + + def begin(self, schema: Schema, options=None): + """Prepare to write data to this stream with the given schema.""" + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CIpcWriteOptions c_options = _get_options(options).c_options + with nogil: + check_flight_status(self._writer().Begin(c_schema, c_options)) + + def write_metadata(self, buf): + """Write Flight metadata by itself.""" + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteMetadata(c_buf)) + + def write_batch(self, RecordBatch batch): + """ + Write RecordBatch to stream. + + Parameters + ---------- + batch : RecordBatch + """ + cdef: + shared_ptr[const CKeyValueMetadata] custom_metadata + + # Override superclass method to use check_flight_status so we + # can generate FlightWriteSizeExceededError. We don't do this + # for write_table as callers who intend to handle the error + # and retry with a smaller batch should be working with + # individual batches to have control. + + with nogil: + check_flight_status( + self._writer().WriteRecordBatch(deref(batch.batch), custom_metadata)) + + def write_table(self, Table table, max_chunksize=None, **kwargs): + """ + Write Table to stream in (contiguous) RecordBatch objects. + + Parameters + ---------- + table : Table + max_chunksize : int, default None + Maximum size for RecordBatch chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. + """ + cdef: + # max_chunksize must be > 0 to have any impact + int64_t c_max_chunksize = -1 + + if 'chunksize' in kwargs: + max_chunksize = kwargs['chunksize'] + msg = ('The parameter chunksize is deprecated for the write_table ' + 'methods as of 0.15, please use parameter ' + 'max_chunksize instead') + warnings.warn(msg, FutureWarning) + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + + with nogil: + check_flight_status( + self._writer().WriteTable(table.table[0], c_max_chunksize)) + + def close(self): + """ + Close stream and write end-of-stream 0 marker. + """ + with nogil: + check_flight_status(self._writer().Close()) + + def write_with_metadata(self, RecordBatch batch, buf): + """Write a RecordBatch along with Flight metadata. + + Parameters + ---------- + batch : RecordBatch + The next RecordBatch in the stream. + buf : Buffer + Application-specific metadata for the batch as defined by + Flight. 
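+
+        Examples
+        --------
+        A minimal sketch (``writer`` is assumed to come from an earlier
+        ``do_put`` call and ``batch`` from user code)::
+
+            writer.write_with_metadata(batch, b"app-specific metadata")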
+ """ + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteWithMetadata(deref(batch.batch), c_buf)) + + +cdef class FlightStreamWriter(MetadataRecordBatchWriter): + """A writer that also allows closing the write side of a stream.""" + + def done_writing(self): + """Indicate that the client is done writing, but not done reading.""" + with nogil: + check_flight_status( + ( self.writer.get()).DoneWriting()) + + +cdef class FlightMetadataReader(_Weakrefable): + """A reader for Flight metadata messages sent during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataReader] reader + + def read(self): + """Read the next metadata message.""" + cdef shared_ptr[CBuffer] buf + with nogil: + check_flight_status(self.reader.get().ReadMetadata(&buf)) + if buf == NULL: + return None + return pyarrow_wrap_buffer(buf) + + +cdef class FlightMetadataWriter(_Weakrefable): + """A sender for Flight metadata messages during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataWriter] writer + + def write(self, message): + """Write the next metadata message. + + Parameters + ---------- + message : Buffer + """ + cdef shared_ptr[CBuffer] buf = \ + pyarrow_unwrap_buffer(as_buffer(message)) + with nogil: + check_flight_status(self.writer.get().WriteMetadata(deref(buf))) + + +class AsyncioCall: + """State for an async RPC using asyncio.""" + + def __init__(self) -> None: + import asyncio + self._future = asyncio.get_running_loop().create_future() + + def as_awaitable(self) -> object: + return self._future + + def wakeup(self, result_or_exception) -> None: + # Mark the Future done from within its loop (asyncio + # objects are generally not thread-safe) + loop = self._future.get_loop() + if isinstance(result_or_exception, BaseException): + loop.call_soon_threadsafe( + self._future.set_exception, result_or_exception) + else: + loop.call_soon_threadsafe( + self._future.set_result, result_or_exception) + + +cdef class AsyncioFlightClient: + """ + A FlightClient with an asyncio-based async interface. + + This interface is EXPERIMENTAL. + """ + + cdef: + FlightClient _client + + def __init__(self, FlightClient client) -> None: + self._client = client + + async def get_flight_info( + self, + descriptor: FlightDescriptor, + *, + options: FlightCallOptions = None, + ): + call = AsyncioCall() + self._get_flight_info(call, descriptor, options) + return await call.as_awaitable() + + cdef _get_flight_info(self, call, descriptor, options): + cdef: + CFlightCallOptions* c_options = \ + FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + CFuture[CFlightInfo] c_future + + with nogil: + c_future = self._client.client.get().GetFlightInfoAsync( + deref(c_options), c_descriptor) + + BindFuture(move(c_future), call.wakeup, FlightInfo.wrap) + + +cdef class FlightClient(_Weakrefable): + """A client to a Flight service. + + Connect to a Flight service on the given host and port. + + Parameters + ---------- + location : str, tuple or Location + Location to connect to. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + tls_root_certs : bytes or None + PEM-encoded + cert_chain: bytes or None + Client certificate if using mutual TLS + private_key: bytes or None + Client private key for cert_chain is using mutual TLS + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. 
+ middleware : list optional, default None + A list of ClientMiddlewareFactory instances. + write_size_limit_bytes : int optional, default None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. + disable_server_verification : boolean optional, default False + A flag that indicates that, if the client is connecting + with TLS, that it skips server verification. If this is + enabled, all other TLS settings are overridden. + generic_options : list optional, default None + A list of generic (string, int or string) option tuples passed + to the underlying transport. Effect is implementation + dependent. + """ + cdef: + unique_ptr[CFlightClient] client + + def __init__(self, location, *, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, middleware=None, + write_size_limit_bytes=None, + disable_server_verification=None, generic_options=None): + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, tuple): + host, port = location + if tls_root_certs or disable_server_verification is not None: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, tls_root_certs, cert_chain, private_key, + override_hostname, middleware, write_size_limit_bytes, + disable_server_verification, generic_options) + + cdef init(self, Location location, tls_root_certs, cert_chain, + private_key, override_hostname, middleware, + write_size_limit_bytes, disable_server_verification, + generic_options): + cdef: + CLocation c_location = Location.unwrap(location) + CFlightClientOptions c_options = CFlightClientOptions.Defaults() + function[cb_client_middleware_start_call] start_call = \ + &_client_middleware_start_call + CIntStringVariant variant + + if tls_root_certs: + c_options.tls_root_certs = tobytes(tls_root_certs) + if cert_chain: + c_options.cert_chain = tobytes(cert_chain) + if private_key: + c_options.private_key = tobytes(private_key) + if override_hostname: + c_options.override_hostname = tobytes(override_hostname) + if disable_server_verification is not None: + c_options.disable_server_verification = disable_server_verification + if middleware: + for factory in middleware: + c_options.middleware.push_back( + + make_shared[CPyClientMiddlewareFactory]( + factory, start_call)) + if write_size_limit_bytes is not None: + c_options.write_size_limit_bytes = write_size_limit_bytes + else: + c_options.write_size_limit_bytes = 0 + if generic_options: + for key, value in generic_options: + if isinstance(value, (str, bytes)): + variant = CIntStringVariant( tobytes(value)) + else: + variant = CIntStringVariant( value) + c_options.generic_options.push_back( + pair[c_string, CIntStringVariant](tobytes(key), variant)) + + with nogil: + check_flight_status(CFlightClient.Connect(c_location, c_options + ).Value(&self.client)) + + @property + def supports_async(self): + return self.client.get().supports_async() + + def as_async(self) -> None: + check_status(self.client.get().CheckAsyncSupport()) + return AsyncioFlightClient(self) + + def wait_for_available(self, timeout=5): + """Block until the server can be contacted. 
+ + Parameters + ---------- + timeout : int, default 5 + The maximum seconds to wait. + """ + deadline = time.time() + timeout + while True: + try: + list(self.list_flights()) + except FlightUnavailableError: + if time.time() < deadline: + time.sleep(0.025) + continue + else: + raise + except NotImplementedError: + # allow if list_flights is not implemented, because + # the server can be contacted nonetheless + break + else: + break + + @classmethod + def connect(cls, location, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, + disable_server_verification=None): + """Connect to a Flight server. + + .. deprecated:: 0.15.0 + Use the ``FlightClient`` constructor or ``pyarrow.flight.connect`` function instead. + """ + warnings.warn("The 'FlightClient.connect' method is deprecated, use " + "FlightClient constructor or pyarrow.flight.connect " + "function instead") + return FlightClient( + location, tls_root_certs=tls_root_certs, + cert_chain=cert_chain, private_key=private_key, + override_hostname=override_hostname, + disable_server_verification=disable_server_verification + ) + + def authenticate(self, auth_handler, options: FlightCallOptions = None): + """Authenticate to the server. + + Parameters + ---------- + auth_handler : ClientAuthHandler + The authentication mechanism to use. + options : FlightCallOptions + Options for this call. + """ + cdef: + unique_ptr[CClientAuthHandler] handler + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if not isinstance(auth_handler, ClientAuthHandler): + raise TypeError( + "FlightClient.authenticate takes a ClientAuthHandler, " + "not '{}'".format(type(auth_handler))) + handler.reset(( auth_handler).to_handler()) + with nogil: + check_flight_status( + self.client.get().Authenticate(deref(c_options), + move(handler))) + + def authenticate_basic_token(self, username, password, + options: FlightCallOptions = None): + """Authenticate to the server with HTTP basic authentication. + + Parameters + ---------- + username : string + Username to authenticate with + password : string + Password to authenticate with + options : FlightCallOptions + Options for this call + + Returns + ------- + tuple : Tuple[str, str] + A tuple representing the FlightCallOptions authorization + header entry of a bearer token. + """ + cdef: + CResult[pair[c_string, c_string]] result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + c_string user = tobytes(username) + c_string pw = tobytes(password) + + with nogil: + result = self.client.get().AuthenticateBasicToken(deref(c_options), + user, pw) + check_flight_status(result.status()) + + return GetResultValue(result) + + def list_actions(self, options: FlightCallOptions = None): + """List the actions available on a service.""" + cdef: + vector[CActionType] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListActions(deref(c_options)).Value(&results)) + + result = [] + for action_type in results: + py_action = ActionType(frombytes(action_type.type), + frombytes(action_type.description)) + result.append(py_action) + + return result + + def do_action(self, action, options: FlightCallOptions = None): + """ + Execute an action on a service. 
+ + Parameters + ---------- + action : str, tuple, or Action + Can be action type name (no body), type and body, or any Action + object + options : FlightCallOptions + RPC options + + Returns + ------- + results : iterator of Result values + """ + cdef: + unique_ptr[CResultStream] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if isinstance(action, (str, bytes)): + action = Action(action, b'') + elif isinstance(action, tuple): + action = Action(*action) + elif not isinstance(action, Action): + raise TypeError("Action must be Action instance, string, or tuple") + + cdef CAction c_action = Action.unwrap( action) + with nogil: + check_flight_status( + self.client.get().DoAction( + deref(c_options), c_action).Value(&results)) + + def _do_action_response(): + cdef: + Result result + while True: + result = Result.__new__(Result) + with nogil: + check_flight_status(results.get().Next().Value(&result.result)) + if result.result == NULL: + break + yield result + return _do_action_response() + + def list_flights(self, criteria: bytes = None, + options: FlightCallOptions = None): + """List the flights available on a service.""" + cdef: + unique_ptr[CFlightListing] listing + FlightInfo result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CCriteria c_criteria + + if criteria: + c_criteria.expression = tobytes(criteria) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListFlights(deref(c_options), + c_criteria).Value(&listing)) + + while True: + result = FlightInfo.__new__(FlightInfo) + with nogil: + check_flight_status(listing.get().Next().Value(&result.info)) + if result.info == NULL: + break + yield result + + def get_flight_info(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request information about an available flight.""" + cdef: + FlightInfo result = FlightInfo.__new__(FlightInfo) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().GetFlightInfo( + deref(c_options), c_descriptor).Value(&result.info)) + + return result + + def get_schema(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request schema for an available flight.""" + cdef: + SchemaResult result = SchemaResult.__new__(SchemaResult) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + with nogil: + check_status( + self.client.get() + .GetSchema(deref(c_options), c_descriptor).Value(&result.result) + ) + + return result + + def do_get(self, ticket: Ticket, options: FlightCallOptions = None): + """Request the data for a flight. + + Returns + ------- + reader : FlightStreamReader + """ + cdef: + unique_ptr[CFlightStreamReader] reader + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with nogil: + check_flight_status( + self.client.get().DoGet( + deref(c_options), ticket.c_ticket).Value(&reader)) + result = FlightStreamReader() + result.reader.reset(reader.release()) + return result + + def do_put(self, descriptor: FlightDescriptor, Schema schema not None, + options: FlightCallOptions = None): + """Upload data to a flight. 
+ + Returns + ------- + writer : FlightStreamWriter + reader : FlightMetadataReader + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CDoPutResult c_do_put_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoPut( + deref(c_options), + c_descriptor, + c_schema).Value(&c_do_put_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_put_result.writer.release()) + py_reader = FlightMetadataReader() + py_reader.reader.reset(c_do_put_result.reader.release()) + return py_writer, py_reader + + def do_exchange(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Start a bidirectional data exchange with a server. + + Parameters + ---------- + descriptor : FlightDescriptor + A descriptor for the flight. + options : FlightCallOptions + RPC options. + + Returns + ------- + writer : FlightStreamWriter + reader : FlightStreamReader + """ + cdef: + CDoExchangeResult c_do_exchange_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoExchange( + deref(c_options), + c_descriptor).Value(&c_do_exchange_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_exchange_result.writer.release()) + py_reader = FlightStreamReader() + py_reader.reader.reset(c_do_exchange_result.reader.release()) + return py_writer, py_reader + + def close(self): + """Close the client and disconnect.""" + client = self.client.get() + if client != NULL: + check_flight_status(client.Close()) + + def __del__(self): + # Not ideal, but close() wasn't originally present so + # applications may not be calling it + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +cdef class FlightDataStream(_Weakrefable): + """ + Abstract base class for Flight data streams. + + See Also + -------- + RecordBatchStream + GeneratorStream + """ + + cdef CFlightDataStream* to_stream(self) except *: + """Create the C++ data stream for the backing Python object. + + We don't expose the C++ object to Python, so we can manage its + lifetime from the Cython/C++ side. + """ + raise NotImplementedError + + +cdef class RecordBatchStream(FlightDataStream): + """A Flight data stream backed by RecordBatches. + + The remainder of this DoGet request will be handled in C++, + without having to acquire the GIL. + + """ + cdef: + object data_source + CIpcWriteOptions write_options + + def __init__(self, data_source, options=None): + """Create a RecordBatchStream from a data source. + + Parameters + ---------- + data_source : RecordBatchReader or Table + The data to stream to the client. + options : pyarrow.ipc.IpcWriteOptions, optional + Optional IPC options to control how to write the data. 
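+
+        Examples
+        --------
+        A minimal sketch inside a server (``table`` is assumed to be a
+        ``pyarrow.Table`` prepared by the service)::
+
+            def do_get(self, context, ticket):
+                return RecordBatchStream(table)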
+        """
+        if (not isinstance(data_source, RecordBatchReader) and
+                not isinstance(data_source, lib.Table)):
+            raise TypeError("Expected RecordBatchReader or Table, "
+                            "but got: {}".format(type(data_source)))
+        self.data_source = data_source
+        self.write_options = _get_options(options).c_options
+
+    cdef CFlightDataStream* to_stream(self) except *:
+        cdef:
+            shared_ptr[CRecordBatchReader] reader
+        if isinstance(self.data_source, RecordBatchReader):
+            reader = (<RecordBatchReader> self.data_source).reader
+        elif isinstance(self.data_source, lib.Table):
+            table = (<Table>
self.data_source).table + reader.reset(new TableBatchReader(deref(table))) + else: + raise RuntimeError("Can't construct RecordBatchStream " + "from type {}".format(type(self.data_source))) + return new CRecordBatchStream(reader, self.write_options) + + +cdef class GeneratorStream(FlightDataStream): + """A Flight data stream backed by a Python generator.""" + cdef: + shared_ptr[CSchema] schema + object generator + # A substream currently being consumed by the client, if + # present. Produced by the generator. + unique_ptr[CFlightDataStream] current_stream + CIpcWriteOptions c_options + + def __init__(self, schema, generator, options=None): + """Create a GeneratorStream from a Python generator. + + Parameters + ---------- + schema : Schema + The schema for the data to be returned. + + generator : iterator or iterable + The generator should yield other FlightDataStream objects, + Tables, RecordBatches, or RecordBatchReaders. + + options : pyarrow.ipc.IpcWriteOptions, optional + """ + self.schema = pyarrow_unwrap_schema(schema) + self.generator = iter(generator) + self.c_options = _get_options(options).c_options + + cdef CFlightDataStream* to_stream(self) except *: + cdef: + function[cb_data_stream_next] callback = &_data_stream_next + return new CPyGeneratorFlightDataStream(self, self.schema, callback, + self.c_options) + + +cdef class ServerCallContext(_Weakrefable): + """Per-call state/context.""" + cdef: + const CServerCallContext* context + + def peer_identity(self): + """Get the identity of the authenticated peer. + + May be the empty string. + """ + return tobytes(self.context.peer_identity()) + + def peer(self): + """Get the address of the peer.""" + # Set safe=True as gRPC on Windows sometimes gives garbage bytes + return frombytes(self.context.peer(), safe=True) + + def is_cancelled(self): + """Check if the current RPC call has been canceled by the client.""" + return self.context.is_cancelled() + + def add_header(self, key, value): + """Add a response header.""" + self.context.AddHeader(tobytes(key), tobytes(value)) + + def add_trailer(self, key, value): + """Add a response trailer.""" + self.context.AddTrailer(tobytes(key), tobytes(value)) + + def get_middleware(self, key): + """ + Get a middleware instance by key. + + Returns None if the middleware was not found. 
+ """ + cdef: + CServerMiddleware* c_middleware = \ + self.context.GetMiddleware(CPyServerMiddlewareName) + CPyServerMiddleware* middleware + vector[CTracingServerMiddlewareTraceKey] c_trace_context + if c_middleware == NULL: + c_middleware = self.context.GetMiddleware(tobytes(key)) + + if c_middleware == NULL: + return None + elif c_middleware.name() == CPyServerMiddlewareName: + middleware = c_middleware + py_middleware = <_ServerMiddlewareWrapper> middleware.py_object() + return py_middleware.middleware.get(key) + elif c_middleware.name() == CTracingServerMiddlewareName: + c_trace_context = ( c_middleware + ).GetTraceContext() + trace_context = {pair.key: pair.value for pair in c_trace_context} + return TracingServerMiddleware(trace_context) + return None + + @staticmethod + cdef ServerCallContext wrap(const CServerCallContext& context): + cdef ServerCallContext result = \ + ServerCallContext.__new__(ServerCallContext) + result.context = &context + return result + + +cdef class ServerAuthReader(_Weakrefable): + """A reader for messages from the client during an auth handshake.""" + cdef: + CServerAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ServerAuthReader outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.reader = NULL + + @staticmethod + cdef ServerAuthReader wrap(CServerAuthReader* reader): + cdef ServerAuthReader result = \ + ServerAuthReader.__new__(ServerAuthReader) + result.reader = reader + return result + + +cdef class ServerAuthSender(_Weakrefable): + """A writer for messages to the client during an auth handshake.""" + cdef: + CServerAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ServerAuthSender outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ServerAuthSender wrap(CServerAuthSender* sender): + cdef ServerAuthSender result = \ + ServerAuthSender.__new__(ServerAuthSender) + result.sender = sender + return result + + +cdef class ClientAuthReader(_Weakrefable): + """A reader for messages from the server during an auth handshake.""" + cdef: + CClientAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ClientAuthReader outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. 
+ """ + self.reader = NULL + + @staticmethod + cdef ClientAuthReader wrap(CClientAuthReader* reader): + cdef ClientAuthReader result = \ + ClientAuthReader.__new__(ClientAuthReader) + result.reader = reader + return result + + +cdef class ClientAuthSender(_Weakrefable): + """A writer for messages to the server during an auth handshake.""" + cdef: + CClientAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ClientAuthSender outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ClientAuthSender wrap(CClientAuthSender* sender): + cdef ClientAuthSender result = \ + ClientAuthSender.__new__(ClientAuthSender) + result.sender = sender + return result + + +cdef CStatus _data_stream_next(void* self, CFlightPayload* payload) except *: + """Callback for implementing FlightDataStream in Python.""" + cdef: + unique_ptr[CFlightDataStream] data_stream + + py_stream = self + if not isinstance(py_stream, GeneratorStream): + raise RuntimeError("self object in callback is not GeneratorStream") + stream = py_stream + + # The generator is allowed to yield a reader or table which we + # yield from; if that sub-generator is empty, we need to reset and + # try again. However, limit the number of attempts so that we + # don't just spin forever. + max_attempts = 128 + for _ in range(max_attempts): + if stream.current_stream != nullptr: + with nogil: + check_flight_status( + stream.current_stream.get().Next().Value(payload)) + # If the stream ended, see if there's another stream from the + # generator + if payload.ipc_message.metadata != nullptr: + return CStatus_OK() + stream.current_stream.reset(nullptr) + + try: + result = next(stream.generator) + except StopIteration: + payload.ipc_message.metadata.reset( nullptr) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + if isinstance(result, (list, tuple)): + result, metadata = result + else: + result, metadata = result, None + + if isinstance(result, (Table, RecordBatchReader)): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + result = RecordBatchStream(result) + + stream_schema = pyarrow_wrap_schema(stream.schema) + if isinstance(result, FlightDataStream): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + data_stream = unique_ptr[CFlightDataStream]( + ( result).to_stream()) + substream_schema = pyarrow_wrap_schema(data_stream.get().schema()) + if substream_schema != stream_schema: + raise ValueError("Got a FlightDataStream whose schema " + "does not match the declared schema of this " + "GeneratorStream. " + "Got: {}\nExpected: {}".format( + substream_schema, stream_schema)) + stream.current_stream.reset( + new CPyFlightDataStream(result, move(data_stream))) + # Loop around and try again + continue + elif isinstance(result, RecordBatch): + batch = result + if batch.schema != stream_schema: + raise ValueError("Got a RecordBatch whose schema does not " + "match the declared schema of this " + "GeneratorStream. 
" + "Got: {}\nExpected: {}".format(batch.schema, + stream_schema)) + check_flight_status(GetRecordBatchPayload( + deref(batch.batch), + stream.c_options, + &payload.ipc_message)) + if metadata: + payload.app_metadata = pyarrow_unwrap_buffer( + as_buffer(metadata)) + else: + raise TypeError("GeneratorStream must be initialized with " + "an iterator of FlightDataStream, Table, " + "RecordBatch, or RecordBatchStreamReader objects, " + "not {}.".format(type(result))) + # Don't loop around + return CStatus_OK() + # Ran out of attempts (the RPC handler kept yielding empty tables/readers) + raise RuntimeError("While getting next payload, ran out of attempts to " + "get something to send " + "(application server implementation error)") + + +cdef CStatus _list_flights(void* self, const CServerCallContext& context, + const CCriteria* c_criteria, + unique_ptr[CFlightListing]* listing) except *: + """Callback for implementing ListFlights in Python.""" + cdef: + vector[CFlightInfo] flights + + try: + result = ( self).list_flights(ServerCallContext.wrap(context), + c_criteria.expression) + for info in result: + if not isinstance(info, FlightInfo): + raise TypeError("FlightServerBase.list_flights must return " + "FlightInfo instances, but got {}".format( + type(info))) + flights.push_back(deref(( info).info.get())) + listing.reset(new CSimpleFlightListing(flights)) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _get_flight_info(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CFlightInfo]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + try: + result = ( self).get_flight_info( + ServerCallContext.wrap(context), + py_descriptor) + except FlightError as flight_error: + return ( flight_error).to_status() + if not isinstance(result, FlightInfo): + raise TypeError("FlightServerBase.get_flight_info must return " + "a FlightInfo instance, but got {}".format( + type(result))) + info.reset(new CFlightInfo(deref(( result).info.get()))) + return CStatus_OK() + +cdef CStatus _get_schema(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CSchemaResult]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + result = ( self).get_schema(ServerCallContext.wrap(context), + py_descriptor) + if not isinstance(result, SchemaResult): + raise TypeError("FlightServerBase.get_schema_info must return " + "a SchemaResult instance, but got {}".format( + type(result))) + info.reset(new CSchemaResult(deref(( result).result.get()))) + return CStatus_OK() + +cdef CStatus _do_put(void* self, const CServerCallContext& context, + unique_ptr[CFlightMessageReader] reader, + unique_ptr[CFlightMetadataWriter] writer) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + MetadataRecordBatchReader py_reader = MetadataRecordBatchReader() + FlightMetadataWriter py_writer = FlightMetadataWriter() + FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + + descriptor.descriptor = reader.get().descriptor() + py_reader.reader.reset(reader.release()) + py_writer.writer.reset(writer.release()) + try: + ( 
<object> self).do_put(ServerCallContext.wrap(context), descriptor,
+                               py_reader, py_writer)
+        return CStatus_OK()
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+
+
+cdef CStatus _do_get(void* self, const CServerCallContext& context,
+                     CTicket ticket,
+                     unique_ptr[CFlightDataStream]* stream) except *:
+    """Callback for implementing Flight servers in Python."""
+    cdef:
+        unique_ptr[CFlightDataStream] data_stream
+
+    py_ticket = Ticket(ticket.ticket)
+    try:
+        result = (<object> self).do_get(ServerCallContext.wrap(context),
+                                        py_ticket)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    if not isinstance(result, FlightDataStream):
+        raise TypeError("FlightServerBase.do_get must return "
+                        "a FlightDataStream")
+    data_stream = unique_ptr[CFlightDataStream](
+        (<FlightDataStream> result).to_stream())
+    stream[0] = unique_ptr[CFlightDataStream](
+        new CPyFlightDataStream(result, move(data_stream)))
+    return CStatus_OK()
+
+
+cdef CStatus _do_exchange(void* self, const CServerCallContext& context,
+                          unique_ptr[CFlightMessageReader] reader,
+                          unique_ptr[CFlightMessageWriter] writer) except *:
+    """Callback for implementing Flight servers in Python."""
+    cdef:
+        MetadataRecordBatchReader py_reader = MetadataRecordBatchReader()
+        MetadataRecordBatchWriter py_writer = MetadataRecordBatchWriter()
+        FlightDescriptor descriptor = \
+            FlightDescriptor.__new__(FlightDescriptor)
+
+    descriptor.descriptor = reader.get().descriptor()
+    py_reader.reader.reset(reader.release())
+    py_writer.writer.reset(writer.release())
+    try:
+        (<object> self).do_exchange(ServerCallContext.wrap(context),
+                                    descriptor, py_reader, py_writer)
+        return CStatus_OK()
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+
+
+cdef CStatus _do_action_result_next(
+        void* self,
+        unique_ptr[CFlightResult]* result
+) except *:
+    """Callback for implementing Flight servers in Python."""
+    cdef:
+        CFlightResult* c_result
+
+    try:
+        action_result = next(<object> self)
+        if not isinstance(action_result, Result):
+            action_result = Result(action_result)
+        c_result = (<Result> action_result).result.get()
+        result.reset(new CFlightResult(deref(c_result)))
+    except StopIteration:
+        result.reset(nullptr)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    return CStatus_OK()
+
+
+cdef CStatus _do_action(void* self, const CServerCallContext& context,
+                        const CAction& action,
+                        unique_ptr[CResultStream]* result) except *:
+    """Callback for implementing Flight servers in Python."""
+    cdef:
+        function[cb_result_next] ptr = &_do_action_result_next
+    py_action = Action(action.type, pyarrow_wrap_buffer(action.body))
+    try:
+        responses = (<object> self).do_action(ServerCallContext.wrap(context),
+                                              py_action)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    # Let the application return an iterator or anything convertible
+    # into one
+    if responses is None:
+        # Server didn't return anything
+        responses = []
+    result.reset(new CPyFlightResultStream(iter(responses), ptr))
+    return CStatus_OK()
+
+
+cdef CStatus _list_actions(void* self, const CServerCallContext& context,
+                           vector[CActionType]* actions) except *:
+    """Callback for implementing Flight servers in Python."""
+    cdef:
+        CActionType action_type
+    # Method should return a list of ActionTypes or similar tuple
+    try:
+        result = (<object> self).list_actions(ServerCallContext.wrap(context))
+        for action in result:
+            if not isinstance(action, tuple):
+                raise TypeError(
+                    "Results of list_actions must be ActionType or tuple")
+            action_type.type = tobytes(action[0])
+            action_type.description = tobytes(action[1])
+            actions.push_back(action_type)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    return CStatus_OK()
+
+
+cdef CStatus _server_authenticate(void* self, CServerAuthSender* outgoing,
+                                  CServerAuthReader* incoming) except *:
+    """Callback for implementing authentication in Python."""
+    sender = ServerAuthSender.wrap(outgoing)
+    reader = ServerAuthReader.wrap(incoming)
+    try:
+        (<object> self).authenticate(sender, reader)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    finally:
+        sender.poison()
+        reader.poison()
+    return CStatus_OK()
+
+cdef CStatus _is_valid(void* self, const c_string& token,
+                       c_string* peer_identity) except *:
+    """Callback for implementing authentication in Python."""
+    cdef c_string c_result
+    try:
+        c_result = tobytes((<object> self).is_valid(token))
+        peer_identity[0] = c_result
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    return CStatus_OK()
+
+
+cdef CStatus _client_authenticate(void* self, CClientAuthSender* outgoing,
+                                  CClientAuthReader* incoming) except *:
+    """Callback for implementing authentication in Python."""
+    sender = ClientAuthSender.wrap(outgoing)
+    reader = ClientAuthReader.wrap(incoming)
+    try:
+        (<object> self).authenticate(sender, reader)
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    finally:
+        sender.poison()
+        reader.poison()
+    return CStatus_OK()
+
+
+cdef CStatus _get_token(void* self, c_string* token) except *:
+    """Callback for implementing authentication in Python."""
+    cdef c_string c_result
+    try:
+        c_result = tobytes((<object> self).get_token())
+        token[0] = c_result
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+    return CStatus_OK()
+
+
+cdef CStatus _middleware_sending_headers(
+        void* self, CAddCallHeaders* add_headers) except *:
+    """Callback for implementing middleware."""
+    try:
+        headers = (<object> self).sending_headers()
+    except FlightError as flight_error:
+        return (<FlightError> flight_error).to_status()
+
+    if headers:
+        for header, values in headers.items():
+            if isinstance(values, (str, bytes)):
+                values = (values,)
+            # Headers in gRPC (and HTTP/1, HTTP/2) are required to be
+            # valid, lowercase ASCII.
+            header = header.lower()
+            if isinstance(header, str):
+                header = header.encode("ascii")
+            for value in values:
+                if isinstance(value, str):
+                    value = value.encode("ascii")
+                # Allow bytes values to pass through.
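+                # Note: per the sending_headers docstrings below, gRPC only
+                # accepts binary (bytes) values on header names ending in
+                # "-bin"; text values must be valid ASCII.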
+ add_headers.AddHeader(header, value) + + return CStatus_OK() + + +cdef CStatus _middleware_call_completed( + void* self, + const CStatus& call_status) except *: + """Callback for implementing middleware.""" + try: + try: + check_flight_status(call_status) + except Exception as e: + ( self).call_completed(e) + else: + ( self).call_completed(None) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _middleware_received_headers( + void* self, + const CCallHeaders& c_headers) except *: + """Callback for implementing middleware.""" + try: + headers = convert_headers(c_headers) + ( self).received_headers(headers) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef dict convert_headers(const CCallHeaders& c_headers): + cdef: + CCallHeaders.const_iterator header_iter = c_headers.cbegin() + headers = {} + while header_iter != c_headers.cend(): + header = c_string(deref(header_iter).first).decode("ascii") + value = c_string(deref(header_iter).second) + if not header.endswith("-bin"): + # Text header values in gRPC (and HTTP/1, HTTP/2) are + # required to be valid ASCII. Binary header values are + # exposed as bytes. + value = value.decode("ascii") + headers.setdefault(header, []).append(value) + postincrement(header_iter) + return headers + + +cdef CStatus _server_middleware_start_call( + void* self, + const CCallInfo& c_info, + const CCallHeaders& c_headers, + shared_ptr[CServerMiddleware]* c_instance) except *: + """Callback for implementing server middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + headers = convert_headers(c_headers) + instance = ( self).start_call(call_info, headers) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ServerMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef CStatus _client_middleware_start_call( + void* self, + const CCallInfo& c_info, + unique_ptr[CClientMiddleware]* c_instance) except *: + """Callback for implementing client middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + instance = ( self).start_call(call_info) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ClientMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef class ServerAuthHandler(_Weakrefable): + """Authentication middleware for a server. + + To implement an authentication mechanism, subclass this class and + override its methods. + + """ + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the client. + + May raise an error if the client cannot authenticate. + + Parameters + ---------- + outgoing : ServerAuthSender + A channel to send messages to the client. + incoming : ServerAuthReader + A channel to read messages from the client. + """ + raise NotImplementedError + + def is_valid(self, token): + """Validate a client token, returning their identity. + + May return an empty string (if the auth mechanism does not + name the peer) or raise an exception (if the token is + invalid). + + Parameters + ---------- + token : bytes + The authentication token from the client. 
+ + """ + raise NotImplementedError + + cdef PyServerAuthHandler* to_handler(self): + cdef PyServerAuthHandlerVtable vtable + vtable.authenticate = _server_authenticate + vtable.is_valid = _is_valid + return new PyServerAuthHandler(self, vtable) + + +cdef class ClientAuthHandler(_Weakrefable): + """Authentication plugin for a client.""" + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the server. + + Parameters + ---------- + outgoing : ClientAuthSender + A channel to send messages to the server. + incoming : ClientAuthReader + A channel to read messages from the server. + """ + raise NotImplementedError + + def get_token(self): + """Get the auth token for a call.""" + raise NotImplementedError + + cdef PyClientAuthHandler* to_handler(self): + cdef PyClientAuthHandlerVtable vtable + vtable.authenticate = _client_authenticate + vtable.get_token = _get_token + return new PyClientAuthHandler(self, vtable) + + +_CallInfo = collections.namedtuple("_CallInfo", ["method"]) + + +class CallInfo(_CallInfo): + """Information about a particular RPC for Flight middleware.""" + + +cdef wrap_call_info(const CCallInfo& c_info): + method = wrap_flight_method(c_info.method) + return CallInfo(method=method) + + +cdef class ClientMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + client are accessible from the middleware itself. + + """ + + def start_call(self, info): + """Called at the start of an RPC. + + This must be thread-safe and must not raise exceptions. + + Parameters + ---------- + info : CallInfo + Information about the call. + + Returns + ------- + instance : ClientMiddleware + An instance of ClientMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + """ + + +cdef class ClientMiddleware(_Weakrefable): + """Client-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the request, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def received_headers(self, headers): + """A callback when headers are received. + + The default implementation does nothing. + + Parameters + ---------- + headers : dict + A dictionary of headers from the server. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + The default implementation does nothing. + + Parameters + ---------- + exception : ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + unique_ptr[CClientMiddleware]* c_instance): + cdef PyClientMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.received_headers = _middleware_received_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyClientMiddleware(py_middleware, vtable)) + + +cdef class ServerMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + middleware are accessible from the method itself. + + """ + + def start_call(self, info, headers): + """Called at the start of an RPC. + + This must be thread-safe. + + Parameters + ---------- + info : CallInfo + Information about the call. + headers : dict + A dictionary of headers from the client. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + Returns + ------- + instance : ServerMiddleware + An instance of ServerMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + Raises + ------ + exception : pyarrow.ArrowException + If an exception is raised, the call will be rejected with + the given error. + + """ + + +cdef class TracingServerMiddlewareFactory(ServerMiddlewareFactory): + """A factory for tracing middleware instances. + + This enables OpenTelemetry support in Arrow (if Arrow was compiled + with OpenTelemetry support enabled). A new span will be started on + each RPC call. The TracingServerMiddleware instance can then be + retrieved within an RPC handler to get the propagated context, + which can be used to start a new span on the Python side. + + Because the Python/C++ OpenTelemetry libraries do not + interoperate, spans on the C++ side are not directly visible to + the Python side and vice versa. + + """ + + +cdef class ServerMiddleware(_Weakrefable): + """Server-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the response, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + Parameters + ---------- + exception : pyarrow.ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + shared_ptr[CServerMiddleware]* c_instance): + cdef PyServerMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyServerMiddleware(py_middleware, vtable)) + + +class TracingServerMiddleware(ServerMiddleware): + __slots__ = ["trace_context"] + + def __init__(self, trace_context): + self.trace_context = trace_context + + +cdef class _ServerMiddlewareFactoryWrapper(ServerMiddlewareFactory): + """Wrapper to bundle server middleware into a single C++ one.""" + + cdef: + dict factories + + def __init__(self, dict factories): + self.factories = factories + + def start_call(self, info, headers): + instances = {} + for key, factory in self.factories.items(): + instance = factory.start_call(info, headers) + if instance: + # TODO: prevent duplicate keys + instances[key] = instance + if instances: + wrapper = _ServerMiddlewareWrapper(instances) + return wrapper + return None + + +cdef class _ServerMiddlewareWrapper(ServerMiddleware): + cdef: + dict middleware + + def __init__(self, dict middleware): + self.middleware = middleware + + def sending_headers(self): + headers = collections.defaultdict(list) + for instance in self.middleware.values(): + more_headers = instance.sending_headers() + if not more_headers: + continue + # Manually merge with existing headers (since headers are + # multi-valued) + for key, values in more_headers.items(): + # ARROW-16606 gRPC aborts given non-lowercase headers + key = key.lower() + if isinstance(values, (bytes, str)): + values = (values,) + headers[key].extend(values) + return headers + + def call_completed(self, exception): + for instance in self.middleware.values(): + instance.call_completed(exception) + + +cdef class _FlightServerFinalizer(_Weakrefable): + """ + A finalizer that shuts down the server on destruction. + + See ARROW-16597. If the server is still active at interpreter + exit, the process may segfault. + """ + + cdef: + shared_ptr[PyFlightServer] server + + def finalize(self): + cdef: + PyFlightServer* server = self.server.get() + CStatus status + if server == NULL: + return + try: + with nogil: + status = server.Shutdown() + if status.ok(): + status = server.Wait() + check_flight_status(status) + finally: + self.server.reset() + + +cdef class FlightServerBase(_Weakrefable): + """A Flight service definition. + + To start the server, create an instance of this class with an + appropriate location. The server will be running as soon as the + instance is created; it is not required to call :meth:`serve`. + + Override methods to define your Flight service. + + Parameters + ---------- + location : str, tuple or Location optional, default None + Location to serve on. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + If None is passed then the server will be started on localhost with a + system provided random port. + auth_handler : ServerAuthHandler optional, default None + An authentication mechanism to use. May be None. + tls_certificates : list optional, default None + A list of (certificate, key) pairs. + verify_client : boolean optional, default False + If True, then enable mutual TLS: require the client to present + a client certificate, and validate the certificate. 
+ root_certificates : bytes optional, default None + If enabling mutual TLS, this specifies the PEM-encoded root + certificate used to validate client certificates. + middleware : dict optional, default None + A dictionary of :class:`ServerMiddlewareFactory` instances. The + string keys can be used to retrieve the middleware instance within + RPC handlers (see :meth:`ServerCallContext.get_middleware`). + + """ + + cdef: + shared_ptr[PyFlightServer] server + object finalizer + + def __init__(self, location=None, auth_handler=None, + tls_certificates=None, verify_client=None, + root_certificates=None, middleware=None): + self.finalizer = None + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, (tuple, type(None))): + if location is None: + location = ('localhost', 0) + host, port = location + if tls_certificates: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, auth_handler, tls_certificates, verify_client, + tobytes(root_certificates or b""), middleware) + + cdef init(self, Location location, ServerAuthHandler auth_handler, + list tls_certificates, c_bool verify_client, + bytes root_certificates, dict middleware): + cdef: + PyFlightServerVtable vtable = PyFlightServerVtable() + PyFlightServer* c_server + unique_ptr[CFlightServerOptions] c_options + CCertKeyPair c_cert + function[cb_server_middleware_start_call] start_call = \ + &_server_middleware_start_call + pair[c_string, shared_ptr[CServerMiddlewareFactory]] c_middleware + + c_options.reset(new CFlightServerOptions(Location.unwrap(location))) + # mTLS configuration + c_options.get().verify_client = verify_client + c_options.get().root_certificates = root_certificates + + if auth_handler: + if not isinstance(auth_handler, ServerAuthHandler): + raise TypeError("auth_handler must be a ServerAuthHandler, " + "not a '{}'".format(type(auth_handler))) + c_options.get().auth_handler.reset( + ( auth_handler).to_handler()) + + if tls_certificates: + for cert, key in tls_certificates: + c_cert.pem_cert = tobytes(cert) + c_cert.pem_key = tobytes(key) + c_options.get().tls_certificates.push_back(c_cert) + + if middleware: + non_tracing_middleware = {} + enable_tracing = None + for key, factory in middleware.items(): + if isinstance(factory, TracingServerMiddlewareFactory): + if enable_tracing is not None: + raise ValueError( + "Can only provide " + "TracingServerMiddlewareFactory once") + if tobytes(key) == CPyServerMiddlewareName: + raise ValueError(f"Middleware key cannot be {key}") + enable_tracing = key + else: + non_tracing_middleware[key] = factory + + if enable_tracing: + c_middleware.first = tobytes(enable_tracing) + c_middleware.second = MakeTracingServerMiddlewareFactory() + c_options.get().middleware.push_back(c_middleware) + + py_middleware = _ServerMiddlewareFactoryWrapper( + non_tracing_middleware) + c_middleware.first = CPyServerMiddlewareName + c_middleware.second.reset(new CPyServerMiddlewareFactory( + py_middleware, + start_call)) + c_options.get().middleware.push_back(c_middleware) + + vtable.list_flights = &_list_flights + vtable.get_flight_info = &_get_flight_info + vtable.get_schema = &_get_schema + vtable.do_put = &_do_put + vtable.do_get = &_do_get + vtable.do_exchange = &_do_exchange + vtable.list_actions = &_list_actions + vtable.do_action = &_do_action + + c_server = 
new PyFlightServer(self, vtable) + self.server.reset(c_server) + with nogil: + check_flight_status(c_server.Init(deref(c_options))) + cdef _FlightServerFinalizer finalizer = _FlightServerFinalizer() + finalizer.server = self.server + self.finalizer = weakref.finalize(self, finalizer.finalize) + + @property + def port(self): + """ + Get the port that this server is listening on. + + Returns a non-positive value if the operation is invalid + (e.g. init() was not called or server is listening on a domain + socket). + """ + return self.server.get().port() + + def list_flights(self, context, criteria): + """List flights available on this service. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + criteria : bytes + Filter criteria provided by the client. + + Returns + ------- + iterator of FlightInfo + + """ + raise NotImplementedError + + def get_flight_info(self, context, descriptor): + """Get information about a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + FlightInfo + + """ + raise NotImplementedError + + def get_schema(self, context, descriptor): + """Get the schema of a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + Schema + + """ + raise NotImplementedError + + def do_put(self, context, descriptor, reader: MetadataRecordBatchReader, + writer: FlightMetadataWriter): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. + writer : FlightMetadataWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def do_get(self, context, ticket): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + ticket : Ticket + The ticket for the flight. + + Returns + ------- + FlightDataStream + A stream of data to send back to the client. + + """ + raise NotImplementedError + + def do_exchange(self, context, descriptor, reader, writer): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. 
+ writer : MetadataRecordBatchWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def list_actions(self, context): + """List custom actions available on this server. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + + Returns + ------- + iterator of ActionType or tuple + + """ + raise NotImplementedError + + def do_action(self, context, action): + """Execute a custom action. + + This method should return an iterator, or it should be a + generator. Applications should override this method to + implement their own behavior. The default method raises a + NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + action : Action + The action to execute. + + Returns + ------- + iterator of bytes + + """ + raise NotImplementedError + + def serve(self): + """Block until the server shuts down. + + This method only returns if shutdown() is called or a signal is + received. + """ + if self.server.get() == nullptr: + raise ValueError("run() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().ServeWithSignals()) + + def run(self): + """Block until the server shuts down. + + .. deprecated:: 0.15.0 + Use the ``FlightServer.serve`` method instead + """ + warnings.warn("The 'FlightServer.run' method is deprecated, use " + "FlightServer.serve method instead") + self.serve() + + def shutdown(self): + """Shut down the server, blocking until current requests finish. + + Do not call this directly from the implementation of a Flight + method, as then the server will block forever waiting for that + request to finish. Instead, call this method from a background + thread. + + This method should only be called once. + """ + # Must not hold the GIL: shutdown waits for pending RPCs to + # complete. Holding the GIL means Python-implemented Flight + # methods will never get to run, so this will hang + # indefinitely. + if self.server.get() == nullptr: + raise ValueError("shutdown() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().Shutdown()) + + def wait(self): + """Block until server is terminated with shutdown.""" + with nogil: + self.server.get().Wait() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.finalizer: + self.finalizer() + + +def connect(location, **kwargs): + """ + Connect to a Flight server. + + Parameters + ---------- + location : str, tuple, or Location + Location to connect to. Either a URI like "grpc://localhost:port", + a tuple of (host, port), or a Location instance. + tls_root_certs : bytes or None + PEM-encoded. + cert_chain: str or None + If provided, enables TLS mutual authentication. + private_key: str or None + If provided, enables TLS mutual authentication. + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. + middleware : list or None + A list of ClientMiddlewareFactory instances to apply. + write_size_limit_bytes : int or None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. 
+ disable_server_verification : boolean or None + Disable verifying the server when using TLS. + Insecure, use with caution. + generic_options : list or None + A list of generic (string, int or string) options to pass to + the underlying transport. + + Returns + ------- + client : FlightClient + """ + return FlightClient(location, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4504b78b837ea8c7ec309510a319bde7b8dd3bc1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_fs.pxd @@ -0,0 +1,94 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow.lib import _detect_compression, frombytes, tobytes +from pyarrow.lib cimport * + + +cpdef enum FileType: + NotFound = CFileType_NotFound + Unknown = CFileType_Unknown + File = CFileType_File + Directory = CFileType_Directory + + +cdef class FileInfo(_Weakrefable): + cdef: + CFileInfo info + + @staticmethod + cdef wrap(CFileInfo info) + + cdef inline CFileInfo unwrap(self) nogil + + @staticmethod + cdef CFileInfo unwrap_safe(obj) + + +cdef class FileSelector(_Weakrefable): + cdef: + CFileSelector selector + + @staticmethod + cdef FileSelector wrap(CFileSelector selector) + + cdef inline CFileSelector unwrap(self) nogil + + +cdef class FileSystem(_Weakrefable): + cdef: + shared_ptr[CFileSystem] wrapped + CFileSystem* fs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[CFileSystem]& sp) + + cdef inline shared_ptr[CFileSystem] unwrap(self) nogil + + +cdef class LocalFileSystem(FileSystem): + cdef: + CLocalFileSystem* localfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class SubTreeFileSystem(FileSystem): + cdef: + CSubTreeFileSystem* subtreefs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class _MockFileSystem(FileSystem): + cdef: + CMockFileSystem* mockfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) + + +cdef class PyFileSystem(FileSystem): + cdef: + CPyFileSystem* pyfs + + cdef init(self, const shared_ptr[CFileSystem]& wrapped) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..5e69413cea953639e36ba5485cb383b88193748b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_gcsfs.pyx @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.lib cimport (pyarrow_wrap_metadata, + pyarrow_unwrap_metadata) +from pyarrow.lib import frombytes, tobytes, ensure_metadata +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem, TimePoint_to_ns, PyDateTime_to_TimePoint + +from datetime import datetime, timedelta, timezone + + +cdef class GcsFileSystem(FileSystem): + """ + Google Cloud Storage (GCS) backed FileSystem implementation + + By default uses the process described in https://google.aip.dev/auth/4110 + to resolve credentials. If not running on Google Cloud Platform (GCP), + this generally requires the environment variable + GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file + containing credentials. + + Note: GCS buckets are special and the operations available on them may be + limited or more expensive than expected compared to local file systems. + + Note: When pickling a GcsFileSystem that uses default credentials, resolution + credentials are not stored in the serialized data. Therefore, when unpickling + it is assumed that the necessary credentials are in place for the target + process. + + Parameters + ---------- + anonymous : boolean, default False + Whether to connect anonymously. + If true, will not attempt to look up credentials using standard GCP + configuration methods. + access_token : str, default None + GCP access token. If provided, temporary credentials will be fetched by + assuming this role; also, a `credential_token_expiration` must be + specified as well. + target_service_account : str, default None + An optional service account to try to impersonate when accessing GCS. This + requires the specified credential user or service account to have the necessary + permissions. + credential_token_expiration : datetime, default None + Expiration for credential generated with an access token. Must be specified + if `access_token` is specified. + default_bucket_location : str, default 'US' + GCP region to create buckets in. + scheme : str, default 'https' + GCS connection transport scheme. + endpoint_override : str, default None + Override endpoint with a connect string such as "localhost:9000" + default_metadata : mapping or pyarrow.KeyValueMetadata, default None + Default metadata for `open_output_stream`. This will be ignored if + non-empty metadata is passed to `open_output_stream`. + retry_time_limit : timedelta, default None + Set the maximum amount of time the GCS client will attempt to retry + transient errors. Subsecond granularity is ignored. + project_id : str, default None + The GCP project identifier to use for creating buckets. + If not set, the library uses the GOOGLE_CLOUD_PROJECT environment + variable. 
Most I/O operations do not need a project id, only applications + that create new buckets need a project id. + """ + + cdef: + CGcsFileSystem* gcsfs + + def __init__(self, *, bint anonymous=False, access_token=None, + target_service_account=None, credential_token_expiration=None, + default_bucket_location='US', + scheme=None, + endpoint_override=None, + default_metadata=None, + retry_time_limit=None, + project_id=None): + cdef: + CGcsOptions options + shared_ptr[CGcsFileSystem] wrapped + double time_limit_seconds + + # Intentional use of truthiness because empty strings aren't valid and + # for reconstruction from pickling will give empty strings. + if anonymous and (target_service_account or access_token): + raise ValueError( + 'anonymous option is not compatible with target_service_account and ' + 'access_token' + ) + elif bool(access_token) != bool(credential_token_expiration): + raise ValueError( + 'access_token and credential_token_expiration must be ' + 'specified together' + ) + + elif anonymous: + options = CGcsOptions.Anonymous() + elif access_token: + if not isinstance(credential_token_expiration, datetime): + raise ValueError( + "credential_token_expiration must be a datetime") + options = CGcsOptions.FromAccessToken( + tobytes(access_token), + PyDateTime_to_TimePoint(credential_token_expiration)) + else: + options = CGcsOptions.Defaults() + + # Target service account requires base credentials so + # it is not part of the if/else chain above which only + # handles base credentials. + if target_service_account: + options = CGcsOptions.FromImpersonatedServiceAccount( + options.credentials, tobytes(target_service_account)) + + options.default_bucket_location = tobytes(default_bucket_location) + + if scheme is not None: + options.scheme = tobytes(scheme) + if endpoint_override is not None: + options.endpoint_override = tobytes(endpoint_override) + if default_metadata is not None: + options.default_metadata = pyarrow_unwrap_metadata( + ensure_metadata(default_metadata)) + if retry_time_limit is not None: + time_limit_seconds = retry_time_limit.total_seconds() + options.retry_limit_seconds = time_limit_seconds + if project_id is not None: + options.project_id = tobytes(project_id) + + with nogil: + wrapped = GetResultValue(CGcsFileSystem.Make(options)) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.gcsfs = wrapped.get() + + def _expiration_datetime_from_options(self): + expiration_ns = TimePoint_to_ns( + self.gcsfs.options().credentials.expiration()) + if expiration_ns == 0: + return None + return datetime.fromtimestamp(expiration_ns / 1.0e9, timezone.utc) + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
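+        # The kwargs dict is produced by __reduce__ below; for default
+        # credentials the resolved credentials themselves are not part of
+        # it, so the unpickling environment must be able to resolve
+        # credentials on its own (see the class docstring on pickling).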
+ return GcsFileSystem(**kwargs) + + def __reduce__(self): + cdef CGcsOptions opts = self.gcsfs.options() + service_account = frombytes(opts.credentials.target_service_account()) + expiration_dt = self._expiration_datetime_from_options() + retry_time_limit = None + if opts.retry_limit_seconds.has_value(): + retry_time_limit = timedelta( + seconds=opts.retry_limit_seconds.value()) + project_id = None + if opts.project_id.has_value(): + project_id = frombytes(opts.project_id.value()) + return ( + GcsFileSystem._reconstruct, (dict( + access_token=frombytes(opts.credentials.access_token()), + anonymous=opts.credentials.anonymous(), + credential_token_expiration=expiration_dt, + target_service_account=service_account, + scheme=frombytes(opts.scheme), + endpoint_override=frombytes(opts.endpoint_override), + default_bucket_location=frombytes( + opts.default_bucket_location), + default_metadata=pyarrow_wrap_metadata(opts.default_metadata), + retry_time_limit=retry_time_limit, + project_id=project_id + ),)) + + @property + def default_bucket_location(self): + """ + The GCP location this filesystem will write to. + """ + return frombytes(self.gcsfs.options().default_bucket_location) + + @property + def project_id(self): + """ + The GCP project id this filesystem will use. + """ + if self.gcsfs.options().project_id.has_value(): + return frombytes(self.gcsfs.options().project_id.value()) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_generated_version.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/_generated_version.py new file mode 100644 index 0000000000000000000000000000000000000000..3f36717259eed1ce155222ab6b8e9743ce191b34 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_generated_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '15.0.2' +__version_tuple__ = version_tuple = (15, 0, 2) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c426337a12ec184feb2d699e1e685228c249466e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfs.pyx @@ -0,0 +1,160 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + +from pyarrow.lib import frombytes, tobytes +from pyarrow.util import _stringify_path + + +cdef class HadoopFileSystem(FileSystem): + """ + HDFS backed FileSystem implementation + + Parameters + ---------- + host : str + HDFS host to connect to. 
Set to "default" for fs.defaultFS from + core-site.xml. + port : int, default 8020 + HDFS port to connect to. Set to 0 for default or logical (HA) nodes. + user : str, default None + Username when connecting to HDFS; None implies login user. + replication : int, default 3 + Number of copies each block will have. + buffer_size : int, default 0 + If 0, no buffering will happen otherwise the size of the temporary read + and write buffer. + default_block_size : int, default None + None means the default configuration for HDFS, a typical block size is + 128 MB. + kerb_ticket : string or path, default None + If not None, the path to the Kerberos ticket cache. + extra_conf : dict, default None + Extra key/value pairs for configuration; will override any + hdfs-site.xml properties. + + Examples + -------- + >>> from pyarrow import fs + >>> hdfs = fs.HadoopFileSystem(host, port, user=user, kerb_ticket=ticket_cache_path) # doctest: +SKIP + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. + """ + + cdef: + CHadoopFileSystem* hdfs + + def __init__(self, str host, int port=8020, *, str user=None, + int replication=3, int buffer_size=0, + default_block_size=None, kerb_ticket=None, + extra_conf=None): + cdef: + CHdfsOptions options + shared_ptr[CHadoopFileSystem] wrapped + + if not host.startswith(('hdfs://', 'viewfs://')) and host != "default": + # TODO(kszucs): do more sanitization + host = 'hdfs://{}'.format(host) + + options.ConfigureEndPoint(tobytes(host), int(port)) + options.ConfigureReplication(replication) + options.ConfigureBufferSize(buffer_size) + + if user is not None: + options.ConfigureUser(tobytes(user)) + if default_block_size is not None: + options.ConfigureBlockSize(default_block_size) + if kerb_ticket is not None: + options.ConfigureKerberosTicketCachePath( + tobytes(_stringify_path(kerb_ticket))) + if extra_conf is not None: + for k, v in extra_conf.items(): + options.ConfigureExtraConf(tobytes(k), tobytes(v)) + + with nogil: + wrapped = GetResultValue(CHadoopFileSystem.Make(options)) + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.hdfs = wrapped.get() + + @staticmethod + def from_uri(uri): + """ + Instantiate HadoopFileSystem object from an URI string. + + The following two calls are equivalent + + * ``HadoopFileSystem.from_uri('hdfs://localhost:8020/?user=test\ +&replication=1')`` + * ``HadoopFileSystem('localhost', port=8020, user='test', \ +replication=1)`` + + Parameters + ---------- + uri : str + A string URI describing the connection to HDFS. + In order to change the user, replication, buffer_size or + default_block_size pass the values as query parts. + + Returns + ------- + HadoopFileSystem + """ + cdef: + HadoopFileSystem self = HadoopFileSystem.__new__(HadoopFileSystem) + shared_ptr[CHadoopFileSystem] wrapped + CHdfsOptions options + + options = GetResultValue(CHdfsOptions.FromUriString(tobytes(uri))) + with nogil: + wrapped = GetResultValue(CHadoopFileSystem.Make(options)) + + self.init( wrapped) + return self + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
+ return HadoopFileSystem(**kwargs) + + def __reduce__(self): + cdef CHdfsOptions opts = self.hdfs.options() + return ( + HadoopFileSystem._reconstruct, (dict( + host=frombytes(opts.connection_config.host), + port=opts.connection_config.port, + user=frombytes(opts.connection_config.user), + replication=opts.replication, + buffer_size=opts.buffer_size, + default_block_size=opts.default_block_size, + kerb_ticket=frombytes(opts.connection_config.kerb_ticket), + extra_conf={frombytes(k): frombytes(v) + for k, v in opts.connection_config.extra_conf}, + ),) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..446c114394be16af6c23e35b478da51d7fe2a967 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_hdfsio.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8be646e5def2b098732fc95f92e7fd4d58a351d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_json.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d4c54ed097be9dbcc00a4c630b0fab7e0634d1d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pxd new file mode 100644 index 0000000000000000000000000000000000000000..aecbba317aecd1b331261ca600058e30e0c4f184 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_orc.pxd @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
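# ---------------------------------------------------------------------------
# Editor's note: the declarations below bind the Arrow ORC adapter
# (ORCFileReader / ORCFileWriter) for use from Cython. A minimal sketch of
# how they surface through the public ``pyarrow.orc`` module, assuming a
# hypothetical file name:
#
#   import pyarrow as pa
#   import pyarrow.orc as orc
#
#   table = pa.table({"x": [1, 2, 3]})
#   orc.write_table(table, "example.orc")     # backed by ORCFileWriter
#   f = orc.ORCFile("example.orc")            # backed by ORCFileReader
#   f.nstripes, f.read()                      # NumberOfStripes() / Read()
# ---------------------------------------------------------------------------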
+ +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp cimport bool as c_bool +from libc.string cimport const_char +from libcpp.vector cimport vector as std_vector +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport (CArray, CSchema, CStatus, + CResult, CTable, CMemoryPool, + CKeyValueMetadata, + CRecordBatch, + CTable, CCompressionType, + CRandomAccessFile, COutputStream, + TimeUnit) + +cdef extern from "arrow/adapters/orc/options.h" \ + namespace "arrow::adapters::orc" nogil: + cdef enum CompressionStrategy \ + " arrow::adapters::orc::CompressionStrategy": + _CompressionStrategy_SPEED \ + " arrow::adapters::orc::CompressionStrategy::kSpeed" + _CompressionStrategy_COMPRESSION \ + " arrow::adapters::orc::CompressionStrategy::kCompression" + + cdef enum WriterId" arrow::adapters::orc::WriterId": + _WriterId_ORC_JAVA_WRITER" arrow::adapters::orc::WriterId::kOrcJava" + _WriterId_ORC_CPP_WRITER" arrow::adapters::orc::WriterId::kOrcCpp" + _WriterId_PRESTO_WRITER" arrow::adapters::orc::WriterId::kPresto" + _WriterId_SCRITCHLEY_GO \ + " arrow::adapters::orc::WriterId::kScritchleyGo" + _WriterId_TRINO_WRITER" arrow::adapters::orc::WriterId::kTrino" + _WriterId_UNKNOWN_WRITER" arrow::adapters::orc::WriterId::kUnknown" + + cdef enum WriterVersion" arrow::adapters::orc::WriterVersion": + _WriterVersion_ORIGINAL \ + " arrow::adapters::orc::WriterVersion::kOriginal" + _WriterVersion_HIVE_8732 \ + " arrow::adapters::orc::WriterVersion::kHive8732" + _WriterVersion_HIVE_4243 \ + " arrow::adapters::orc::WriterVersion::kHive4243" + _WriterVersion_HIVE_12055 \ + " arrow::adapters::orc::WriterVersion::kHive12055" + _WriterVersion_HIVE_13083 \ + " arrow::adapters::orc::WriterVersion::kHive13083" + _WriterVersion_ORC_101" arrow::adapters::orc::WriterVersion::kOrc101" + _WriterVersion_ORC_135" arrow::adapters::orc::WriterVersion::kOrc135" + _WriterVersion_ORC_517" arrow::adapters::orc::WriterVersion::kOrc517" + _WriterVersion_ORC_203" arrow::adapters::orc::WriterVersion::kOrc203" + _WriterVersion_ORC_14" arrow::adapters::orc::WriterVersion::kOrc14" + _WriterVersion_MAX" arrow::adapters::orc::WriterVersion::kMax" + + cdef cppclass FileVersion" arrow::adapters::orc::FileVersion": + FileVersion(uint32_t major_version, uint32_t minor_version) + uint32_t major_version() + uint32_t minor_version() + c_string ToString() + + cdef struct WriteOptions" arrow::adapters::orc::WriteOptions": + int64_t batch_size + FileVersion file_version + int64_t stripe_size + CCompressionType compression + int64_t compression_block_size + CompressionStrategy compression_strategy + int64_t row_index_stride + double padding_tolerance + double dictionary_key_size_threshold + std_vector[int64_t] bloom_filter_columns + double bloom_filter_fpp + + +cdef extern from "arrow/adapters/orc/adapter.h" \ + namespace "arrow::adapters::orc" nogil: + + cdef cppclass ORCFileReader: + @staticmethod + CResult[unique_ptr[ORCFileReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + CMemoryPool* pool) + + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + CResult[shared_ptr[CSchema]] ReadSchema() + + CResult[shared_ptr[CRecordBatch]] ReadStripe(int64_t stripe) + CResult[shared_ptr[CRecordBatch]] ReadStripe( + int64_t stripe, std_vector[c_string]) + + CResult[shared_ptr[CTable]] Read() + CResult[shared_ptr[CTable]] Read(std_vector[c_string]) + + int64_t NumberOfStripes() + int64_t NumberOfRows() + FileVersion GetFileVersion() + c_string GetSoftwareVersion() + CResult[CCompressionType] 
GetCompression() + int64_t GetCompressionSize() + int64_t GetRowIndexStride() + WriterId GetWriterId() + int32_t GetWriterIdValue() + WriterVersion GetWriterVersion() + int64_t GetNumberOfStripeStatistics() + int64_t GetContentLength() + int64_t GetStripeStatisticsLength() + int64_t GetFileFooterLength() + int64_t GetFilePostscriptLength() + int64_t GetFileLength() + c_string GetSerializedFileTail() + + cdef cppclass ORCFileWriter: + @staticmethod + CResult[unique_ptr[ORCFileWriter]] Open( + COutputStream* output_stream, const WriteOptions& writer_options) + + CStatus Write(const CTable& table) + + CStatus Close() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ae4094d8b4b5f2ba6a08fcbdcc2cced66fb1d8ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet.pxd @@ -0,0 +1,674 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ +# cython: language_level = 3 + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport (CChunkedArray, CScalar, CSchema, CStatus, + CTable, CMemoryPool, CBuffer, + CKeyValueMetadata, CRandomAccessFile, + COutputStream, CCacheOptions, + TimeUnit, CRecordBatchReader) +from pyarrow.lib cimport _Weakrefable + + +cdef extern from "parquet/api/schema.h" namespace "parquet::schema" nogil: + cdef cppclass Node: + pass + + cdef cppclass GroupNode(Node): + pass + + cdef cppclass PrimitiveNode(Node): + pass + + cdef cppclass ColumnPath: + c_string ToDotString() + vector[c_string] ToDotVector() + + +cdef extern from "parquet/api/schema.h" namespace "parquet" nogil: + enum ParquetType" parquet::Type::type": + ParquetType_BOOLEAN" parquet::Type::BOOLEAN" + ParquetType_INT32" parquet::Type::INT32" + ParquetType_INT64" parquet::Type::INT64" + ParquetType_INT96" parquet::Type::INT96" + ParquetType_FLOAT" parquet::Type::FLOAT" + ParquetType_DOUBLE" parquet::Type::DOUBLE" + ParquetType_BYTE_ARRAY" parquet::Type::BYTE_ARRAY" + ParquetType_FIXED_LEN_BYTE_ARRAY" parquet::Type::FIXED_LEN_BYTE_ARRAY" + + enum ParquetLogicalTypeId" parquet::LogicalType::Type::type": + ParquetLogicalType_UNDEFINED" parquet::LogicalType::Type::UNDEFINED" + ParquetLogicalType_STRING" parquet::LogicalType::Type::STRING" + ParquetLogicalType_MAP" parquet::LogicalType::Type::MAP" + ParquetLogicalType_LIST" parquet::LogicalType::Type::LIST" + ParquetLogicalType_ENUM" parquet::LogicalType::Type::ENUM" + ParquetLogicalType_DECIMAL" parquet::LogicalType::Type::DECIMAL" + ParquetLogicalType_DATE" parquet::LogicalType::Type::DATE" + ParquetLogicalType_TIME" parquet::LogicalType::Type::TIME" + ParquetLogicalType_TIMESTAMP" 
parquet::LogicalType::Type::TIMESTAMP" + ParquetLogicalType_INT" parquet::LogicalType::Type::INT" + ParquetLogicalType_JSON" parquet::LogicalType::Type::JSON" + ParquetLogicalType_BSON" parquet::LogicalType::Type::BSON" + ParquetLogicalType_UUID" parquet::LogicalType::Type::UUID" + ParquetLogicalType_NONE" parquet::LogicalType::Type::NONE" + + enum ParquetTimeUnit" parquet::LogicalType::TimeUnit::unit": + ParquetTimeUnit_UNKNOWN" parquet::LogicalType::TimeUnit::UNKNOWN" + ParquetTimeUnit_MILLIS" parquet::LogicalType::TimeUnit::MILLIS" + ParquetTimeUnit_MICROS" parquet::LogicalType::TimeUnit::MICROS" + ParquetTimeUnit_NANOS" parquet::LogicalType::TimeUnit::NANOS" + + enum ParquetConvertedType" parquet::ConvertedType::type": + ParquetConvertedType_NONE" parquet::ConvertedType::NONE" + ParquetConvertedType_UTF8" parquet::ConvertedType::UTF8" + ParquetConvertedType_MAP" parquet::ConvertedType::MAP" + ParquetConvertedType_MAP_KEY_VALUE \ + " parquet::ConvertedType::MAP_KEY_VALUE" + ParquetConvertedType_LIST" parquet::ConvertedType::LIST" + ParquetConvertedType_ENUM" parquet::ConvertedType::ENUM" + ParquetConvertedType_DECIMAL" parquet::ConvertedType::DECIMAL" + ParquetConvertedType_DATE" parquet::ConvertedType::DATE" + ParquetConvertedType_TIME_MILLIS" parquet::ConvertedType::TIME_MILLIS" + ParquetConvertedType_TIME_MICROS" parquet::ConvertedType::TIME_MICROS" + ParquetConvertedType_TIMESTAMP_MILLIS \ + " parquet::ConvertedType::TIMESTAMP_MILLIS" + ParquetConvertedType_TIMESTAMP_MICROS \ + " parquet::ConvertedType::TIMESTAMP_MICROS" + ParquetConvertedType_UINT_8" parquet::ConvertedType::UINT_8" + ParquetConvertedType_UINT_16" parquet::ConvertedType::UINT_16" + ParquetConvertedType_UINT_32" parquet::ConvertedType::UINT_32" + ParquetConvertedType_UINT_64" parquet::ConvertedType::UINT_64" + ParquetConvertedType_INT_8" parquet::ConvertedType::INT_8" + ParquetConvertedType_INT_16" parquet::ConvertedType::INT_16" + ParquetConvertedType_INT_32" parquet::ConvertedType::INT_32" + ParquetConvertedType_INT_64" parquet::ConvertedType::INT_64" + ParquetConvertedType_JSON" parquet::ConvertedType::JSON" + ParquetConvertedType_BSON" parquet::ConvertedType::BSON" + ParquetConvertedType_INTERVAL" parquet::ConvertedType::INTERVAL" + + enum ParquetRepetition" parquet::Repetition::type": + ParquetRepetition_REQUIRED" parquet::REPETITION::REQUIRED" + ParquetRepetition_OPTIONAL" parquet::REPETITION::OPTIONAL" + ParquetRepetition_REPEATED" parquet::REPETITION::REPEATED" + + enum ParquetEncoding" parquet::Encoding::type": + ParquetEncoding_PLAIN" parquet::Encoding::PLAIN" + ParquetEncoding_PLAIN_DICTIONARY" parquet::Encoding::PLAIN_DICTIONARY" + ParquetEncoding_RLE" parquet::Encoding::RLE" + ParquetEncoding_BIT_PACKED" parquet::Encoding::BIT_PACKED" + ParquetEncoding_DELTA_BINARY_PACKED \ + " parquet::Encoding::DELTA_BINARY_PACKED" + ParquetEncoding_DELTA_LENGTH_BYTE_ARRAY \ + " parquet::Encoding::DELTA_LENGTH_BYTE_ARRAY" + ParquetEncoding_DELTA_BYTE_ARRAY" parquet::Encoding::DELTA_BYTE_ARRAY" + ParquetEncoding_RLE_DICTIONARY" parquet::Encoding::RLE_DICTIONARY" + ParquetEncoding_BYTE_STREAM_SPLIT \ + " parquet::Encoding::BYTE_STREAM_SPLIT" + + enum ParquetCompression" parquet::Compression::type": + ParquetCompression_UNCOMPRESSED" parquet::Compression::UNCOMPRESSED" + ParquetCompression_SNAPPY" parquet::Compression::SNAPPY" + ParquetCompression_GZIP" parquet::Compression::GZIP" + ParquetCompression_LZO" parquet::Compression::LZO" + ParquetCompression_BROTLI" parquet::Compression::BROTLI" + ParquetCompression_LZ4" 
parquet::Compression::LZ4" + ParquetCompression_ZSTD" parquet::Compression::ZSTD" + + enum ParquetVersion" parquet::ParquetVersion::type": + ParquetVersion_V1" parquet::ParquetVersion::PARQUET_1_0" + ParquetVersion_V2_0" parquet::ParquetVersion::PARQUET_2_0" + ParquetVersion_V2_4" parquet::ParquetVersion::PARQUET_2_4" + ParquetVersion_V2_6" parquet::ParquetVersion::PARQUET_2_6" + + enum ParquetSortOrder" parquet::SortOrder::type": + ParquetSortOrder_SIGNED" parquet::SortOrder::SIGNED" + ParquetSortOrder_UNSIGNED" parquet::SortOrder::UNSIGNED" + ParquetSortOrder_UNKNOWN" parquet::SortOrder::UNKNOWN" + + cdef cppclass CParquetLogicalType" parquet::LogicalType": + c_string ToString() const + c_string ToJSON() const + ParquetLogicalTypeId type() const + + cdef cppclass CParquetDecimalType \ + " parquet::DecimalLogicalType"(CParquetLogicalType): + int32_t precision() const + int32_t scale() const + + cdef cppclass CParquetIntType \ + " parquet::IntLogicalType"(CParquetLogicalType): + int bit_width() const + c_bool is_signed() const + + cdef cppclass CParquetTimeType \ + " parquet::TimeLogicalType"(CParquetLogicalType): + c_bool is_adjusted_to_utc() const + ParquetTimeUnit time_unit() const + + cdef cppclass CParquetTimestampType \ + " parquet::TimestampLogicalType"(CParquetLogicalType): + c_bool is_adjusted_to_utc() const + ParquetTimeUnit time_unit() const + + cdef cppclass ColumnDescriptor" parquet::ColumnDescriptor": + c_bool Equals(const ColumnDescriptor& other) + + shared_ptr[ColumnPath] path() + int16_t max_definition_level() + int16_t max_repetition_level() + + ParquetType physical_type() + const shared_ptr[const CParquetLogicalType]& logical_type() + ParquetConvertedType converted_type() + const c_string& name() + int type_length() + int type_precision() + int type_scale() + + cdef cppclass SchemaDescriptor: + const ColumnDescriptor* Column(int i) + shared_ptr[Node] schema() + GroupNode* group() + c_bool Equals(const SchemaDescriptor& other) + c_string ToString() + int num_columns() + + cdef c_string FormatStatValue(ParquetType parquet_type, c_string val) + + enum ParquetCipher" parquet::ParquetCipher::type": + ParquetCipher_AES_GCM_V1" parquet::ParquetCipher::AES_GCM_V1" + ParquetCipher_AES_GCM_CTR_V1" parquet::ParquetCipher::AES_GCM_CTR_V1" + + struct AadMetadata: + c_string aad_prefix + c_string aad_file_unique + c_bool supply_aad_prefix + + struct EncryptionAlgorithm: + ParquetCipher algorithm + AadMetadata aad + +cdef extern from "parquet/api/reader.h" namespace "parquet" nogil: + cdef cppclass ColumnReader: + pass + + cdef cppclass BoolReader(ColumnReader): + pass + + cdef cppclass Int32Reader(ColumnReader): + pass + + cdef cppclass Int64Reader(ColumnReader): + pass + + cdef cppclass Int96Reader(ColumnReader): + pass + + cdef cppclass FloatReader(ColumnReader): + pass + + cdef cppclass DoubleReader(ColumnReader): + pass + + cdef cppclass ByteArrayReader(ColumnReader): + pass + + cdef cppclass RowGroupReader: + pass + + cdef cppclass CEncodedStatistics" parquet::EncodedStatistics": + const c_string& max() const + const c_string& min() const + int64_t null_count + int64_t distinct_count + bint has_min + bint has_max + bint has_null_count + bint has_distinct_count + + cdef cppclass ParquetByteArray" parquet::ByteArray": + uint32_t len + const uint8_t* ptr + + cdef cppclass ParquetFLBA" parquet::FLBA": + const uint8_t* ptr + + cdef cppclass CStatistics" parquet::Statistics": + int64_t null_count() const + int64_t distinct_count() const + int64_t num_values() const + bint HasMinMax() + 
bint HasNullCount() + bint HasDistinctCount() + c_bool Equals(const CStatistics&) const + void Reset() + c_string EncodeMin() + c_string EncodeMax() + CEncodedStatistics Encode() + void SetComparator() + ParquetType physical_type() const + const ColumnDescriptor* descr() const + + cdef cppclass CBoolStatistics" parquet::BoolStatistics"(CStatistics): + c_bool min() + c_bool max() + + cdef cppclass CInt32Statistics" parquet::Int32Statistics"(CStatistics): + int32_t min() + int32_t max() + + cdef cppclass CInt64Statistics" parquet::Int64Statistics"(CStatistics): + int64_t min() + int64_t max() + + cdef cppclass CFloatStatistics" parquet::FloatStatistics"(CStatistics): + float min() + float max() + + cdef cppclass CDoubleStatistics" parquet::DoubleStatistics"(CStatistics): + double min() + double max() + + cdef cppclass CByteArrayStatistics \ + " parquet::ByteArrayStatistics"(CStatistics): + ParquetByteArray min() + ParquetByteArray max() + + cdef cppclass CFLBAStatistics" parquet::FLBAStatistics"(CStatistics): + ParquetFLBA min() + ParquetFLBA max() + + cdef cppclass CColumnCryptoMetaData" parquet::ColumnCryptoMetaData": + shared_ptr[ColumnPath] path_in_schema() const + c_bool encrypted_with_footer_key() const + const c_string& key_metadata() const + + cdef cppclass ParquetIndexLocation" parquet::IndexLocation": + int64_t offset + int32_t length + + cdef cppclass CColumnChunkMetaData" parquet::ColumnChunkMetaData": + int64_t file_offset() const + const c_string& file_path() const + + c_bool is_metadata_set() const + ParquetType type() const + int64_t num_values() const + shared_ptr[ColumnPath] path_in_schema() const + bint is_stats_set() const + shared_ptr[CStatistics] statistics() const + ParquetCompression compression() const + const vector[ParquetEncoding]& encodings() const + c_bool Equals(const CColumnChunkMetaData&) const + + int64_t has_dictionary_page() const + int64_t dictionary_page_offset() const + int64_t data_page_offset() const + int64_t index_page_offset() const + int64_t total_compressed_size() const + int64_t total_uncompressed_size() const + unique_ptr[CColumnCryptoMetaData] crypto_metadata() const + optional[ParquetIndexLocation] GetColumnIndexLocation() const + optional[ParquetIndexLocation] GetOffsetIndexLocation() const + + struct CSortingColumn" parquet::SortingColumn": + int column_idx + c_bool descending + c_bool nulls_first + + cdef cppclass CRowGroupMetaData" parquet::RowGroupMetaData": + c_bool Equals(const CRowGroupMetaData&) const + int num_columns() const + int64_t num_rows() const + int64_t total_byte_size() const + vector[CSortingColumn] sorting_columns() const + unique_ptr[CColumnChunkMetaData] ColumnChunk(int i) const + + cdef cppclass CFileMetaData" parquet::FileMetaData": + c_bool Equals(const CFileMetaData&) const + uint32_t size() + int num_columns() + int64_t num_rows() + int num_row_groups() + ParquetVersion version() + const c_string created_by() + int num_schema_elements() + + void set_file_path(const c_string& path) + void AppendRowGroups(const CFileMetaData& other) except + + + unique_ptr[CRowGroupMetaData] RowGroup(int i) + const SchemaDescriptor* schema() + shared_ptr[const CKeyValueMetadata] key_value_metadata() const + void WriteTo(COutputStream* dst) const + + inline c_bool is_encryption_algorithm_set() const + inline EncryptionAlgorithm encryption_algorithm() const + inline const c_string& footer_signing_key_metadata() const + + cdef shared_ptr[CFileMetaData] CFileMetaData_Make \ + " parquet::FileMetaData::Make"(const void* 
serialized_metadata, + uint32_t* metadata_len) + + cdef cppclass CReaderProperties" parquet::ReaderProperties": + c_bool is_buffered_stream_enabled() const + void enable_buffered_stream() + void disable_buffered_stream() + + void set_buffer_size(int64_t buf_size) + int64_t buffer_size() const + + void set_thrift_string_size_limit(int32_t size) + int32_t thrift_string_size_limit() const + + void set_thrift_container_size_limit(int32_t size) + int32_t thrift_container_size_limit() const + + void file_decryption_properties(shared_ptr[CFileDecryptionProperties] + decryption) + shared_ptr[CFileDecryptionProperties] file_decryption_properties() \ + const + + c_bool page_checksum_verification() const + void set_page_checksum_verification(c_bool check_crc) + + CReaderProperties default_reader_properties() + + cdef cppclass ArrowReaderProperties: + ArrowReaderProperties() + void set_read_dictionary(int column_index, c_bool read_dict) + c_bool read_dictionary() + void set_batch_size(int64_t batch_size) + int64_t batch_size() + void set_pre_buffer(c_bool pre_buffer) + c_bool pre_buffer() const + void set_cache_options(CCacheOptions options) + CCacheOptions cache_options() const + void set_coerce_int96_timestamp_unit(TimeUnit unit) + TimeUnit coerce_int96_timestamp_unit() const + + ArrowReaderProperties default_arrow_reader_properties() + + cdef cppclass ParquetFileReader: + shared_ptr[CFileMetaData] metadata() + + +cdef extern from "parquet/api/writer.h" namespace "parquet" nogil: + cdef cppclass WriterProperties: + cppclass Builder: + Builder* data_page_version(ParquetDataPageVersion version) + Builder* version(ParquetVersion version) + Builder* compression(ParquetCompression codec) + Builder* compression(const c_string& path, + ParquetCompression codec) + Builder* compression_level(int compression_level) + Builder* compression_level(const c_string& path, + int compression_level) + Builder* encryption( + shared_ptr[CFileEncryptionProperties] + file_encryption_properties) + Builder* disable_dictionary() + Builder* enable_dictionary() + Builder* enable_dictionary(const c_string& path) + Builder* set_sorting_columns(vector[CSortingColumn] sorting_columns) + Builder* disable_statistics() + Builder* enable_statistics() + Builder* enable_statistics(const c_string& path) + Builder* data_pagesize(int64_t size) + Builder* encoding(ParquetEncoding encoding) + Builder* encoding(const c_string& path, + ParquetEncoding encoding) + Builder* max_row_group_length(int64_t size) + Builder* write_batch_size(int64_t batch_size) + Builder* dictionary_pagesize_limit(int64_t dictionary_pagesize_limit) + Builder* enable_write_page_index() + Builder* disable_write_page_index() + Builder* enable_page_checksum() + Builder* disable_page_checksum() + shared_ptr[WriterProperties] build() + + cdef cppclass ArrowWriterProperties: + cppclass Builder: + Builder() + Builder* disable_deprecated_int96_timestamps() + Builder* enable_deprecated_int96_timestamps() + Builder* coerce_timestamps(TimeUnit unit) + Builder* allow_truncated_timestamps() + Builder* disallow_truncated_timestamps() + Builder* store_schema() + Builder* enable_compliant_nested_types() + Builder* disable_compliant_nested_types() + Builder* set_engine_version(ArrowWriterEngineVersion version) + shared_ptr[ArrowWriterProperties] build() + c_bool support_deprecated_int96_timestamps() + + +cdef extern from "parquet/arrow/reader.h" namespace "parquet::arrow" nogil: + cdef cppclass FileReader: + FileReader(CMemoryPool* pool, unique_ptr[ParquetFileReader] reader) + + CStatus 
GetSchema(shared_ptr[CSchema]* out) + + CStatus ReadColumn(int i, shared_ptr[CChunkedArray]* out) + CStatus ReadSchemaField(int i, shared_ptr[CChunkedArray]* out) + + int num_row_groups() + CStatus ReadRowGroup(int i, shared_ptr[CTable]* out) + CStatus ReadRowGroup(int i, const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus ReadRowGroups(const vector[int]& row_groups, + shared_ptr[CTable]* out) + CStatus ReadRowGroups(const vector[int]& row_groups, + const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus GetRecordBatchReader(const vector[int]& row_group_indices, + const vector[int]& column_indices, + unique_ptr[CRecordBatchReader]* out) + CStatus GetRecordBatchReader(const vector[int]& row_group_indices, + unique_ptr[CRecordBatchReader]* out) + + CStatus ReadTable(shared_ptr[CTable]* out) + CStatus ReadTable(const vector[int]& column_indices, + shared_ptr[CTable]* out) + + CStatus ScanContents(vector[int] columns, int32_t column_batch_size, + int64_t* num_rows) + + const ParquetFileReader* parquet_reader() + + void set_use_threads(c_bool use_threads) + + void set_batch_size(int64_t batch_size) + + cdef cppclass FileReaderBuilder: + FileReaderBuilder() + CStatus Open(const shared_ptr[CRandomAccessFile]& file, + const CReaderProperties& properties, + const shared_ptr[CFileMetaData]& metadata) + + ParquetFileReader* raw_reader() + FileReaderBuilder* memory_pool(CMemoryPool*) + FileReaderBuilder* properties(const ArrowReaderProperties&) + CStatus Build(unique_ptr[FileReader]* out) + + CStatus FromParquetSchema( + const SchemaDescriptor* parquet_schema, + const ArrowReaderProperties& properties, + const shared_ptr[const CKeyValueMetadata]& key_value_metadata, + shared_ptr[CSchema]* out) + + CStatus StatisticsAsScalars(const CStatistics& Statistics, + shared_ptr[CScalar]* min, + shared_ptr[CScalar]* max) + +cdef extern from "parquet/arrow/schema.h" namespace "parquet::arrow" nogil: + + CStatus ToParquetSchema( + const CSchema* arrow_schema, + const WriterProperties& properties, + const ArrowWriterProperties& arrow_properties, + shared_ptr[SchemaDescriptor]* out) + + +cdef extern from "parquet/properties.h" namespace "parquet" nogil: + cdef enum ArrowWriterEngineVersion: + V1 "parquet::ArrowWriterProperties::V1", + V2 "parquet::ArrowWriterProperties::V2" + + cdef cppclass ParquetDataPageVersion: + pass + + cdef ParquetDataPageVersion ParquetDataPageVersion_V1 \ + " parquet::ParquetDataPageVersion::V1" + cdef ParquetDataPageVersion ParquetDataPageVersion_V2 \ + " parquet::ParquetDataPageVersion::V2" + +cdef extern from "parquet/arrow/writer.h" namespace "parquet::arrow" nogil: + cdef cppclass FileWriter: + + @staticmethod + CResult[unique_ptr[FileWriter]] Open(const CSchema& schema, CMemoryPool* pool, + const shared_ptr[COutputStream]& sink, + const shared_ptr[WriterProperties]& properties, + const shared_ptr[ArrowWriterProperties]& arrow_properties) + + CStatus WriteTable(const CTable& table, int64_t chunk_size) + CStatus NewRowGroup(int64_t chunk_size) + CStatus Close() + + const shared_ptr[CFileMetaData] metadata() const + + CStatus WriteMetaDataFile( + const CFileMetaData& file_metadata, + const COutputStream* sink) + +cdef class FileEncryptionProperties: + """File-level encryption properties for the low-level API""" + cdef: + shared_ptr[CFileEncryptionProperties] properties + + @staticmethod + cdef inline FileEncryptionProperties wrap( + shared_ptr[CFileEncryptionProperties] properties): + + result = FileEncryptionProperties() + result.properties = 
properties + return result + + cdef inline shared_ptr[CFileEncryptionProperties] unwrap(self): + return self.properties + +cdef shared_ptr[WriterProperties] _create_writer_properties( + use_dictionary=*, + compression=*, + version=*, + write_statistics=*, + data_page_size=*, + compression_level=*, + use_byte_stream_split=*, + column_encoding=*, + data_page_version=*, + FileEncryptionProperties encryption_properties=*, + write_batch_size=*, + dictionary_pagesize_limit=*, + write_page_index=*, + write_page_checksum=*, + sorting_columns=*, +) except * + + +cdef shared_ptr[ArrowWriterProperties] _create_arrow_writer_properties( + use_deprecated_int96_timestamps=*, + coerce_timestamps=*, + allow_truncated_timestamps=*, + writer_engine_version=*, + use_compliant_nested_type=*, + store_schema=*, +) except * + +cdef class ParquetSchema(_Weakrefable): + cdef: + FileMetaData parent # the FileMetaData owning the SchemaDescriptor + const SchemaDescriptor* schema + +cdef class FileMetaData(_Weakrefable): + cdef: + shared_ptr[CFileMetaData] sp_metadata + CFileMetaData* _metadata + ParquetSchema _schema + + cdef inline init(self, const shared_ptr[CFileMetaData]& metadata): + self.sp_metadata = metadata + self._metadata = metadata.get() + +cdef class RowGroupMetaData(_Weakrefable): + cdef: + int index # for pickling support + unique_ptr[CRowGroupMetaData] up_metadata + CRowGroupMetaData* metadata + FileMetaData parent + +cdef class ColumnChunkMetaData(_Weakrefable): + cdef: + unique_ptr[CColumnChunkMetaData] up_metadata + CColumnChunkMetaData* metadata + RowGroupMetaData parent + + cdef inline init(self, RowGroupMetaData parent, int i): + self.up_metadata = parent.metadata.ColumnChunk(i) + self.metadata = self.up_metadata.get() + self.parent = parent + +cdef class Statistics(_Weakrefable): + cdef: + shared_ptr[CStatistics] statistics + ColumnChunkMetaData parent + + cdef inline init(self, const shared_ptr[CStatistics]& statistics, + ColumnChunkMetaData parent): + self.statistics = statistics + self.parent = parent + +cdef extern from "parquet/encryption/encryption.h" namespace "parquet" nogil: + cdef cppclass CFileDecryptionProperties\ + " parquet::FileDecryptionProperties": + pass + + cdef cppclass CFileEncryptionProperties\ + " parquet::FileEncryptionProperties": + pass + +cdef class FileDecryptionProperties: + """File-level decryption properties for the low-level API""" + cdef: + shared_ptr[CFileDecryptionProperties] properties + + @staticmethod + cdef inline FileDecryptionProperties wrap( + shared_ptr[CFileDecryptionProperties] properties): + + result = FileDecryptionProperties() + result.properties = properties + return result + + cdef inline shared_ptr[CFileDecryptionProperties] unwrap(self): + return self.properties diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6ee361f2e2b7d5fbc1076b45d3cbe957008bb343 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d0a9a6612328c547bc724d6fcf2d37ae5e7badd3 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/_parquet_encryption.pyx @@ -0,0 +1,484 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ + +from datetime import timedelta + +from cython.operator cimport dereference as deref +from libcpp.memory cimport shared_ptr +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport _Weakrefable +from pyarrow.lib import tobytes, frombytes + + +cdef ParquetCipher cipher_from_name(name): + name = name.upper() + if name == 'AES_GCM_V1': + return ParquetCipher_AES_GCM_V1 + elif name == 'AES_GCM_CTR_V1': + return ParquetCipher_AES_GCM_CTR_V1 + else: + raise ValueError(f'Invalid cipher name: {name!r}') + + +cdef cipher_to_name(ParquetCipher cipher): + if ParquetCipher_AES_GCM_V1 == cipher: + return 'AES_GCM_V1' + elif ParquetCipher_AES_GCM_CTR_V1 == cipher: + return 'AES_GCM_CTR_V1' + else: + raise ValueError('Invalid cipher value: {0}'.format(cipher)) + +cdef class EncryptionConfiguration(_Weakrefable): + """Configuration of the encryption, such as which columns to encrypt""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, footer_key, *, column_keys=None, + encryption_algorithm=None, + plaintext_footer=None, double_wrapping=None, + cache_lifetime=None, internal_key_material=None, + data_key_length_bits=None): + self.configuration.reset( + new CEncryptionConfiguration(tobytes(footer_key))) + if column_keys is not None: + self.column_keys = column_keys + if encryption_algorithm is not None: + self.encryption_algorithm = encryption_algorithm + if plaintext_footer is not None: + self.plaintext_footer = plaintext_footer + if double_wrapping is not None: + self.double_wrapping = double_wrapping + if cache_lifetime is not None: + self.cache_lifetime = cache_lifetime + if internal_key_material is not None: + self.internal_key_material = internal_key_material + if data_key_length_bits is not None: + self.data_key_length_bits = data_key_length_bits + + @property + def footer_key(self): + """ID of the master key for footer encryption/signing""" + return frombytes(self.configuration.get().footer_key) + + @property + def column_keys(self): + """ + List of columns to encrypt, with master key IDs. + """ + column_keys_str = frombytes(self.configuration.get().column_keys) + # Convert from "masterKeyID:colName,colName;masterKeyID:colName..." 
+ # (see HIVE-21848) to dictionary of master key ID to column name lists + column_keys_to_key_list_str = dict(subString.replace(" ", "").split( + ":") for subString in column_keys_str.split(";")) + column_keys_dict = {k: v.split( + ",") for k, v in column_keys_to_key_list_str.items()} + return column_keys_dict + + @column_keys.setter + def column_keys(self, dict value): + if value is not None: + # convert a dictionary such as + # '{"key1": ["col1 ", "col2"], "key2": ["col3 ", "col4"]}'' + # to the string defined by the spec + # 'key1: col1 , col2; key2: col3 , col4' + column_keys = "; ".join( + ["{}: {}".format(k, ", ".join(v)) for k, v in value.items()]) + self.configuration.get().column_keys = tobytes(column_keys) + + @property + def encryption_algorithm(self): + """Parquet encryption algorithm. + Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1".""" + return cipher_to_name(self.configuration.get().encryption_algorithm) + + @encryption_algorithm.setter + def encryption_algorithm(self, value): + cipher = cipher_from_name(value) + self.configuration.get().encryption_algorithm = cipher + + @property + def plaintext_footer(self): + """Write files with plaintext footer.""" + return self.configuration.get().plaintext_footer + + @plaintext_footer.setter + def plaintext_footer(self, value): + self.configuration.get().plaintext_footer = value + + @property + def double_wrapping(self): + """Use double wrapping - where data encryption keys (DEKs) are + encrypted with key encryption keys (KEKs), which in turn are + encrypted with master keys. + If set to false, use single wrapping - where DEKs are + encrypted directly with master keys.""" + return self.configuration.get().double_wrapping + + @double_wrapping.setter + def double_wrapping(self, value): + self.configuration.get().double_wrapping = value + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + if not isinstance(value, timedelta): + raise TypeError("cache_lifetime should be a timedelta") + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + @property + def internal_key_material(self): + """Store key material inside Parquet file footers; this mode doesn’t + produce additional files. If set to false, key material is stored in + separate files in the same folder, which enables key rotation for + immutable Parquet files.""" + return self.configuration.get().internal_key_material + + @internal_key_material.setter + def internal_key_material(self, value): + self.configuration.get().internal_key_material = value + + @property + def data_key_length_bits(self): + """Length of data encryption keys (DEKs), randomly generated by parquet key + management tools. 
Can be 128, 192 or 256 bits.""" + return self.configuration.get().data_key_length_bits + + @data_key_length_bits.setter + def data_key_length_bits(self, value): + self.configuration.get().data_key_length_bits = value + + cdef inline shared_ptr[CEncryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class DecryptionConfiguration(_Weakrefable): + """Configuration of the decryption, such as cache timeout.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, cache_lifetime=None): + self.configuration.reset(new CDecryptionConfiguration()) + + @property + def cache_lifetime(self): + """Lifetime of cached entities (key encryption keys, + local wrapping keys, KMS client objects).""" + return timedelta( + seconds=self.configuration.get().cache_lifetime_seconds) + + @cache_lifetime.setter + def cache_lifetime(self, value): + self.configuration.get().cache_lifetime_seconds = value.total_seconds() + + cdef inline shared_ptr[CDecryptionConfiguration] unwrap(self) nogil: + return self.configuration + + +cdef class KmsConnectionConfig(_Weakrefable): + """Configuration of the connection to the Key Management Service (KMS)""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, kms_instance_id=None, kms_instance_url=None, + key_access_token=None, custom_kms_conf=None): + self.configuration.reset(new CKmsConnectionConfig()) + if kms_instance_id is not None: + self.kms_instance_id = kms_instance_id + if kms_instance_url is not None: + self.kms_instance_url = kms_instance_url + if key_access_token is None: + self.key_access_token = b'DEFAULT' + else: + self.key_access_token = key_access_token + if custom_kms_conf is not None: + self.custom_kms_conf = custom_kms_conf + + @property + def kms_instance_id(self): + """ID of the KMS instance that will be used for encryption + (if multiple KMS instances are available).""" + return frombytes(self.configuration.get().kms_instance_id) + + @kms_instance_id.setter + def kms_instance_id(self, value): + self.configuration.get().kms_instance_id = tobytes(value) + + @property + def kms_instance_url(self): + """URL of the KMS instance.""" + return frombytes(self.configuration.get().kms_instance_url) + + @kms_instance_url.setter + def kms_instance_url(self, value): + self.configuration.get().kms_instance_url = tobytes(value) + + @property + def key_access_token(self): + """Authorization token that will be passed to KMS.""" + return frombytes(self.configuration.get() + .refreshable_key_access_token.get().value()) + + @key_access_token.setter + def key_access_token(self, value): + self.refresh_key_access_token(value) + + @property + def custom_kms_conf(self): + """A dictionary with KMS-type-specific configuration""" + custom_kms_conf = { + frombytes(k): frombytes(v) + for k, v in self.configuration.get().custom_kms_conf + } + return custom_kms_conf + + @custom_kms_conf.setter + def custom_kms_conf(self, dict value): + if value is not None: + for k, v in value.items(): + if isinstance(k, str) and isinstance(v, str): + self.configuration.get().custom_kms_conf[tobytes(k)] = \ + tobytes(v) + else: + raise TypeError("Expected custom_kms_conf to be " + + "a dictionary of strings") + + def refresh_key_access_token(self, value): + cdef: + shared_ptr[CKeyAccessToken] c_key_access_token = \ + self.configuration.get().refreshable_key_access_token + + c_key_access_token.get().Refresh(tobytes(value)) + + cdef inline shared_ptr[CKmsConnectionConfig] unwrap(self) nogil: + return self.configuration + 
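    # Editor's sketch (not part of the upstream diff): typical wiring of the
    # high-level encryption objects defined in this module, assuming a
    # user-defined KmsClient subclass named MyKmsClient:
    #
    #   kms_config = KmsConnectionConfig(custom_kms_conf={"footer_key": "..."})
    #   encryption_config = EncryptionConfiguration(
    #       footer_key="footer_key",
    #       column_keys={"col_key": ["column_a"]})
    #   factory = CryptoFactory(lambda config: MyKmsClient(config))
    #   props = factory.file_encryption_properties(kms_config, encryption_config)
    #
    # ``props`` can then be passed as the ``encryption_properties`` writer
    # option declared earlier in this diff (_create_writer_properties).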
+ @staticmethod + cdef wrap(const CKmsConnectionConfig& config): + result = KmsConnectionConfig() + result.configuration = make_shared[CKmsConnectionConfig](move(config)) + return result + + +# Callback definitions for CPyKmsClientVtable +cdef void _cb_wrap_key( + handler, const c_string& key_bytes, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wrapped_key = handler.wrap_key(key_bytes, mkid_str) + out[0] = tobytes(wrapped_key) + + +cdef void _cb_unwrap_key( + handler, const c_string& wrapped_key, + const c_string& master_key_identifier, c_string* out) except *: + mkid_str = frombytes(master_key_identifier) + wk_str = frombytes(wrapped_key) + key = handler.unwrap_key(wk_str, mkid_str) + out[0] = tobytes(key) + + +cdef class KmsClient(_Weakrefable): + """The abstract base class for KmsClient implementations.""" + cdef: + shared_ptr[CKmsClient] client + + def __init__(self): + self.init() + + cdef init(self): + cdef: + CPyKmsClientVtable vtable = CPyKmsClientVtable() + + vtable.wrap_key = _cb_wrap_key + vtable.unwrap_key = _cb_unwrap_key + + self.client.reset(new CPyKmsClient(self, vtable)) + + def wrap_key(self, key_bytes, master_key_identifier): + """Wrap a key - encrypt it with the master key.""" + raise NotImplementedError() + + def unwrap_key(self, wrapped_key, master_key_identifier): + """Unwrap a key - decrypt it with the master key.""" + raise NotImplementedError() + + cdef inline shared_ptr[CKmsClient] unwrap(self) nogil: + return self.client + + +# Callback definition for CPyKmsClientFactoryVtable +cdef void _cb_create_kms_client( + handler, + const CKmsConnectionConfig& kms_connection_config, + shared_ptr[CKmsClient]* out) except *: + connection_config = KmsConnectionConfig.wrap(kms_connection_config) + + result = handler(connection_config) + if not isinstance(result, KmsClient): + raise TypeError( + "callable must return KmsClient instances, but got {}".format( + type(result))) + + out[0] = ( result).unwrap() + + +cdef class CryptoFactory(_Weakrefable): + """ A factory that produces the low-level FileEncryptionProperties and + FileDecryptionProperties objects, from the high-level parameters.""" + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, kms_client_factory): + """Create CryptoFactory. + + Parameters + ---------- + kms_client_factory : a callable that accepts KmsConnectionConfig + and returns a KmsClient + """ + self.factory.reset(new CPyCryptoFactory()) + + if callable(kms_client_factory): + self.init(kms_client_factory) + else: + raise TypeError("Parameter kms_client_factory must be a callable") + + cdef init(self, callable_client_factory): + cdef: + CPyKmsClientFactoryVtable vtable + shared_ptr[CPyKmsClientFactory] kms_client_factory + + vtable.create_kms_client = _cb_create_kms_client + kms_client_factory.reset( + new CPyKmsClientFactory(callable_client_factory, vtable)) + # A KmsClientFactory object must be registered + # via this method before calling any of + # file_encryption_properties()/file_decryption_properties() methods. + self.factory.get().RegisterKmsClientFactory( + static_pointer_cast[CKmsClientFactory, CPyKmsClientFactory]( + kms_client_factory)) + + def file_encryption_properties(self, + KmsConnectionConfig kms_connection_config, + EncryptionConfiguration encryption_config): + """Create file encryption properties. 
+ + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + encryption_config : EncryptionConfiguration + Configuration of the encryption, such as which columns to encrypt + + Returns + ------- + file_encryption_properties : FileEncryptionProperties + File encryption properties. + """ + cdef: + CResult[shared_ptr[CFileEncryptionProperties]] \ + file_encryption_properties_result + with nogil: + file_encryption_properties_result = \ + self.factory.get().SafeGetFileEncryptionProperties( + deref(kms_connection_config.unwrap().get()), + deref(encryption_config.unwrap().get())) + file_encryption_properties = GetResultValue( + file_encryption_properties_result) + return FileEncryptionProperties.wrap(file_encryption_properties) + + def file_decryption_properties( + self, + KmsConnectionConfig kms_connection_config, + DecryptionConfiguration decryption_config=None): + """Create file decryption properties. + + Parameters + ---------- + kms_connection_config : KmsConnectionConfig + Configuration of connection to KMS + + decryption_config : DecryptionConfiguration, default None + Configuration of the decryption, such as cache timeout. + Can be None. + + Returns + ------- + file_decryption_properties : FileDecryptionProperties + File decryption properties. + """ + cdef: + CDecryptionConfiguration c_decryption_config + CResult[shared_ptr[CFileDecryptionProperties]] \ + c_file_decryption_properties + if decryption_config is None: + c_decryption_config = CDecryptionConfiguration() + else: + c_decryption_config = deref(decryption_config.unwrap().get()) + with nogil: + c_file_decryption_properties = \ + self.factory.get().SafeGetFileDecryptionProperties( + deref(kms_connection_config.unwrap().get()), + c_decryption_config) + file_decryption_properties = GetResultValue( + c_file_decryption_properties) + return FileDecryptionProperties.wrap(file_decryption_properties) + + def remove_cache_entries_for_token(self, access_token): + self.factory.get().RemoveCacheEntriesForToken(tobytes(access_token)) + + def remove_cache_entries_for_all_tokens(self): + self.factory.get().RemoveCacheEntriesForAllTokens() + + cdef inline shared_ptr[CPyCryptoFactory] unwrap(self): + return self.factory + + +cdef shared_ptr[CCryptoFactory] pyarrow_unwrap_cryptofactory(object crypto_factory) except *: + if isinstance(crypto_factory, CryptoFactory): + pycf = ( crypto_factory).unwrap() + return static_pointer_cast[CCryptoFactory, CPyCryptoFactory](pycf) + raise TypeError("Expected CryptoFactory, got %s" % type(crypto_factory)) + + +cdef shared_ptr[CKmsConnectionConfig] pyarrow_unwrap_kmsconnectionconfig(object kmsconnectionconfig) except *: + if isinstance(kmsconnectionconfig, KmsConnectionConfig): + return ( kmsconnectionconfig).unwrap() + raise TypeError("Expected KmsConnectionConfig, got %s" % type(kmsconnectionconfig)) + + +cdef shared_ptr[CEncryptionConfiguration] pyarrow_unwrap_encryptionconfig(object encryptionconfig) except *: + if isinstance(encryptionconfig, EncryptionConfiguration): + return ( encryptionconfig).unwrap() + raise TypeError("Expected EncryptionConfiguration, got %s" % type(encryptionconfig)) + + +cdef shared_ptr[CDecryptionConfiguration] pyarrow_unwrap_decryptionconfig(object decryptionconfig) except *: + if isinstance(decryptionconfig, DecryptionConfiguration): + return ( decryptionconfig).unwrap() + raise TypeError("Expected DecryptionConfiguration, got %s" % type(decryptionconfig)) diff --git 
a/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..db0909eacdbe82720ddd2b2d2bf1988768f26937 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx new file mode 100644 index 0000000000000000000000000000000000000000..adb148351306c02667346b3750c08f2efd8a6625 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.pyx @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False, binding=True +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport check_status + +from pyarrow.lib import frombytes + + +cdef class CppTestCase: + """ + A simple wrapper for a C++ test case. + """ + cdef: + CTestCase c_case + + @staticmethod + cdef wrap(CTestCase c_case): + cdef: + CppTestCase obj + obj = CppTestCase.__new__(CppTestCase) + obj.c_case = c_case + return obj + + @property + def name(self): + return frombytes(self.c_case.name) + + def __repr__(self): + return f"<{self.__class__.__name__} {self.name!r}>" + + def __call__(self): + check_status(self.c_case.func()) + + +def get_cpp_tests(): + """ + Get a list of C++ test cases. + """ + cases = [] + c_cases = GetCppTestCases() + for c_case in c_cases: + cases.append(CppTestCase.wrap(c_case)) + return cases diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..364d0e9da714ce4fa36def25220d0cbabed619a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.pyx new file mode 100644 index 0000000000000000000000000000000000000000..067cb5f91681bacf430945bc5aec2bb04e0cb01b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_substrait.pyx @@ -0,0 +1,349 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 +from cython.operator cimport dereference as deref +from libcpp.vector cimport vector as std_vector + +from pyarrow import Buffer, py_buffer +from pyarrow._compute cimport Expression +from pyarrow.lib import frombytes, tobytes +from pyarrow.lib cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_substrait cimport * + + +# TODO GH-37235: Fix exception handling +cdef CDeclaration _create_named_table_provider( + dict named_args, const std_vector[c_string]& names, const CSchema& schema +) noexcept: + cdef: + c_string c_name + shared_ptr[CTable] c_in_table + shared_ptr[CTableSourceNodeOptions] c_tablesourceopts + shared_ptr[CExecNodeOptions] c_input_node_opts + vector[CDeclaration.Input] no_c_inputs + + py_names = [] + for i in range(names.size()): + c_name = names[i] + py_names.append(frombytes(c_name)) + py_schema = pyarrow_wrap_schema(make_shared[CSchema](schema)) + + py_table = named_args["provider"](py_names, py_schema) + c_in_table = pyarrow_unwrap_table(py_table) + c_tablesourceopts = make_shared[CTableSourceNodeOptions](c_in_table) + c_input_node_opts = static_pointer_cast[CExecNodeOptions, CTableSourceNodeOptions]( + c_tablesourceopts) + return CDeclaration(tobytes("table_source"), + no_c_inputs, c_input_node_opts) + + +def run_query(plan, *, table_provider=None, use_threads=True): + """ + Execute a Substrait plan and read the results as a RecordBatchReader. + + Parameters + ---------- + plan : Union[Buffer, bytes] + The serialized Substrait plan to execute. + table_provider : object (optional) + A function to resolve any NamedTable relation to a table. + The function will receive two arguments which will be a list + of strings representing the table name and a pyarrow.Schema representing + the expected schema and should return a pyarrow.Table. + use_threads : bool, default True + If True then multiple threads will be used to run the query. If False then + all CPU intensive work will be done on the calling thread. + + Returns + ------- + RecordBatchReader + A reader containing the result of the executed query + + Examples + -------- + >>> import pyarrow as pa + >>> from pyarrow.lib import tobytes + >>> import pyarrow.substrait as substrait + >>> test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]}) + >>> test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]}) + >>> def table_provider(names, schema): + ... if not names: + ... raise Exception("No names provided") + ... elif names[0] == "t1": + ... return test_table_1 + ... elif names[1] == "t2": + ... return test_table_2 + ... else: + ... raise Exception("Unrecognized table name") + ... + >>> substrait_query = ''' + ... { + ... "relations": [ + ... {"rel": { + ... "read": { + ... "base_schema": { + ... "struct": { + ... "types": [ + ... {"i64": {}} + ... ] + ... }, + ... "names": [ + ... "x" + ... ] + ... }, + ... "namedTable": { + ... "names": ["t1"] + ... } + ... } + ... }} + ... ] + ... } + ... 
''' + >>> buf = pa._substrait._parse_json_plan(tobytes(substrait_query)) + >>> reader = pa.substrait.run_query(buf, table_provider=table_provider) + >>> reader.read_all() + pyarrow.Table + x: int64 + ---- + x: [[1,2,3]] + """ + + cdef: + CResult[shared_ptr[CRecordBatchReader]] c_res_reader + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + shared_ptr[CBuffer] c_buf_plan + CConversionOptions c_conversion_options + c_bool c_use_threads + + c_use_threads = use_threads + if isinstance(plan, bytes): + c_buf_plan = pyarrow_unwrap_buffer(py_buffer(plan)) + elif isinstance(plan, Buffer): + c_buf_plan = pyarrow_unwrap_buffer(plan) + else: + raise TypeError( + f"Expected 'pyarrow.Buffer' or bytes, got '{type(plan)}'") + + if table_provider is not None: + named_table_args = { + "provider": table_provider + } + c_conversion_options.named_table_provider = BindFunction[CNamedTableProvider]( + &_create_named_table_provider, named_table_args) + + with nogil: + c_res_reader = ExecuteSerializedPlan( + deref(c_buf_plan), default_extension_id_registry(), + GetFunctionRegistry(), c_conversion_options, c_use_threads) + + c_reader = GetResultValue(c_res_reader) + + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return reader + + +def _parse_json_plan(plan): + """ + Parse a JSON plan into equivalent serialized Protobuf. + + Parameters + ---------- + plan : bytes + Substrait plan in JSON. + + Returns + ------- + Buffer + A buffer containing the serialized Protobuf plan. + """ + + cdef: + CResult[shared_ptr[CBuffer]] c_res_buffer + c_string c_str_plan + shared_ptr[CBuffer] c_buf_plan + + c_str_plan = plan + c_res_buffer = SerializeJsonPlan(c_str_plan) + with nogil: + c_buf_plan = GetResultValue(c_res_buffer) + return pyarrow_wrap_buffer(c_buf_plan) + + +def serialize_expressions(exprs, names, schema, *, allow_arrow_extensions=False): + """ + Serialize a collection of expressions into Substrait + + Substrait expressions must be bound to a schema. For example, + the Substrait expression ``a:i32 + b:i32`` is different from the + Substrait expression ``a:i64 + b:i64``. Pyarrow expressions are + typically unbound. For example, both of the above expressions + would be represented as ``a + b`` in pyarrow. + + This means a schema must be provided when serializing an expression. + It also means that the serialization may fail if a matching function + call cannot be found for the expression. + + Parameters + ---------- + exprs : list of Expression + The expressions to serialize + names : list of str + Names for the expressions + schema : Schema + The schema the expressions will be bound to + allow_arrow_extensions : bool, default False + If False then only functions that are part of the core Substrait function + definitions will be allowed. Set this to True to allow pyarrow-specific functions + and user defined functions but the result may not be accepted by other + compute libraries. 
+ + Returns + ------- + Buffer + An ExtendedExpression message containing the serialized expressions + """ + cdef: + CResult[shared_ptr[CBuffer]] c_res_buffer + shared_ptr[CBuffer] c_buffer + CNamedExpression c_named_expr + CBoundExpressions c_bound_exprs + CConversionOptions c_conversion_options + + if len(exprs) != len(names): + raise ValueError("exprs and names need to have the same length") + for expr, name in zip(exprs, names): + if not isinstance(expr, Expression): + raise TypeError(f"Expected Expression, got '{type(expr)}' in exprs") + if not isinstance(name, str): + raise TypeError(f"Expected str, got '{type(name)}' in names") + c_named_expr.expression = ( expr).unwrap() + c_named_expr.name = tobytes( name) + c_bound_exprs.named_expressions.push_back(c_named_expr) + + c_bound_exprs.schema = ( schema).sp_schema + + c_conversion_options.allow_arrow_extensions = allow_arrow_extensions + + with nogil: + c_res_buffer = SerializeExpressions(c_bound_exprs, c_conversion_options) + c_buffer = GetResultValue(c_res_buffer) + return pyarrow_wrap_buffer(c_buffer) + + +cdef class BoundExpressions(_Weakrefable): + """ + A collection of named expressions and the schema they are bound to + + This is equivalent to the Substrait ExtendedExpression message + """ + + cdef: + CBoundExpressions c_bound_exprs + + def __init__(self): + msg = 'BoundExpressions is an abstract class thus cannot be initialized.' + raise TypeError(msg) + + cdef void init(self, CBoundExpressions bound_expressions): + self.c_bound_exprs = bound_expressions + + @property + def schema(self): + """ + The common schema that all expressions are bound to + """ + return pyarrow_wrap_schema(self.c_bound_exprs.schema) + + @property + def expressions(self): + """ + A dict from expression name to expression + """ + expr_dict = {} + for named_expr in self.c_bound_exprs.named_expressions: + name = frombytes(named_expr.name) + expr = Expression.wrap(named_expr.expression) + expr_dict[name] = expr + return expr_dict + + @staticmethod + cdef wrap(const CBoundExpressions& bound_expressions): + cdef BoundExpressions self = BoundExpressions.__new__(BoundExpressions) + self.init(bound_expressions) + return self + + +def deserialize_expressions(buf): + """ + Deserialize an ExtendedExpression Substrait message into a BoundExpressions object + + Parameters + ---------- + buf : Buffer or bytes + The message to deserialize + + Returns + ------- + BoundExpressions + The deserialized expressions, their names, and the bound schema + """ + cdef: + shared_ptr[CBuffer] c_buffer + CResult[CBoundExpressions] c_res_bound_exprs + CBoundExpressions c_bound_exprs + + if isinstance(buf, bytes): + c_buffer = pyarrow_unwrap_buffer(py_buffer(buf)) + elif isinstance(buf, Buffer): + c_buffer = pyarrow_unwrap_buffer(buf) + else: + raise TypeError( + f"Expected 'pyarrow.Buffer' or bytes, got '{type(buf)}'") + + with nogil: + c_res_bound_exprs = DeserializeExpressions(deref(c_buffer)) + c_bound_exprs = GetResultValue(c_res_bound_exprs) + + return BoundExpressions.wrap(c_bound_exprs) + + +def get_supported_functions(): + """ + Get a list of Substrait functions that the underlying + engine currently supports. 
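+
+    Examples
+    --------
+    A minimal sketch (the exact contents of the list depend on the
+    Substrait extension registry compiled into this pyarrow build):
+
+    >>> import pyarrow.substrait as substrait
+    >>> funcs = substrait.get_supported_functions()  # doctest: +SKIP
+    >>> all("#" in f for f in funcs)  # doctest: +SKIP
+    True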
+ + Returns + ------- + list[str] + A list of function ids encoded as '{uri}#{name}' + """ + + cdef: + ExtensionIdRegistry* c_id_registry + std_vector[c_string] c_ids + + c_id_registry = default_extension_id_registry() + c_ids = c_id_registry.GetSupportedSubstraitFunctions() + + functions_list = [] + for c_id in c_ids: + functions_list.append(frombytes(c_id)) + return functions_list diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/array.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/array.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1416f5f4346d9d47c5a7522640c1a6e7ef4d07ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/array.pxi @@ -0,0 +1,3790 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New + +import os +import warnings +from cython import sizeof + + +cdef _sequence_to_array(object sequence, object mask, object size, + DataType type, CMemoryPool* pool, c_bool from_pandas): + cdef: + int64_t c_size + PyConversionOptions options + shared_ptr[CChunkedArray] chunked + + if type is not None: + options.type = type.sp_type + + if size is not None: + options.size = size + + options.from_pandas = from_pandas + options.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False) + + with nogil: + chunked = GetResultValue( + ConvertPySequence(sequence, mask, options, pool) + ) + + if chunked.get().num_chunks() == 1: + return pyarrow_wrap_array(chunked.get().chunk(0)) + else: + return pyarrow_wrap_chunked_array(chunked) + + +cdef inline _is_array_like(obj): + if isinstance(obj, np.ndarray): + return True + return pandas_api._have_pandas_internal() and pandas_api.is_array_like(obj) + + +def _ndarray_to_arrow_type(object values, DataType type): + return pyarrow_wrap_data_type(_ndarray_to_type(values, type)) + + +cdef shared_ptr[CDataType] _ndarray_to_type(object values, + DataType type) except *: + cdef shared_ptr[CDataType] c_type + + dtype = values.dtype + + if type is None and dtype != object: + c_type = GetResultValue(NumPyDtypeToArrow(dtype)) + + if type is not None: + c_type = type.sp_type + + return c_type + + +cdef _ndarray_to_array(object values, object mask, DataType type, + c_bool from_pandas, c_bool safe, CMemoryPool* pool): + cdef: + shared_ptr[CChunkedArray] chunked_out + shared_ptr[CDataType] c_type = _ndarray_to_type(values, type) + CCastOptions cast_options = CCastOptions(safe) + + with nogil: + check_status(NdarrayToArrow(pool, values, mask, from_pandas, + c_type, cast_options, &chunked_out)) + + if chunked_out.get().num_chunks() > 1: + return pyarrow_wrap_chunked_array(chunked_out) + else: + return pyarrow_wrap_array(chunked_out.get().chunk(0)) + + +cdef _codes_to_indices(object 
codes, object mask, DataType type, + MemoryPool memory_pool): + """ + Convert the codes of a pandas Categorical to indices for a pyarrow + DictionaryArray, taking into account missing values + mask + """ + if mask is None: + mask = codes == -1 + else: + mask = mask | (codes == -1) + return array(codes, mask=mask, type=type, memory_pool=memory_pool) + + +def _handle_arrow_array_protocol(obj, type, mask, size): + if mask is not None or size is not None: + raise ValueError( + "Cannot specify a mask or a size when passing an object that is " + "converted with the __arrow_array__ protocol.") + res = obj.__arrow_array__(type=type) + if not isinstance(res, (Array, ChunkedArray)): + raise TypeError("The object's __arrow_array__ method does not " + "return a pyarrow Array or ChunkedArray.") + if isinstance(res, ChunkedArray) and res.num_chunks==1: + res = res.chunk(0) + return res + + +def array(object obj, type=None, mask=None, size=None, from_pandas=None, + bint safe=True, MemoryPool memory_pool=None): + """ + Create pyarrow.Array instance from a Python object. + + Parameters + ---------- + obj : sequence, iterable, ndarray, pandas.Series, Arrow-compatible array + If both type and size are specified may be a single use iterable. If + not strongly-typed, Arrow type will be inferred for resulting array. + Any Arrow-compatible array that implements the Arrow PyCapsule Protocol + (has an ``__arrow_c_array__`` method) can be passed as well. + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred from + the data. + mask : array[bool], optional + Indicate which values are null (True) or not null (False). + size : int64, optional + Size of the elements. If the input is larger than size bail at this + length. For iterators, if size is larger than the input iterator this + will be treated as a "max size", but will involve an initial allocation + of size followed by a resize to the actual size (so if you know the + exact size specifying it correctly will give you better performance). + from_pandas : bool, default None + Use pandas's semantics for inferring nulls from values in + ndarray-like data. If passed, the mask tasks precedence, but + if a value is unmasked (not-null), but still null according to + pandas semantics, then it is null. Defaults to False if not + passed explicitly by user, or True if a pandas object is + passed in. + safe : bool, default True + Check for overflows or other unsafe conversions. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. + + Returns + ------- + array : pyarrow.Array or pyarrow.ChunkedArray + A ChunkedArray instead of an Array is returned if: + + - the object data overflowed binary storage. + - the object's ``__arrow_array__`` protocol method returned a chunked + array. + + Notes + ----- + Timezone will be preserved in the returned array for timezone-aware data, + else no timezone will be returned for naive timestamps. + Internally, UTC values are stored for timezone-aware data with the + timezone set in the data type. + + Pandas's DateOffsets and dateutil.relativedelta.relativedelta are by + default converted as MonthDayNanoIntervalArray. relativedelta leapdays + are ignored as are all absolute fields on both objects. datetime.timedelta + can also be converted to MonthDayNanoIntervalArray but this requires + passing MonthDayNanoIntervalType explicitly. 
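+
+    As a sketch of the ``datetime.timedelta`` case above (assuming the
+    interval type is requested explicitly):
+
+    >>> import datetime
+    >>> import pyarrow as pa
+    >>> pa.array([datetime.timedelta(days=1)],
+    ...          type=pa.month_day_nano_interval())  # doctest: +SKIP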
+ + Converting to dictionary array will promote to a wider integer type for + indices if the number of distinct values cannot be represented, even if + the index type was explicitly set. This means that if there are more than + 127 values the returned dictionary array's index type will be at least + pa.int16() even if pa.int8() was passed to the function. Note that an + explicit index type will not be demoted even if it is wider than required. + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> pa.array(pd.Series([1, 2])) + + [ + 1, + 2 + ] + + >>> pa.array(["a", "b", "a"], type=pa.dictionary(pa.int8(), pa.string())) + + ... + -- dictionary: + [ + "a", + "b" + ] + -- indices: + [ + 0, + 1, + 0 + ] + + >>> import numpy as np + >>> pa.array(pd.Series([1, 2]), mask=np.array([0, 1], dtype=bool)) + + [ + 1, + null + ] + + >>> arr = pa.array(range(1024), type=pa.dictionary(pa.int8(), pa.int64())) + >>> arr.type.index_type + DataType(int16) + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + bint is_pandas_object = False + bint c_from_pandas + + type = ensure_type(type, allow_none=True) + + extension_type = None + if type is not None and type.id == _Type_EXTENSION: + extension_type = type + type = type.storage_type + + if from_pandas is None: + c_from_pandas = False + else: + c_from_pandas = from_pandas + + if isinstance(obj, Array): + if type is not None and not obj.type.equals(type): + obj = obj.cast(type, safe=safe, memory_pool=memory_pool) + return obj + + if hasattr(obj, '__arrow_array__'): + return _handle_arrow_array_protocol(obj, type, mask, size) + elif hasattr(obj, '__arrow_c_array__'): + if type is not None: + requested_type = type.__arrow_c_schema__() + else: + requested_type = None + schema_capsule, array_capsule = obj.__arrow_c_array__(requested_type) + out_array = Array._import_from_c_capsule(schema_capsule, array_capsule) + if type is not None and out_array.type != type: + # PyCapsule interface type coercion is best effort, so we need to + # check the type of the returned array and cast if necessary + out_array = array.cast(type, safe=safe, memory_pool=memory_pool) + return out_array + elif _is_array_like(obj): + if mask is not None: + if _is_array_like(mask): + mask = get_values(mask, &is_pandas_object) + else: + raise TypeError("Mask must be a numpy array " + "when converting numpy arrays") + + values = get_values(obj, &is_pandas_object) + if is_pandas_object and from_pandas is None: + c_from_pandas = True + + if isinstance(values, np.ma.MaskedArray): + if mask is not None: + raise ValueError("Cannot pass a numpy masked array and " + "specify a mask at the same time") + else: + # don't use shrunken masks + mask = None if values.mask is np.ma.nomask else values.mask + values = values.data + + if mask is not None: + if mask.dtype != np.bool_: + raise TypeError("Mask must be boolean dtype") + if mask.ndim != 1: + raise ValueError("Mask must be 1D array") + if len(values) != len(mask): + raise ValueError( + "Mask is a different length from sequence being converted") + + if hasattr(values, '__arrow_array__'): + return _handle_arrow_array_protocol(values, type, mask, size) + elif (pandas_api.is_categorical(values) and + type is not None and type.id != Type_DICTIONARY): + result = _ndarray_to_array( + np.asarray(values), mask, type, c_from_pandas, safe, pool + ) + elif pandas_api.is_categorical(values): + if type is not None: + index_type = type.index_type + value_type = type.value_type + if values.ordered != type.ordered: + raise 
ValueError( + "The 'ordered' flag of the passed categorical values " + "does not match the 'ordered' of the specified type. ") + else: + index_type = None + value_type = None + + indices = _codes_to_indices( + values.codes, mask, index_type, memory_pool) + try: + dictionary = array( + values.categories.values, type=value_type, + memory_pool=memory_pool) + except TypeError: + # TODO when removing the deprecation warning, this whole + # try/except can be removed (to bubble the TypeError of + # the first array(..) call) + if value_type is not None: + warnings.warn( + "The dtype of the 'categories' of the passed " + "categorical values ({0}) does not match the " + "specified type ({1}). For now ignoring the specified " + "type, but in the future this mismatch will raise a " + "TypeError".format( + values.categories.dtype, value_type), + FutureWarning, stacklevel=2) + dictionary = array( + values.categories.values, memory_pool=memory_pool) + else: + raise + + return DictionaryArray.from_arrays( + indices, dictionary, ordered=values.ordered, safe=safe) + else: + if pandas_api.have_pandas: + values, type = pandas_api.compat.get_datetimetz_type( + values, obj.dtype, type) + result = _ndarray_to_array(values, mask, type, c_from_pandas, safe, + pool) + else: + # ConvertPySequence does strict conversion if type is explicitly passed + result = _sequence_to_array(obj, mask, size, type, pool, c_from_pandas) + + if extension_type is not None: + result = ExtensionArray.from_storage(extension_type, result) + return result + + +def asarray(values, type=None): + """ + Convert to pyarrow.Array, inferring type if not provided. + + Parameters + ---------- + values : array-like + This can be a sequence, numpy.ndarray, pyarrow.Array or + pyarrow.ChunkedArray. If a ChunkedArray is passed, the output will be + a ChunkedArray, otherwise the output will be a Array. + type : string or DataType + Explicitly construct the array with this type. Attempt to cast if + indicated type is different. + + Returns + ------- + arr : Array or ChunkedArray + """ + if isinstance(values, (Array, ChunkedArray)): + if type is not None and not values.type.equals(type): + values = values.cast(type) + return values + else: + return array(values, type=type) + + +def nulls(size, type=None, MemoryPool memory_pool=None): + """ + Create a strongly-typed Array instance with all elements null. + + Parameters + ---------- + size : int + Array length. + type : pyarrow.DataType, default None + Explicit type for the array. By default use NullType. + memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. + + Returns + ------- + arr : Array + + Examples + -------- + >>> import pyarrow as pa + >>> pa.nulls(10) + + 10 nulls + + >>> pa.nulls(3, pa.uint32()) + + [ + null, + null, + null + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + int64_t length = size + shared_ptr[CDataType] ty + shared_ptr[CArray] arr + + type = ensure_type(type, allow_none=True) + if type is None: + type = null() + + ty = pyarrow_unwrap_data_type(type) + with nogil: + arr = GetResultValue(MakeArrayOfNull(ty, length, pool)) + + return pyarrow_wrap_array(arr) + + +def repeat(value, size, MemoryPool memory_pool=None): + """ + Create an Array instance whose slots are the given scalar. + + Parameters + ---------- + value : Scalar-like object + Either a pyarrow.Scalar or any python object coercible to a Scalar. + size : int + Number of times to repeat the scalar in the output Array. 
+ memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. + + Returns + ------- + arr : Array + + Examples + -------- + >>> import pyarrow as pa + >>> pa.repeat(10, 3) + + [ + 10, + 10, + 10 + ] + + >>> pa.repeat([1, 2], 2) + + [ + [ + 1, + 2 + ], + [ + 1, + 2 + ] + ] + + >>> pa.repeat("string", 3) + + [ + "string", + "string", + "string" + ] + + >>> pa.repeat(pa.scalar({'a': 1, 'b': [1, 2]}), 2) + + -- is_valid: all not null + -- child 0 type: int64 + [ + 1, + 1 + ] + -- child 1 type: list + [ + [ + 1, + 2 + ], + [ + 1, + 2 + ] + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + int64_t length = size + shared_ptr[CArray] c_array + shared_ptr[CScalar] c_scalar + + if not isinstance(value, Scalar): + value = scalar(value, memory_pool=memory_pool) + + c_scalar = ( value).unwrap() + with nogil: + c_array = GetResultValue( + MakeArrayFromScalar(deref(c_scalar), length, pool) + ) + + return pyarrow_wrap_array(c_array) + + +def infer_type(values, mask=None, from_pandas=False): + """ + Attempt to infer Arrow data type that can hold the passed Python + sequence type in an Array object + + Parameters + ---------- + values : array-like + Sequence to infer type from. + mask : ndarray (bool type), optional + Optional exclusion mask where True marks null, False non-null. + from_pandas : bool, default False + Use pandas's NA/null sentinel values for type inference. + + Returns + ------- + type : DataType + """ + cdef: + shared_ptr[CDataType] out + c_bool use_pandas_sentinels = from_pandas + + if mask is not None and not isinstance(mask, np.ndarray): + mask = np.array(mask, dtype=bool) + + out = GetResultValue(InferArrowType(values, mask, use_pandas_sentinels)) + return pyarrow_wrap_data_type(out) + + +def _normalize_slice(object arrow_obj, slice key): + """ + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + """ + cdef: + Py_ssize_t start, stop, step + Py_ssize_t n = len(arrow_obj) + + start = key.start or 0 + if start < 0: + start += n + if start < 0: + start = 0 + elif start >= n: + start = n + + stop = key.stop if key.stop is not None else n + if stop < 0: + stop += n + if stop < 0: + stop = 0 + elif stop >= n: + stop = n + + step = key.step or 1 + if step != 1: + if step < 0: + # Negative steps require some special handling + if key.start is None: + start = n - 1 + + if key.stop is None: + stop = -1 + + indices = np.arange(start, stop, step) + return arrow_obj.take(indices) + else: + length = max(stop - start, 0) + return arrow_obj.slice(start, length) + + +cdef Py_ssize_t _normalize_index(Py_ssize_t index, + Py_ssize_t length) except -1: + if index < 0: + index += length + if index < 0: + raise IndexError("index out of bounds") + elif index >= length: + raise IndexError("index out of bounds") + return index + + +cdef wrap_datum(const CDatum& datum): + if datum.kind() == DatumType_ARRAY: + return pyarrow_wrap_array(MakeArray(datum.array())) + elif datum.kind() == DatumType_CHUNKED_ARRAY: + return pyarrow_wrap_chunked_array(datum.chunked_array()) + elif datum.kind() == DatumType_RECORD_BATCH: + return pyarrow_wrap_batch(datum.record_batch()) + elif datum.kind() == DatumType_TABLE: + return pyarrow_wrap_table(datum.table()) + elif datum.kind() == DatumType_SCALAR: + return pyarrow_wrap_scalar(datum.scalar()) + else: + raise ValueError("Unable to wrap Datum in a Python object") + + +cdef _append_array_buffers(const CArrayData* ad, list res): + """ + Recursively 
append Buffer wrappers from *ad* and its children. + """ + cdef size_t i, n + assert ad != NULL + n = ad.buffers.size() + for i in range(n): + buf = ad.buffers[i] + res.append(pyarrow_wrap_buffer(buf) + if buf.get() != NULL else None) + n = ad.child_data.size() + for i in range(n): + _append_array_buffers(ad.child_data[i].get(), res) + + +cdef _reduce_array_data(const CArrayData* ad): + """ + Recursively dissect ArrayData to (pickable) tuples. + """ + cdef size_t i, n + assert ad != NULL + + n = ad.buffers.size() + buffers = [] + for i in range(n): + buf = ad.buffers[i] + buffers.append(pyarrow_wrap_buffer(buf) + if buf.get() != NULL else None) + + children = [] + n = ad.child_data.size() + for i in range(n): + children.append(_reduce_array_data(ad.child_data[i].get())) + + if ad.dictionary.get() != NULL: + dictionary = _reduce_array_data(ad.dictionary.get()) + else: + dictionary = None + + return pyarrow_wrap_data_type(ad.type), ad.length, ad.null_count, \ + ad.offset, buffers, children, dictionary + + +cdef shared_ptr[CArrayData] _reconstruct_array_data(data): + """ + Reconstruct CArrayData objects from the tuple structure generated + by _reduce_array_data. + """ + cdef: + int64_t length, null_count, offset, i + DataType dtype + Buffer buf + vector[shared_ptr[CBuffer]] c_buffers + vector[shared_ptr[CArrayData]] c_children + shared_ptr[CArrayData] c_dictionary + + dtype, length, null_count, offset, buffers, children, dictionary = data + + for i in range(len(buffers)): + buf = buffers[i] + if buf is None: + c_buffers.push_back(shared_ptr[CBuffer]()) + else: + c_buffers.push_back(buf.buffer) + + for i in range(len(children)): + c_children.push_back(_reconstruct_array_data(children[i])) + + if dictionary is not None: + c_dictionary = _reconstruct_array_data(dictionary) + + return CArrayData.MakeWithChildrenAndDictionary( + dtype.sp_type, + length, + c_buffers, + c_children, + c_dictionary, + null_count, + offset) + + +def _restore_array(data): + """ + Reconstruct an Array from pickled ArrayData. + """ + cdef shared_ptr[CArrayData] ad = _reconstruct_array_data(data) + return pyarrow_wrap_array(MakeArray(ad)) + + +cdef class _PandasConvertible(_Weakrefable): + + def to_pandas( + self, + memory_pool=None, + categories=None, + bint strings_to_categorical=False, + bint zero_copy_only=False, + bint integer_object_nulls=False, + bint date_as_object=True, + bint timestamp_as_object=False, + bint use_threads=True, + bint deduplicate_objects=True, + bint ignore_metadata=False, + bint safe=True, + bint split_blocks=False, + bint self_destruct=False, + str maps_as_pydicts=None, + types_mapper=None, + bint coerce_temporal_nanoseconds=False + ): + """ + Convert to a pandas-compatible NumPy array or DataFrame, as appropriate + + Parameters + ---------- + memory_pool : MemoryPool, default None + Arrow MemoryPool to use for allocations. Uses the default memory + pool if not passed. + categories : list, default empty + List of fields that should be returned as pandas.Categorical. Only + applies to table-like data structures. + strings_to_categorical : bool, default False + Encode string (UTF8) and binary types to pandas.Categorical. + zero_copy_only : bool, default False + Raise an ArrowException if this function call would require copying + the underlying data. + integer_object_nulls : bool, default False + Cast integers with nulls to objects + date_as_object : bool, default True + Cast dates to objects. If False, convert to datetime64 dtype with + the equivalent time unit (if supported). 
Note: in pandas version + < 2.0, only datetime64[ns] conversion is supported. + timestamp_as_object : bool, default False + Cast non-nanosecond timestamps (np.datetime64) to objects. This is + useful in pandas version 1.x if you have timestamps that don't fit + in the normal date range of nanosecond timestamps (1678 CE-2262 CE). + Non-nanosecond timestamps are supported in pandas version 2.0. + If False, all timestamps are converted to datetime64 dtype. + use_threads : bool, default True + Whether to parallelize the conversion using multiple threads. + deduplicate_objects : bool, default True + Do not create multiple copies Python objects when created, to save + on memory use. Conversion will be slower. + ignore_metadata : bool, default False + If True, do not use the 'pandas' metadata to reconstruct the + DataFrame index, if present + safe : bool, default True + For certain data types, a cast is needed in order to store the + data in a pandas DataFrame or Series (e.g. timestamps are always + stored as nanoseconds in pandas). This option controls whether it + is a safe cast or not. + split_blocks : bool, default False + If True, generate one internal "block" for each column when + creating a pandas.DataFrame from a RecordBatch or Table. While this + can temporarily reduce memory note that various pandas operations + can trigger "consolidation" which may balloon memory use. + self_destruct : bool, default False + EXPERIMENTAL: If True, attempt to deallocate the originating Arrow + memory while converting the Arrow object to pandas. If you use the + object after calling to_pandas with this option it will crash your + program. + + Note that you may not see always memory usage improvements. For + example, if multiple columns share an underlying allocation, + memory can't be freed until all columns are converted. + maps_as_pydicts : str, optional, default `None` + Valid values are `None`, 'lossy', or 'strict'. + The default behavior (`None`), is to convert Arrow Map arrays to + Python association lists (list-of-tuples) in the same order as the + Arrow Map, as in [(key1, value1), (key2, value2), ...]. + + If 'lossy' or 'strict', convert Arrow Map arrays to native Python dicts. + This can change the ordering of (key, value) pairs, and will + deduplicate multiple keys, resulting in a possible loss of data. + + If 'lossy', this key deduplication results in a warning printed + when detected. If 'strict', this instead results in an exception + being raised when detected. + types_mapper : function, default None + A function mapping a pyarrow DataType to a pandas ExtensionDtype. + This can be used to override the default pandas type for conversion + of built-in pyarrow types or in absence of pandas_metadata in the + Table schema. The function receives a pyarrow DataType and is + expected to return a pandas ExtensionDtype or ``None`` if the + default conversion should be used for that type. If you have + a dictionary mapping, you can pass ``dict.get`` as function. + coerce_temporal_nanoseconds : bool, default False + Only applicable to pandas version >= 2.0. + A legacy option to coerce date32, date64, duration, and timestamp + time units to nanoseconds when converting to pandas. This is the + default behavior in pandas version 1.x. Set this option to True if + you'd like to use this coercion when using pandas version >= 2.0 + for backwards compatibility (not recommended otherwise). 
+ + Returns + ------- + pandas.Series or pandas.DataFrame depending on type of object + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + + Convert a Table to pandas DataFrame: + + >>> table = pa.table([ + ... pa.array([2, 4, 5, 100]), + ... pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + ... ], names=['n_legs', 'animals']) + >>> table.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + >>> isinstance(table.to_pandas(), pd.DataFrame) + True + + Convert a RecordBatch to pandas DataFrame: + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.record_batch([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + >>> isinstance(batch.to_pandas(), pd.DataFrame) + True + + Convert a Chunked Array to pandas Series: + + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_pandas() + 0 2 + 1 2 + 2 4 + 3 4 + 4 5 + 5 100 + dtype: int64 + >>> isinstance(n_legs.to_pandas(), pd.Series) + True + """ + options = dict( + pool=memory_pool, + strings_to_categorical=strings_to_categorical, + zero_copy_only=zero_copy_only, + integer_object_nulls=integer_object_nulls, + date_as_object=date_as_object, + timestamp_as_object=timestamp_as_object, + use_threads=use_threads, + deduplicate_objects=deduplicate_objects, + safe=safe, + split_blocks=split_blocks, + self_destruct=self_destruct, + maps_as_pydicts=maps_as_pydicts, + coerce_temporal_nanoseconds=coerce_temporal_nanoseconds + ) + return self._to_pandas(options, categories=categories, + ignore_metadata=ignore_metadata, + types_mapper=types_mapper) + + +cdef PandasOptions _convert_pandas_options(dict options): + cdef PandasOptions result + result.pool = maybe_unbox_memory_pool(options['pool']) + result.strings_to_categorical = options['strings_to_categorical'] + result.zero_copy_only = options['zero_copy_only'] + result.integer_object_nulls = options['integer_object_nulls'] + result.date_as_object = options['date_as_object'] + result.timestamp_as_object = options['timestamp_as_object'] + result.use_threads = options['use_threads'] + result.deduplicate_objects = options['deduplicate_objects'] + result.safe_cast = options['safe'] + result.split_blocks = options['split_blocks'] + result.self_destruct = options['self_destruct'] + result.coerce_temporal_nanoseconds = options['coerce_temporal_nanoseconds'] + result.ignore_timezone = os.environ.get('PYARROW_IGNORE_TIMEZONE', False) + + maps_as_pydicts = options['maps_as_pydicts'] + if maps_as_pydicts is None: + result.maps_as_pydicts = MapConversionType.DEFAULT + elif maps_as_pydicts == "lossy": + result.maps_as_pydicts = MapConversionType.LOSSY + elif maps_as_pydicts == "strict": + result.maps_as_pydicts = MapConversionType.STRICT_ + else: + raise ValueError( + "Invalid value for 'maps_as_pydicts': " + + "valid values are 'lossy', 'strict' or `None` (default). " + + f"Received '{maps_as_pydicts}'." + ) + return result + + +cdef class Array(_PandasConvertible): + """ + The base class for all Arrow arrays. 
+ """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use one of " + "the `pyarrow.Array.from_*` functions instead." + .format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CArray]& sp_array) except *: + self.sp_array = sp_array + self.ap = sp_array.get() + self.type = pyarrow_wrap_data_type(self.sp_array.get().type()) + + def _debug_print(self): + with nogil: + check_status(DebugPrint(deref(self.ap), 0)) + + def diff(self, Array other): + """ + Compare contents of this array against another one. + + Return a string containing the result of diffing this array + (on the left side) against the other array (on the right side). + + Parameters + ---------- + other : Array + The other array to compare this array with. + + Returns + ------- + diff : str + A human-readable printout of the differences. + + Examples + -------- + >>> import pyarrow as pa + >>> left = pa.array(["one", "two", "three"]) + >>> right = pa.array(["two", None, "two-and-a-half", "three"]) + >>> print(left.diff(right)) # doctest: +SKIP + + @@ -0, +0 @@ + -"one" + @@ -2, +1 @@ + +null + +"two-and-a-half" + + """ + cdef c_string result + with nogil: + result = self.ap.Diff(deref(other.ap)) + return frombytes(result, safe=True) + + def cast(self, object target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast array values to another data type + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, default None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Returns + ------- + cast : Array + """ + return _pc().cast(self, target_type, safe=safe, + options=options, memory_pool=memory_pool) + + def view(self, object target_type): + """ + Return zero-copy "view" of array as another data type. + + The data types must have compatible columnar buffer layouts + + Parameters + ---------- + target_type : DataType + Type to construct view as. + + Returns + ------- + view : Array + """ + cdef DataType type = ensure_type(target_type) + cdef shared_ptr[CArray] result + with nogil: + result = GetResultValue(self.ap.View(type.sp_type)) + return pyarrow_wrap_array(result) + + def sum(self, **kwargs): + """ + Sum the values in a numerical array. + + See :func:`pyarrow.compute.sum` for full usage. + + Parameters + ---------- + **kwargs : dict, optional + Options to pass to :func:`pyarrow.compute.sum`. + + Returns + ------- + sum : Scalar + A scalar containing the sum value. + """ + options = _pc().ScalarAggregateOptions(**kwargs) + return _pc().call_function('sum', [self], options) + + def unique(self): + """ + Compute distinct elements in array. + + Returns + ------- + unique : Array + An array of the same data type, with deduplicated elements. + """ + return _pc().call_function('unique', [self]) + + def dictionary_encode(self, null_encoding='mask'): + """ + Compute dictionary-encoded representation of array. + + See :func:`pyarrow.compute.dictionary_encode` for full usage. + + Parameters + ---------- + null_encoding : str, default "mask" + How to handle null entries. + + Returns + ------- + encoded : DictionaryArray + A dictionary-encoded version of this array. 
+ """ + options = _pc().DictionaryEncodeOptions(null_encoding) + return _pc().call_function('dictionary_encode', [self], options) + + def value_counts(self): + """ + Compute counts of unique elements in array. + + Returns + ------- + StructArray + An array of structs + """ + return _pc().call_function('value_counts', [self]) + + @staticmethod + def from_pandas(obj, mask=None, type=None, bint safe=True, + MemoryPool memory_pool=None): + """ + Convert pandas.Series to an Arrow Array. + + This method uses Pandas semantics about what values indicate + nulls. See pyarrow.array for more general conversion from arrays or + sequences to Arrow arrays. + + Parameters + ---------- + obj : ndarray, pandas.Series, array-like + mask : array (boolean), optional + Indicate which values are null (True) or not null (False). + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred + from the data. + safe : bool, default True + Check for overflows or other unsafe conversions. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. + + Notes + ----- + Localized timestamps will currently be returned as UTC (pandas's native + representation). Timezone-naive data will be implicitly interpreted as + UTC. + + Returns + ------- + array : pyarrow.Array or pyarrow.ChunkedArray + ChunkedArray is returned if object data overflows binary buffer. + """ + return array(obj, mask=mask, type=type, safe=safe, from_pandas=True, + memory_pool=memory_pool) + + def __reduce__(self): + return _restore_array, \ + (_reduce_array_data(self.sp_array.get().data().get()),) + + @staticmethod + def from_buffers(DataType type, length, buffers, null_count=-1, offset=0, + children=None): + """ + Construct an Array from a sequence of buffers. + + The concrete type returned depends on the datatype. + + Parameters + ---------- + type : DataType + The value type of the array. + length : int + The number of values in the array. + buffers : List[Buffer] + The buffers backing this array. + null_count : int, default -1 + The number of null entries in the array. Negative value means that + the null count is not known. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + children : List[Array], default None + Nested type children with length matching type.num_fields. 
+ + Returns + ------- + array : Array + """ + cdef: + Buffer buf + Array child + vector[shared_ptr[CBuffer]] c_buffers + vector[shared_ptr[CArrayData]] c_child_data + shared_ptr[CArrayData] array_data + + children = children or [] + + if type.num_fields != len(children): + raise ValueError("Type's expected number of children " + "({0}) did not match the passed number " + "({1}).".format(type.num_fields, len(children))) + + if type.num_buffers != len(buffers): + raise ValueError("Type's expected number of buffers " + "({0}) did not match the passed number " + "({1}).".format(type.num_buffers, len(buffers))) + + for buf in buffers: + # None will produce a null buffer pointer + c_buffers.push_back(pyarrow_unwrap_buffer(buf)) + + for child in children: + c_child_data.push_back(child.ap.data()) + + array_data = CArrayData.MakeWithChildren(type.sp_type, length, + c_buffers, c_child_data, + null_count, offset) + cdef Array result = pyarrow_wrap_array(MakeArray(array_data)) + result.validate() + return result + + @property + def null_count(self): + return self.sp_array.get().null_count() + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the array. + + In other words, the sum of bytes from all buffer + ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + """ + cdef: + CResult[int64_t] c_size_res + + with nogil: + c_size_res = ReferencedBufferSize(deref(self.ap)) + size = GetResultValue(c_size_res) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the array. + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.ap)) + return total_buffer_size + + def __sizeof__(self): + return super(Array, self).__sizeof__() + self.nbytes + + def __iter__(self): + for i in range(len(self)): + yield self.getitem(i) + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def to_string(self, *, int indent=2, int top_level_indent=0, int window=10, + int container_window=2, c_bool skip_new_lines=False): + """ + Render a "pretty-printed" string representation of the Array. + + Parameters + ---------- + indent : int, default 2 + How much to indent the internal items in the string to + the right, by default ``2``. + top_level_indent : int, default 0 + How much to indent right the entire content of the array, + by default ``0``. + window : int + How many primitive items to preview at the begin and end + of the array when the array is bigger than the window. + The other items will be ellipsed. + container_window : int + How many container items (such as a list in a list array) + to preview at the begin and end of the array when the array + is bigger than the window. + skip_new_lines : bool + If the array should be rendered as a single line of text + or if each element should be on its own line. 
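+
+        Examples
+        --------
+        A minimal sketch (the exact layout depends on the options and the
+        pyarrow version):
+
+        >>> import pyarrow as pa
+        >>> print(pa.array([1, 2, 3]).to_string(skip_new_lines=True))  # doctest: +SKIP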
+ """ + cdef: + c_string result + PrettyPrintOptions options + + with nogil: + options = PrettyPrintOptions(top_level_indent, window) + options.skip_new_lines = skip_new_lines + options.indent_size = indent + check_status( + PrettyPrint( + deref(self.ap), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def format(self, **kwargs): + """ + DEPRECATED, use pyarrow.Array.to_string + + Parameters + ---------- + **kwargs : dict + + Returns + ------- + str + """ + import warnings + warnings.warn('Array.format is deprecated, use Array.to_string') + return self.to_string(**kwargs) + + def __str__(self): + return self.to_string() + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + # This also handles comparing with None + # as Array.equals(None) raises a TypeError. + return NotImplemented + + def equals(Array self, Array other not None): + """ + Parameters + ---------- + other : pyarrow.Array + + Returns + ------- + bool + """ + return self.ap.Equals(deref(other.ap)) + + def __len__(self): + return self.length() + + cdef int64_t length(self): + if self.sp_array.get(): + return self.sp_array.get().length() + else: + return 0 + + def is_null(self, *, nan_is_null=False): + """ + Return BooleanArray indicating the null values. + + Parameters + ---------- + nan_is_null : bool (optional, default False) + Whether floating-point NaN values should also be considered null. + + Returns + ------- + array : boolean Array + """ + options = _pc().NullOptions(nan_is_null=nan_is_null) + return _pc().call_function('is_null', [self], options) + + def is_nan(self): + """ + Return BooleanArray indicating the NaN values. + + Returns + ------- + array : boolean Array + """ + return _pc().call_function('is_nan', [self]) + + def is_valid(self): + """ + Return BooleanArray indicating the non-null values. + """ + return _pc().is_valid(self) + + def fill_null(self, fill_value): + """ + See :func:`pyarrow.compute.fill_null` for usage. + + Parameters + ---------- + fill_value : any + The replacement value for null entries. + + Returns + ------- + result : Array + A new array with nulls replaced by the given value. + """ + return _pc().fill_null(self, fill_value) + + def __getitem__(self, key): + """ + Slice or return value at given index + + Parameters + ---------- + key : integer or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + value : Scalar (index) or Array (slice) + """ + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.length())) + + cdef getitem(self, int64_t i): + return Scalar.wrap(GetResultValue(self.ap.GetScalar(i))) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this array. + + Parameters + ---------- + offset : int, default 0 + Offset from start of array to slice. + length : int, default None + Length of slice (default is until end of Array starting from + offset). + + Returns + ------- + sliced : RecordBatch + """ + cdef: + shared_ptr[CArray] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.ap.Slice(offset) + else: + if length < 0: + raise ValueError('Length must be non-negative') + result = self.ap.Slice(offset, length) + + return pyarrow_wrap_array(result) + + def take(self, object indices): + """ + Select values from an array. + + See :func:`pyarrow.compute.take` for full usage. 
+ + Parameters + ---------- + indices : Array or array-like + The indices in the array whose values will be returned. + + Returns + ------- + taken : Array + An array with the same datatype, containing the taken values. + """ + return _pc().take(self, indices) + + def drop_null(self): + """ + Remove missing values from an array. + """ + return _pc().drop_null(self) + + def filter(self, Array mask, *, null_selection_behavior='drop'): + """ + Select values from an array. + + See :func:`pyarrow.compute.filter` for full usage. + + Parameters + ---------- + mask : Array or array-like + The boolean mask to filter the array with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled. + + Returns + ------- + filtered : Array + An array of the same type, with only the elements selected by + the boolean mask. + """ + return _pc().filter(self, mask, + null_selection_behavior=null_selection_behavior) + + def index(self, value, start=None, end=None, *, memory_pool=None): + """ + Find the first index of a value. + + See :func:`pyarrow.compute.index` for full usage. + + Parameters + ---------- + value : Scalar or object + The value to look for in the array. + start : int, optional + The start index where to look for `value`. + end : int, optional + The end index where to look for `value`. + memory_pool : MemoryPool, optional + A memory pool for potential memory allocations. + + Returns + ------- + index : Int64Scalar + The index of the value in the array (-1 if not found). + """ + return _pc().index(self, value, start, end, memory_pool=memory_pool) + + def sort(self, order="ascending", **kwargs): + """ + Sort the Array + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + result : Array + """ + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=[("", order)], **kwargs) + ) + return self.take(indices) + + def _to_pandas(self, options, types_mapper=None, **kwargs): + return _array_like_to_pandas(self, options, types_mapper=types_mapper) + + def __array__(self, dtype=None): + values = self.to_numpy(zero_copy_only=False) + if dtype is None: + return values + return values.astype(dtype) + + def to_numpy(self, zero_copy_only=True, writable=False): + """ + Return a NumPy view or copy of this array (experimental). + + By default, tries to return a view of this array. This is only + supported for primitive arrays with the same memory layout as NumPy + (i.e. integers, floating point, ..) and without any nulls. + + For the extension arrays, this method simply delegates to the + underlying storage array. + + Parameters + ---------- + zero_copy_only : bool, default True + If True, an exception will be raised if the conversion to a numpy + array would require copying the underlying data (e.g. in presence + of nulls, or for non-primitive types). + writable : bool, default False + For numpy arrays created with zero copy (view on the Arrow data), + the resulting array is not writable (Arrow data is immutable). + By setting this to True, a copy of the array is made to ensure + it is writable. 
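+
+        Examples
+        --------
+        A minimal sketch of the two modes described above (output shown
+        for illustration; nulls force a copy to a float array):
+
+        >>> import pyarrow as pa
+        >>> pa.array([1, 2, 3]).to_numpy()  # doctest: +SKIP
+        array([1, 2, 3])
+        >>> pa.array([1, None]).to_numpy(zero_copy_only=False)  # doctest: +SKIP
+        array([ 1., nan])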
+ + Returns + ------- + array : numpy.ndarray + """ + cdef: + PyObject* out + PandasOptions c_options + object values + + if zero_copy_only and writable: + raise ValueError( + "Cannot return a writable array if asking for zero-copy") + + # If there are nulls and the array is a DictionaryArray + # decoding the dictionary will make sure nulls are correctly handled. + # Decoding a dictionary does imply a copy by the way, + # so it can't be done if the user requested a zero_copy. + c_options.decode_dictionaries = not zero_copy_only + c_options.zero_copy_only = zero_copy_only + c_options.to_numpy = True + + with nogil: + check_status(ConvertArrayToPandas(c_options, self.sp_array, + self, &out)) + + # wrap_array_output uses pandas to convert to Categorical, here + # always convert to numpy array without pandas dependency + array = PyObject_to_object(out) + + if isinstance(array, dict): + array = np.take(array['dictionary'], array['indices']) + + if writable and not array.flags.writeable: + # if the conversion already needed to a copy, writeable is True + array = array.copy() + return array + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + Returns + ------- + lst : list + """ + return [x.as_py() for x in self] + + def tolist(self): + """ + Alias of to_pylist for compatibility with NumPy. + """ + return self.to_pylist() + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.ap.ValidateFull()) + else: + with nogil: + check_status(self.ap.Validate()) + + @property + def offset(self): + """ + A relative position into another array's data. + + The purpose is to enable zero-copy slicing. This value defaults to zero + but must be applied on all operations with the physical storage + buffers. + """ + return self.sp_array.get().offset() + + def buffers(self): + """ + Return a list of Buffer objects pointing to this array's physical + storage. + + To correctly interpret these buffers, you need to also apply the offset + multiplied with the size of the stored data type. + """ + res = [] + _append_array_buffers(self.sp_array.get().data().get(), res) + return res + + def _export_to_c(self, out_ptr, out_schema_ptr=0): + """ + Export to a C ArrowArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the array type + is exported to it at the same time. + + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportArray(deref(self.sp_array), + c_ptr, + c_schema_ptr)) + + @staticmethod + def _import_from_c(in_ptr, type): + """ + Import Array from a C ArrowArray struct, given its pointer + and the imported array type. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArray struct. 
+ type: DataType or int + Either a DataType object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + void* c_type_ptr + shared_ptr[CArray] c_array + + c_type = pyarrow_unwrap_data_type(type) + if c_type == nullptr: + # Not a DataType object, perhaps a raw ArrowSchema pointer + c_type_ptr = _as_c_pointer(type) + with nogil: + c_array = GetResultValue(ImportArray( + c_ptr, c_type_ptr)) + else: + with nogil: + c_array = GetResultValue(ImportArray( c_ptr, + c_type)) + return pyarrow_wrap_array(c_array) + + def __arrow_c_array__(self, requested_schema=None): + """ + Get a pair of PyCapsules containing a C ArrowArray representation of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the array to this data type. + If None, the array will be returned as-is, with a type matching the + one returned by :meth:`__arrow_c_schema__()`. + + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowArray, + respectively. + """ + cdef: + ArrowArray* c_array + ArrowSchema* c_schema + shared_ptr[CArray] inner_array + + if requested_schema is not None: + target_type = DataType._import_from_c_capsule(requested_schema) + + if target_type != self.type: + try: + casted_array = _pc().cast(self, target_type, safe=True) + inner_array = pyarrow_unwrap_array(casted_array) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.type} to requested type {target_type}: {e}" + ) + else: + inner_array = self.sp_array + else: + inner_array = self.sp_array + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_array(&c_array) + + with nogil: + check_status(ExportArray(deref(inner_array), c_array, c_schema)) + + return schema_capsule, array_capsule + + @staticmethod + def _import_from_c_capsule(schema_capsule, array_capsule): + cdef: + ArrowSchema* c_schema + ArrowArray* c_array + shared_ptr[CArray] array + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer(array_capsule, 'arrow_array') + + with nogil: + array = GetResultValue(ImportArray(c_array, c_schema)) + + return pyarrow_wrap_array(array) + + def __dlpack__(self, stream=None): + """Export a primitive array as a DLPack capsule. + + Parameters + ---------- + stream : int, optional + A Python integer representing a pointer to a stream. Currently not supported. + Stream is provided by the consumer to the producer to instruct the producer + to ensure that operations can safely be performed on the array. + + Returns + ------- + capsule : PyCapsule + A DLPack capsule for the array, pointing to a DLManagedTensor. + """ + if stream is None: + dlm_tensor = GetResultValue(ExportToDLPack(self.sp_array)) + + return PyCapsule_New(dlm_tensor, 'dltensor', dlpack_pycapsule_deleter) + else: + raise NotImplementedError( + "Only stream=None is supported." + ) + + def __dlpack_device__(self): + """ + Return the DLPack device tuple this arrays resides on. + + Returns + ------- + tuple : Tuple[int, int] + Tuple with index specifying the type of the device (where + CPU = 1, see cpp/src/arrow/c/dpack_abi.h) and index of the + device which is 0 by default for CPU. 
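+
+        Examples
+        --------
+        A minimal sketch (CPU-backed arrays are expected to report device
+        type 1 and device id 0):
+
+        >>> import pyarrow as pa
+        >>> pa.array([1, 2, 3]).__dlpack_device__()  # doctest: +SKIP
+        (1, 0)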
+ """ + device = GetResultValue(ExportDevice(self.sp_array)) + return device.device_type, device.device_id + + +cdef _array_like_to_pandas(obj, options, types_mapper): + cdef: + PyObject* out + PandasOptions c_options = _convert_pandas_options(options) + + original_type = obj.type + name = obj._name + dtype = None + + if types_mapper: + dtype = types_mapper(original_type) + elif original_type.id == _Type_EXTENSION: + try: + dtype = original_type.to_pandas_dtype() + except NotImplementedError: + pass + + # Only call __from_arrow__ for Arrow extension types or when explicitly + # overridden via types_mapper + if hasattr(dtype, '__from_arrow__'): + arr = dtype.__from_arrow__(obj) + return pandas_api.series(arr, name=name, copy=False) + + if pandas_api.is_v1(): + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + c_options.coerce_temporal_nanoseconds = True + + if isinstance(obj, Array): + with nogil: + check_status(ConvertArrayToPandas(c_options, + ( obj).sp_array, + obj, &out)) + elif isinstance(obj, ChunkedArray): + with nogil: + check_status(libarrow_python.ConvertChunkedArrayToPandas( + c_options, + ( obj).sp_chunked_array, + obj, &out)) + + arr = wrap_array_output(out) + + if (isinstance(original_type, TimestampType) and + options["timestamp_as_object"]): + # ARROW-5359 - need to specify object dtype to avoid pandas to + # coerce back to ns resolution + dtype = "object" + elif types_mapper: + dtype = types_mapper(original_type) + else: + dtype = None + + result = pandas_api.series(arr, dtype=dtype, name=name, copy=False) + + if (isinstance(original_type, TimestampType) and + original_type.tz is not None and + # can be object dtype for non-ns and timestamp_as_object=True + result.dtype.kind == "M"): + from pyarrow.pandas_compat import make_tz_aware + result = make_tz_aware(result, original_type.tz) + + return result + + +cdef wrap_array_output(PyObject* output): + cdef object obj = PyObject_to_object(output) + + if isinstance(obj, dict): + return _pandas_api.categorical_type.from_codes( + obj['indices'], categories=obj['dictionary'], ordered=obj['ordered'] + ) + else: + return obj + + +cdef class NullArray(Array): + """ + Concrete class for Arrow arrays of null data type. + """ + + +cdef class BooleanArray(Array): + """ + Concrete class for Arrow arrays of boolean data type. + """ + @property + def false_count(self): + return ( self.ap).false_count() + + @property + def true_count(self): + return ( self.ap).true_count() + + +cdef class NumericArray(Array): + """ + A base class for Arrow numeric arrays. + """ + + +cdef class IntegerArray(NumericArray): + """ + A base class for Arrow integer arrays. + """ + + +cdef class FloatingPointArray(NumericArray): + """ + A base class for Arrow floating-point arrays. + """ + + +cdef class Int8Array(IntegerArray): + """ + Concrete class for Arrow arrays of int8 data type. + """ + + +cdef class UInt8Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint8 data type. + """ + + +cdef class Int16Array(IntegerArray): + """ + Concrete class for Arrow arrays of int16 data type. + """ + + +cdef class UInt16Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint16 data type. + """ + + +cdef class Int32Array(IntegerArray): + """ + Concrete class for Arrow arrays of int32 data type. + """ + + +cdef class UInt32Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint32 data type. + """ + + +cdef class Int64Array(IntegerArray): + """ + Concrete class for Arrow arrays of int64 data type. 
+ """ + + +cdef class UInt64Array(IntegerArray): + """ + Concrete class for Arrow arrays of uint64 data type. + """ + + +cdef class Date32Array(NumericArray): + """ + Concrete class for Arrow arrays of date32 data type. + """ + + +cdef class Date64Array(NumericArray): + """ + Concrete class for Arrow arrays of date64 data type. + """ + + +cdef class TimestampArray(NumericArray): + """ + Concrete class for Arrow arrays of timestamp data type. + """ + + +cdef class Time32Array(NumericArray): + """ + Concrete class for Arrow arrays of time32 data type. + """ + + +cdef class Time64Array(NumericArray): + """ + Concrete class for Arrow arrays of time64 data type. + """ + + +cdef class DurationArray(NumericArray): + """ + Concrete class for Arrow arrays of duration data type. + """ + + +cdef class MonthDayNanoIntervalArray(Array): + """ + Concrete class for Arrow arrays of interval[MonthDayNano] type. + """ + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + pyarrow.MonthDayNano is used as the native representation. + + Returns + ------- + lst : list + """ + cdef: + CResult[PyObject*] maybe_py_list + PyObject* py_list + CMonthDayNanoIntervalArray* array + array = self.sp_array.get() + maybe_py_list = MonthDayNanoIntervalArrayToPyList(deref(array)) + py_list = GetResultValue(maybe_py_list) + return PyObject_to_object(py_list) + + +cdef class HalfFloatArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float16 data type. + """ + + +cdef class FloatArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float32 data type. + """ + + +cdef class DoubleArray(FloatingPointArray): + """ + Concrete class for Arrow arrays of float64 data type. + """ + + +cdef class FixedSizeBinaryArray(Array): + """ + Concrete class for Arrow arrays of a fixed-size binary data type. + """ + + +cdef class Decimal128Array(FixedSizeBinaryArray): + """ + Concrete class for Arrow arrays of decimal128 data type. + """ + + +cdef class Decimal256Array(FixedSizeBinaryArray): + """ + Concrete class for Arrow arrays of decimal256 data type. + """ + +cdef class BaseListArray(Array): + + def flatten(self): + """ + Unnest this ListArray/LargeListArray by one level. + + The returned Array is logically a concatenation of all the sub-lists + in this Array. + + Note that this method is different from ``self.values`` in that + it takes care of the slicing offset as well as null elements backed + by non-empty sub-lists. + + Returns + ------- + result : Array + """ + return _pc().list_flatten(self) + + def value_parent_indices(self): + """ + Return array of same length as list child values array where each + output value is the index of the parent list array slot containing each + child value. + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([[1, 2, 3], [], None, [4]], + ... type=pa.list_(pa.int32())) + >>> arr.value_parent_indices() + + [ + 0, + 0, + 0, + 3 + ] + """ + return _pc().list_parent_indices(self) + + def value_lengths(self): + """ + Return integers array with values equal to the respective length of + each list element. Null list values are null in the output. + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([[1, 2, 3], [], None, [4]], + ... type=pa.list_(pa.int32())) + >>> arr.value_lengths() + + [ + 3, + 0, + null, + 1 + ] + """ + return _pc().list_value_length(self) + + +cdef class ListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a list data type. 
+ """ + + @staticmethod + def from_arrays(offsets, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct ListArray from arrays of int32 offsets and values. + + Parameters + ---------- + offsets : Array (int32 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + list_array : ListArray + + Examples + -------- + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> offsets = pa.array([0, 2, 4]) + >>> pa.ListArray.from_arrays(offsets, values) + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + >>> # nulls in the offsets array become null lists + >>> offsets = pa.array([0, None, 2, 4]) + >>> pa.ListArray.from_arrays(offsets, values) + + [ + [ + 1, + 2 + ], + null, + [ + 3, + 4 + ] + ] + """ + cdef: + Array _offsets, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int32') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CListArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CListArray.FromArrays( + _offsets.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the ListArray + ignoring the array's offset. + + If any of the list elements are null, but are backed by a + non-empty sub-list, those elements will be included in the + output. + + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's offset. + + Returns + ------- + values : Array + + See Also + -------- + ListArray.flatten : ... + + Examples + -------- + + The values include null elements from sub-lists: + + >>> import pyarrow as pa + >>> array = pa.array([[1, 2], None, [3, 4, None, 6]]) + >>> array.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + If an array is sliced, the slice still uses the same + underlying data as the original array, just with an + offset. Since values ignores the offset, the values are the + same: + + >>> sliced = array.slice(1, 2) + >>> sliced + + [ + null, + [ + 3, + 4, + null, + 6 + ] + ] + >>> sliced.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + """ + cdef CListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list offsets as an int32 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `ListArray.from_arrays` and get back the same + list array if the original one has nulls. + + Returns + ------- + offsets : Int32Array + + Examples + -------- + >>> import pyarrow as pa + >>> array = pa.array([[1, 2], None, [3, 4, 5]]) + >>> array.offsets + + [ + 0, + 2, + 2, + 5 + ] + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + +cdef class LargeListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a large list data type. + + Identical to ListArray, but 64-bit offsets. 
+ """ + + @staticmethod + def from_arrays(offsets, values, DataType type=None, MemoryPool pool=None, mask=None): + """ + Construct LargeListArray from arrays of int64 offsets and values. + + Parameters + ---------- + offsets : Array (int64 type) + values : Array (any type) + type : DataType, optional + If not specified, a default ListType with the values' type is + used. + pool : MemoryPool, optional + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + Returns + ------- + list_array : LargeListArray + """ + cdef: + Array _offsets, _values + shared_ptr[CArray] out + shared_ptr[CBuffer] c_mask + + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int64') + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, pool) + + if type is not None: + with nogil: + out = GetResultValue( + CLargeListArray.FromArraysAndType( + type.sp_type, _offsets.ap[0], _values.ap[0], cpool, c_mask)) + else: + with nogil: + out = GetResultValue( + CLargeListArray.FromArrays( + _offsets.ap[0], _values.ap[0], cpool, c_mask)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the LargeListArray + ignoring the array's offset. + + If any of the list elements are null, but are backed by a + non-empty sub-list, those elements will be included in the + output. + + Compare with :meth:`flatten`, which returns only the non-null + values taking into consideration the array's offset. + + Returns + ------- + values : Array + + See Also + -------- + LargeListArray.flatten : ... + + Examples + -------- + + The values include null elements from the sub-lists: + + >>> import pyarrow as pa + >>> array = pa.array( + ... [[1, 2], None, [3, 4, None, 6]], + ... type=pa.large_list(pa.int32()), + ... ) + >>> array.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + + If an array is sliced, the slice still uses the same + underlying data as the original array, just with an + offset. Since values ignores the offset, the values are the + same: + + >>> sliced = array.slice(1, 2) + >>> sliced + + [ + null, + [ + 3, + 4, + null, + 6 + ] + ] + >>> sliced.values + + [ + 1, + 2, + 3, + 4, + null, + 6 + ] + """ + cdef CLargeListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + @property + def offsets(self): + """ + Return the list offsets as an int64 array. + + The returned array will not have a validity bitmap, so you cannot + expect to pass it to `LargeListArray.from_arrays` and get back the + same list array if the original one has nulls. + + Returns + ------- + offsets : Int64Array + """ + return pyarrow_wrap_array(( self.ap).offsets()) + + +cdef class MapArray(ListArray): + """ + Concrete class for Arrow arrays of a map data type. + """ + + @staticmethod + def from_arrays(offsets, keys, items, DataType type=None, MemoryPool pool=None): + """ + Construct MapArray from arrays of int32 offsets and key, item arrays. + + Parameters + ---------- + offsets : array-like or sequence (int32 type) + keys : array-like or sequence (any type) + items : array-like or sequence (any type) + type : DataType, optional + If not specified, a default MapArray with the keys' and items' type is used. + pool : MemoryPool + + Returns + ------- + map_array : MapArray + + Examples + -------- + First, let's understand the structure of our dataset when viewed in a rectangular data model. 
+ The total of 5 respondents answered the question "How much did you like the movie x?". + The value -1 in the integer array means that the value is missing. The boolean array + represents the null bitmask corresponding to the missing values in the integer array. + + >>> import pyarrow as pa + >>> movies_rectangular = np.ma.masked_array([ + ... [10, -1, -1], + ... [8, 4, 5], + ... [-1, 10, 3], + ... [-1, -1, -1], + ... [-1, -1, -1] + ... ], + ... [ + ... [False, True, True], + ... [False, False, False], + ... [True, False, False], + ... [True, True, True], + ... [True, True, True], + ... ]) + + To represent the same data with the MapArray and from_arrays, the data is + formed like this: + + >>> offsets = [ + ... 0, # -- row 1 start + ... 1, # -- row 2 start + ... 4, # -- row 3 start + ... 6, # -- row 4 start + ... 6, # -- row 5 start + ... 6, # -- row 5 end + ... ] + >>> movies = [ + ... "Dark Knight", # ---------------------------------- row 1 + ... "Dark Knight", "Meet the Parents", "Superman", # -- row 2 + ... "Meet the Parents", "Superman", # ----------------- row 3 + ... ] + >>> likings = [ + ... 10, # -------- row 1 + ... 8, 4, 5, # --- row 2 + ... 10, 3 # ------ row 3 + ... ] + >>> pa.MapArray.from_arrays(offsets, movies, likings).to_pandas() + 0 [(Dark Knight, 10)] + 1 [(Dark Knight, 8), (Meet the Parents, 4), (Sup... + 2 [(Meet the Parents, 10), (Superman, 3)] + 3 [] + 4 [] + dtype: object + + If the data in the empty rows needs to be marked as missing, it's possible + to do so by modifying the offsets argument, so that we specify `None` as + the starting positions of the rows we want marked as missing. The end row + offset still has to refer to the existing value from keys (and values): + + >>> offsets = [ + ... 0, # ----- row 1 start + ... 1, # ----- row 2 start + ... 4, # ----- row 3 start + ... None, # -- row 4 start + ... None, # -- row 5 start + ... 6, # ----- row 5 end + ... ] + >>> pa.MapArray.from_arrays(offsets, movies, likings).to_pandas() + 0 [(Dark Knight, 10)] + 1 [(Dark Knight, 8), (Meet the Parents, 4), (Sup... + 2 [(Meet the Parents, 10), (Superman, 3)] + 3 None + 4 None + dtype: object + """ + cdef: + Array _offsets, _keys, _items + shared_ptr[CArray] out + cdef CMemoryPool* cpool = maybe_unbox_memory_pool(pool) + + _offsets = asarray(offsets, type='int32') + _keys = asarray(keys) + _items = asarray(items) + + if type is not None: + with nogil: + out = GetResultValue( + CMapArray.FromArraysAndType( + type.sp_type, _offsets.sp_array, + _keys.sp_array, _items.sp_array, cpool)) + else: + with nogil: + out = GetResultValue( + CMapArray.FromArrays(_offsets.sp_array, + _keys.sp_array, + _items.sp_array, cpool)) + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @property + def keys(self): + """Flattened array of keys across all maps in array""" + return pyarrow_wrap_array(( self.ap).keys()) + + @property + def items(self): + """Flattened array of items across all maps in array""" + return pyarrow_wrap_array(( self.ap).items()) + + +cdef class FixedSizeListArray(BaseListArray): + """ + Concrete class for Arrow arrays of a fixed size list data type. + """ + + @staticmethod + def from_arrays(values, list_size=None, DataType type=None, mask=None): + """ + Construct FixedSizeListArray from array of values and a list length. + + Parameters + ---------- + values : Array (any type) + list_size : int + The fixed length of the lists. 
+ type : DataType, optional + If not specified, a default ListType with the values' type and + `list_size` length is used. + mask : Array (boolean type), optional + Indicate which values are null (True) or not null (False). + + + Returns + ------- + FixedSizeListArray + + Examples + -------- + + Create from a values array and a list size: + + >>> import pyarrow as pa + >>> values = pa.array([1, 2, 3, 4]) + >>> arr = pa.FixedSizeListArray.from_arrays(values, 2) + >>> arr + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + + Or create from a values array, list size and matching type: + + >>> typ = pa.list_(pa.field("values", pa.int64()), 2) + >>> arr = pa.FixedSizeListArray.from_arrays(values,type=typ) + >>> arr + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + """ + cdef: + Array _values + int32_t _list_size + CResult[shared_ptr[CArray]] c_result + + _values = asarray(values) + + c_mask = c_mask_inverted_from_obj(mask, None) + + if type is not None: + if list_size is not None: + raise ValueError("Cannot specify both list_size and type") + with nogil: + c_result = CFixedSizeListArray.FromArraysAndType( + _values.sp_array, type.sp_type, c_mask) + else: + if list_size is None: + raise ValueError("Should specify one of list_size and type") + _list_size = list_size + with nogil: + c_result = CFixedSizeListArray.FromArrays( + _values.sp_array, _list_size, c_mask) + cdef Array result = pyarrow_wrap_array(GetResultValue(c_result)) + result.validate() + return result + + @property + def values(self): + """ + Return the underlying array of values which backs the + FixedSizeListArray. + + Note even null elements are included. + + Compare with :meth:`flatten`, which returns only the non-null + sub-list values. + + Returns + ------- + values : Array + + See Also + -------- + FixedSizeListArray.flatten : ... + + Examples + -------- + >>> import pyarrow as pa + >>> array = pa.array( + ... [[1, 2], None, [3, None]], + ... type=pa.list_(pa.int32(), 2) + ... ) + >>> array.values + + [ + 1, + 2, + null, + null, + 3, + null + ] + + """ + cdef CFixedSizeListArray* arr = self.ap + return pyarrow_wrap_array(arr.values()) + + +cdef class UnionArray(Array): + """ + Concrete class for Arrow arrays of a Union data type. + """ + + def child(self, int pos): + """ + DEPRECATED, use field() instead. + + Parameters + ---------- + pos : int + The physical index of the union child field (not its type code). + + Returns + ------- + field : pyarrow.Field + The given child field. + """ + import warnings + warnings.warn("child is deprecated, use field", FutureWarning) + return self.field(pos) + + def field(self, int pos): + """ + Return the given child field as an individual array. + + For sparse unions, the returned array has its offset, length, + and null count adjusted. + + For dense unions, the returned array is unchanged. + + Parameters + ---------- + pos : int + The physical index of the union child field (not its type code). + + Returns + ------- + field : Array + The given child field. + """ + cdef shared_ptr[CArray] result + result = ( self.ap).field(pos) + if result != NULL: + return pyarrow_wrap_array(result) + raise KeyError("UnionArray does not have child {}".format(pos)) + + @property + def type_codes(self): + """Get the type codes array.""" + buf = pyarrow_wrap_buffer(( self.ap).type_codes()) + return Array.from_buffers(int8(), len(self), [None, buf]) + + @property + def offsets(self): + """ + Get the value offsets array (dense arrays only). + + Does not account for any slice offset. 
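+
+        Examples
+        --------
+        A minimal illustration with a dense union (repr header line of the
+        output omitted, as elsewhere in this file); the offsets echo the
+        ``value_offsets`` the union was built from:
+
+        >>> import pyarrow as pa
+        >>> types = pa.array([0, 1, 0], type=pa.int8())
+        >>> value_offsets = pa.array([0, 0, 1], type=pa.int32())
+        >>> union = pa.UnionArray.from_dense(
+        ...     types, value_offsets, [pa.array([1, 2]), pa.array(["a"])])
+        >>> union.offsets
+
+        [
+          0,
+          0,
+          1
+        ]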
+ """ + if self.type.mode != "dense": + raise ArrowTypeError("Can only get value offsets for dense arrays") + cdef CDenseUnionArray* dense = self.ap + buf = pyarrow_wrap_buffer(dense.value_offsets()) + return Array.from_buffers(int32(), len(self), [None, buf]) + + @staticmethod + def from_dense(Array types, Array value_offsets, list children, + list field_names=None, list type_codes=None): + """ + Construct dense UnionArray from arrays of int8 types, int32 offsets and + children arrays + + Parameters + ---------- + types : Array (int8 type) + value_offsets : Array (int32 type) + children : list + field_names : list + type_codes : list + + Returns + ------- + union_array : UnionArray + """ + cdef: + shared_ptr[CArray] out + vector[shared_ptr[CArray]] c + Array child + vector[c_string] c_field_names + vector[int8_t] c_type_codes + + for child in children: + c.push_back(child.sp_array) + if field_names is not None: + for x in field_names: + c_field_names.push_back(tobytes(x)) + if type_codes is not None: + for x in type_codes: + c_type_codes.push_back(x) + + with nogil: + out = GetResultValue(CDenseUnionArray.Make( + deref(types.ap), deref(value_offsets.ap), c, c_field_names, + c_type_codes)) + + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + @staticmethod + def from_sparse(Array types, list children, list field_names=None, + list type_codes=None): + """ + Construct sparse UnionArray from arrays of int8 types and children + arrays + + Parameters + ---------- + types : Array (int8 type) + children : list + field_names : list + type_codes : list + + Returns + ------- + union_array : UnionArray + """ + cdef: + shared_ptr[CArray] out + vector[shared_ptr[CArray]] c + Array child + vector[c_string] c_field_names + vector[int8_t] c_type_codes + + for child in children: + c.push_back(child.sp_array) + if field_names is not None: + for x in field_names: + c_field_names.push_back(tobytes(x)) + if type_codes is not None: + for x in type_codes: + c_type_codes.push_back(x) + + with nogil: + out = GetResultValue(CSparseUnionArray.Make( + deref(types.ap), c, c_field_names, c_type_codes)) + + cdef Array result = pyarrow_wrap_array(out) + result.validate() + return result + + +cdef class StringArray(Array): + """ + Concrete class for Arrow arrays of string (or utf8) data type. + """ + + @staticmethod + def from_buffers(int length, Buffer value_offsets, Buffer data, + Buffer null_bitmap=None, int null_count=-1, + int offset=0): + """ + Construct a StringArray from value_offsets and data buffers. + If there are nulls in the data, also a null_bitmap and the matching + null_count must be passed. + + Parameters + ---------- + length : int + value_offsets : Buffer + data : Buffer + null_bitmap : Buffer, optional + null_count : int, default 0 + offset : int, default 0 + + Returns + ------- + string_array : StringArray + """ + return Array.from_buffers(utf8(), length, + [null_bitmap, value_offsets, data], + null_count, offset) + + +cdef class LargeStringArray(Array): + """ + Concrete class for Arrow arrays of large string (or utf8) data type. + """ + + @staticmethod + def from_buffers(int length, Buffer value_offsets, Buffer data, + Buffer null_bitmap=None, int null_count=-1, + int offset=0): + """ + Construct a LargeStringArray from value_offsets and data buffers. + If there are nulls in the data, also a null_bitmap and the matching + null_count must be passed. 
+ + Parameters + ---------- + length : int + value_offsets : Buffer + data : Buffer + null_bitmap : Buffer, optional + null_count : int, default 0 + offset : int, default 0 + + Returns + ------- + string_array : StringArray + """ + return Array.from_buffers(large_utf8(), length, + [null_bitmap, value_offsets, data], + null_count, offset) + + +cdef class BinaryArray(Array): + """ + Concrete class for Arrow arrays of variable-sized binary data type. + """ + @property + def total_values_length(self): + """ + The number of bytes from beginning to end of the data buffer addressed + by the offsets of this BinaryArray. + """ + return ( self.ap).total_values_length() + + +cdef class LargeBinaryArray(Array): + """ + Concrete class for Arrow arrays of large variable-sized binary data type. + """ + @property + def total_values_length(self): + """ + The number of bytes from beginning to end of the data buffer addressed + by the offsets of this LargeBinaryArray. + """ + return ( self.ap).total_values_length() + + +cdef class DictionaryArray(Array): + """ + Concrete class for dictionary-encoded Arrow arrays. + """ + + def dictionary_encode(self): + return self + + def dictionary_decode(self): + """ + Decodes the DictionaryArray to an Array. + """ + return self.dictionary.take(self.indices) + + @property + def dictionary(self): + cdef CDictionaryArray* darr = (self.ap) + + if self._dictionary is None: + self._dictionary = pyarrow_wrap_array(darr.dictionary()) + + return self._dictionary + + @property + def indices(self): + cdef CDictionaryArray* darr = (self.ap) + + if self._indices is None: + self._indices = pyarrow_wrap_array(darr.indices()) + + return self._indices + + @staticmethod + def from_buffers(DataType type, int64_t length, buffers, Array dictionary, + int64_t null_count=-1, int64_t offset=0): + """ + Construct a DictionaryArray from buffers. + + Parameters + ---------- + type : pyarrow.DataType + length : int + The number of values in the array. + buffers : List[Buffer] + The buffers backing the indices array. + dictionary : pyarrow.Array, ndarray or pandas.Series + The array of values referenced by the indices. + null_count : int, default -1 + The number of null entries in the indices array. Negative value means that + the null count is not known. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + + Returns + ------- + dict_array : DictionaryArray + """ + cdef: + vector[shared_ptr[CBuffer]] c_buffers + shared_ptr[CDataType] c_type + shared_ptr[CArrayData] c_data + shared_ptr[CArray] c_result + + for buf in buffers: + c_buffers.push_back(pyarrow_unwrap_buffer(buf)) + + c_type = pyarrow_unwrap_data_type(type) + + with nogil: + c_data = CArrayData.Make( + c_type, length, c_buffers, null_count, offset) + c_data.get().dictionary = dictionary.sp_array.get().data() + c_result.reset(new CDictionaryArray(c_data)) + + cdef Array result = pyarrow_wrap_array(c_result) + result.validate() + return result + + @staticmethod + def from_arrays(indices, dictionary, mask=None, bint ordered=False, + bint from_pandas=False, bint safe=True, + MemoryPool memory_pool=None): + """ + Construct a DictionaryArray from indices and values. + + Parameters + ---------- + indices : pyarrow.Array, numpy.ndarray or pandas.Series, int type + Non-negative integers referencing the dictionary values by zero + based index. + dictionary : pyarrow.Array, ndarray or pandas.Series + The array of values referenced by the indices. 
+ mask : ndarray or pandas.Series, bool type + True values indicate that indices are actually null. + ordered : bool, default False + Set to True if the category values are ordered. + from_pandas : bool, default False + If True, the indices should be treated as though they originated in + a pandas.Categorical (null encoded as -1). + safe : bool, default True + If True, check that the dictionary indices are in range. + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise uses default pool. + + Returns + ------- + dict_array : DictionaryArray + """ + cdef: + Array _indices, _dictionary + shared_ptr[CDataType] c_type + shared_ptr[CArray] c_result + + if isinstance(indices, Array): + if mask is not None: + raise NotImplementedError( + "mask not implemented with Arrow array inputs yet") + _indices = indices + else: + if from_pandas: + _indices = _codes_to_indices(indices, mask, None, memory_pool) + else: + _indices = array(indices, mask=mask, memory_pool=memory_pool) + + if isinstance(dictionary, Array): + _dictionary = dictionary + else: + _dictionary = array(dictionary, memory_pool=memory_pool) + + if not isinstance(_indices, IntegerArray): + raise ValueError('Indices must be integer type') + + cdef c_bool c_ordered = ordered + + c_type.reset(new CDictionaryType(_indices.type.sp_type, + _dictionary.sp_array.get().type(), + c_ordered)) + + if safe: + with nogil: + c_result = GetResultValue( + CDictionaryArray.FromArrays(c_type, _indices.sp_array, + _dictionary.sp_array)) + else: + c_result.reset(new CDictionaryArray(c_type, _indices.sp_array, + _dictionary.sp_array)) + + cdef Array result = pyarrow_wrap_array(c_result) + result.validate() + return result + + +cdef class StructArray(Array): + """ + Concrete class for Arrow arrays of a struct data type. + """ + + def field(self, index): + """ + Retrieves the child array belonging to field. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + + Returns + ------- + result : Array + """ + cdef: + CStructArray* arr = self.ap + shared_ptr[CArray] child + + if isinstance(index, (bytes, str)): + child = arr.GetFieldByName(tobytes(index)) + if child == nullptr: + raise KeyError(index) + elif isinstance(index, int): + child = arr.field( + _normalize_index(index, self.ap.num_fields())) + else: + raise TypeError('Expected integer or string index') + + return pyarrow_wrap_array(child) + + def _flattened_field(self, index, MemoryPool memory_pool=None): + """ + Retrieves the child array belonging to field, + accounting for the parent array null bitmap. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + + Returns + ------- + result : Array + """ + cdef: + CStructArray* arr = self.ap + shared_ptr[CArray] child + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + if isinstance(index, (bytes, str)): + int_index = self.type.get_field_index(index) + if int_index < 0: + raise KeyError(index) + elif isinstance(index, int): + int_index = _normalize_index(index, self.ap.num_fields()) + else: + raise TypeError('Expected integer or string index') + + child = GetResultValue(arr.GetFlattenedField(int_index, pool)) + return pyarrow_wrap_array(child) + + def flatten(self, MemoryPool memory_pool=None): + """ + Return one individual array for each field in the struct. 
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + + Returns + ------- + result : List[Array] + """ + cdef: + vector[shared_ptr[CArray]] arrays + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + CStructArray* sarr = self.ap + + with nogil: + arrays = GetResultValue(sarr.Flatten(pool)) + + return [pyarrow_wrap_array(arr) for arr in arrays] + + @staticmethod + def from_arrays(arrays, names=None, fields=None, mask=None, + memory_pool=None): + """ + Construct StructArray from collection of arrays representing + each field in the struct. + + Either field names or field instances must be passed. + + Parameters + ---------- + arrays : sequence of Array + names : List[str] (optional) + Field names for each struct child. + fields : List[Field] (optional) + Field instances for each struct child. + mask : pyarrow.Array[bool] (optional) + Indicate which values are null (True) or not null (False). + memory_pool : MemoryPool (optional) + For memory allocations, if required, otherwise uses default pool. + + Returns + ------- + result : StructArray + """ + cdef: + shared_ptr[CArray] c_array + shared_ptr[CBuffer] c_mask + vector[shared_ptr[CArray]] c_arrays + vector[c_string] c_names + vector[shared_ptr[CField]] c_fields + CResult[shared_ptr[CArray]] c_result + ssize_t num_arrays + ssize_t length + ssize_t i + Field py_field + DataType struct_type + + if names is None and fields is None: + raise ValueError('Must pass either names or fields') + if names is not None and fields is not None: + raise ValueError('Must pass either names or fields, not both') + + c_mask = c_mask_inverted_from_obj(mask, memory_pool) + + arrays = [asarray(x) for x in arrays] + for arr in arrays: + c_array = pyarrow_unwrap_array(arr) + if c_array == nullptr: + raise TypeError(f"Expected Array, got {arr.__class__}") + c_arrays.push_back(c_array) + if names is not None: + for name in names: + c_names.push_back(tobytes(name)) + else: + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + c_fields.push_back(py_field.sp_field) + + if (c_arrays.size() == 0 and c_names.size() == 0 and + c_fields.size() == 0): + # The C++ side doesn't allow this + if mask is None: + return array([], struct([])) + else: + return array([{}] * len(mask), struct([]), mask=mask) + + if names is not None: + # XXX Cannot pass "nullptr" for a shared_ptr argument: + # https://github.com/cython/cython/issues/3020 + c_result = CStructArray.MakeFromFieldNames( + c_arrays, c_names, c_mask, -1, 0) + else: + c_result = CStructArray.MakeFromFields( + c_arrays, c_fields, c_mask, -1, 0) + cdef Array result = pyarrow_wrap_array(GetResultValue(c_result)) + result.validate() + return result + + def sort(self, order="ascending", by=None, **kwargs): + """ + Sort the StructArray + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + by : str or None, default None + If to sort the array by one of its fields + or by the whole array. + **kwargs : dict, optional + Additional sorting options. 
+ As allowed by :class:`SortOptions` + + Returns + ------- + result : StructArray + """ + if by is not None: + tosort = self._flattened_field(by) + else: + tosort = self + indices = _pc().sort_indices( + tosort, + options=_pc().SortOptions(sort_keys=[("", order)], **kwargs) + ) + return self.take(indices) + + +cdef class RunEndEncodedArray(Array): + """ + Concrete class for Arrow run-end encoded arrays. + """ + + @staticmethod + def _from_arrays(type, allow_none_for_type, logical_length, run_ends, values, logical_offset): + cdef: + int64_t _logical_length + Array _run_ends + Array _values + int64_t _logical_offset + shared_ptr[CDataType] c_type + shared_ptr[CRunEndEncodedArray] ree_array + + _logical_length = logical_length + _logical_offset = logical_offset + + type = ensure_type(type, allow_none=allow_none_for_type) + if type is not None: + _run_ends = asarray(run_ends, type=type.run_end_type) + _values = asarray(values, type=type.value_type) + c_type = pyarrow_unwrap_data_type(type) + with nogil: + ree_array = GetResultValue(CRunEndEncodedArray.Make( + c_type, _logical_length, _run_ends.sp_array, _values.sp_array, _logical_offset)) + else: + _run_ends = asarray(run_ends) + _values = asarray(values) + with nogil: + ree_array = GetResultValue(CRunEndEncodedArray.MakeFromArrays( + _logical_length, _run_ends.sp_array, _values.sp_array, _logical_offset)) + cdef Array result = pyarrow_wrap_array(ree_array) + result.validate(full=True) + return result + + @staticmethod + def from_arrays(run_ends, values, type=None): + """ + Construct RunEndEncodedArray from run_ends and values arrays. + + Parameters + ---------- + run_ends : Array (int16, int32, or int64 type) + The run_ends array. + values : Array (any type) + The values array. + type : pyarrow.DataType, optional + The run_end_encoded(run_end_type, value_type) array type. + + Returns + ------- + RunEndEncodedArray + """ + logical_length = run_ends[-1] if len(run_ends) > 0 else 0 + return RunEndEncodedArray._from_arrays(type, True, logical_length, + run_ends, values, 0) + + @staticmethod + def from_buffers(DataType type, length, buffers, null_count=-1, offset=0, + children=None): + """ + Construct a RunEndEncodedArray from all the parameters that make up an + Array. + + RunEndEncodedArrays do not have buffers, only children arrays, but this + implementation is needed to satisfy the Array interface. + + Parameters + ---------- + type : DataType + The run_end_encoded(run_end_type, value_type) type. + length : int + The logical length of the run-end encoded array. Expected to match + the last value of the run_ends array (children[0]) minus the offset. + buffers : List[Buffer] + Empty List or [None]. + null_count : int, default -1 + The number of null entries in the array. Run-end encoded arrays + are specified to not have valid bits and null_count always equals 0. + offset : int, default 0 + The array's logical offset (in values, not in bytes) from the + start of each buffer. + children : List[Array] + Nested type children containing the run_ends and values arrays. 
+ + Returns + ------- + RunEndEncodedArray + """ + children = children or [] + + if type.num_fields != len(children): + raise ValueError("RunEndEncodedType's expected number of children " + "({0}) did not match the passed number " + "({1}).".format(type.num_fields, len(children))) + + # buffers are validated as if we needed to pass them to C++, but + # _make_from_arrays will take care of filling in the expected + # buffers array containing a single NULL buffer on the C++ side + if len(buffers) == 0: + buffers = [None] + if buffers[0] is not None: + raise ValueError("RunEndEncodedType expects None as validity " + "bitmap, buffers[0] is not None") + if type.num_buffers != len(buffers): + raise ValueError("RunEndEncodedType's expected number of buffers " + "({0}) did not match the passed number " + "({1}).".format(type.num_buffers, len(buffers))) + + # null_count is also validated as if we needed it + if null_count != -1 and null_count != 0: + raise ValueError("RunEndEncodedType's expected null_count (0) " + "did not match passed number ({0})".format(null_count)) + + return RunEndEncodedArray._from_arrays(type, False, length, children[0], + children[1], offset) + + @property + def run_ends(self): + """ + An array holding the logical indexes of each run-end. + + The physical offset to the array is applied. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return pyarrow_wrap_array(ree_array.run_ends()) + + @property + def values(self): + """ + An array holding the values of each run. + + The physical offset to the array is applied. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return pyarrow_wrap_array(ree_array.values()) + + def find_physical_offset(self): + """ + Find the physical offset of this REE array. + + This is the offset of the run that contains the value of the first + logical element of this array considering its offset. + + This function uses binary-search, so it has a O(log N) cost. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return ree_array.FindPhysicalOffset() + + def find_physical_length(self): + """ + Find the physical length of this REE array. + + The physical length of an REE is the number of physical values (and + run-ends) necessary to represent the logical range of values from offset + to length. + + This function uses binary-search, so it has a O(log N) cost. + """ + cdef CRunEndEncodedArray* ree_array = (self.ap) + return ree_array.FindPhysicalLength() + + +cdef class ExtensionArray(Array): + """ + Concrete class for Arrow extension arrays. + """ + + @property + def storage(self): + cdef: + CExtensionArray* ext_array = (self.ap) + + return pyarrow_wrap_array(ext_array.storage()) + + @staticmethod + def from_storage(BaseExtensionType typ, Array storage): + """ + Construct ExtensionArray from type and storage array. + + Parameters + ---------- + typ : DataType + The extension type for the result array. + storage : Array + The underlying storage for the result array. + + Returns + ------- + ext_array : ExtensionArray + """ + cdef: + shared_ptr[CExtensionArray] ext_array + + if storage.type != typ.storage_type: + raise TypeError("Incompatible storage type {0} " + "for extension type {1}".format(storage.type, typ)) + + ext_array = make_shared[CExtensionArray](typ.sp_type, storage.sp_array) + cdef Array result = pyarrow_wrap_array( ext_array) + result.validate() + return result + + +class FixedShapeTensorArray(ExtensionArray): + """ + Concrete class for fixed shape tensor extension arrays. 
+ + Examples + -------- + Define the extension type for tensor array + + >>> import pyarrow as pa + >>> tensor_type = pa.fixed_shape_tensor(pa.int32(), [2, 2]) + + Create an extension array + + >>> arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]] + >>> storage = pa.array(arr, pa.list_(pa.int32(), 4)) + >>> pa.ExtensionArray.from_storage(tensor_type, storage) + + [ + [ + 1, + 2, + 3, + 4 + ], + [ + 10, + 20, + 30, + 40 + ], + [ + 100, + 200, + 300, + 400 + ] + ] + """ + + def to_numpy_ndarray(self): + """ + Convert fixed shape tensor extension array to a numpy array (with dim+1). + + Note: ``permutation`` should be trivial (``None`` or ``[0, 1, ..., len(shape)-1]``). + """ + if self.type.permutation is None or self.type.permutation == list(range(len(self.type.shape))): + np_flat = np.asarray(self.storage.flatten()) + numpy_tensor = np_flat.reshape((len(self),) + tuple(self.type.shape)) + return numpy_tensor + else: + raise ValueError( + 'Only non-permuted tensors can be converted to numpy tensors.') + + @staticmethod + def from_numpy_ndarray(obj): + """ + Convert numpy tensors (ndarrays) to a fixed shape tensor extension array. + The first dimension of ndarray will become the length of the fixed + shape tensor array. + + Numpy array needs to be C-contiguous in memory + (``obj.flags["C_CONTIGUOUS"]==True``). + + Parameters + ---------- + obj : numpy.ndarray + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> arr = np.array( + ... [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]], + ... dtype=np.float32) + >>> pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + + [ + [ + 1, + 2, + 3, + 4, + 5, + 6 + ], + [ + 1, + 2, + 3, + 4, + 5, + 6 + ] + ] + """ + if not obj.flags["C_CONTIGUOUS"]: + raise ValueError('The data in the numpy array need to be in a single, ' + 'C-style contiguous segment.') + + arrow_type = from_numpy_dtype(obj.dtype) + shape = obj.shape[1:] + size = obj.size / obj.shape[0] + + return ExtensionArray.from_storage( + fixed_shape_tensor(arrow_type, shape), + FixedSizeListArray.from_arrays(np.ravel(obj, order='C'), size) + ) + + +cdef dict _array_classes = { + _Type_NA: NullArray, + _Type_BOOL: BooleanArray, + _Type_UINT8: UInt8Array, + _Type_UINT16: UInt16Array, + _Type_UINT32: UInt32Array, + _Type_UINT64: UInt64Array, + _Type_INT8: Int8Array, + _Type_INT16: Int16Array, + _Type_INT32: Int32Array, + _Type_INT64: Int64Array, + _Type_DATE32: Date32Array, + _Type_DATE64: Date64Array, + _Type_TIMESTAMP: TimestampArray, + _Type_TIME32: Time32Array, + _Type_TIME64: Time64Array, + _Type_DURATION: DurationArray, + _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalArray, + _Type_HALF_FLOAT: HalfFloatArray, + _Type_FLOAT: FloatArray, + _Type_DOUBLE: DoubleArray, + _Type_LIST: ListArray, + _Type_LARGE_LIST: LargeListArray, + _Type_MAP: MapArray, + _Type_FIXED_SIZE_LIST: FixedSizeListArray, + _Type_SPARSE_UNION: UnionArray, + _Type_DENSE_UNION: UnionArray, + _Type_BINARY: BinaryArray, + _Type_STRING: StringArray, + _Type_LARGE_BINARY: LargeBinaryArray, + _Type_LARGE_STRING: LargeStringArray, + _Type_DICTIONARY: DictionaryArray, + _Type_FIXED_SIZE_BINARY: FixedSizeBinaryArray, + _Type_DECIMAL128: Decimal128Array, + _Type_DECIMAL256: Decimal256Array, + _Type_STRUCT: StructArray, + _Type_RUN_END_ENCODED: RunEndEncodedArray, + _Type_EXTENSION: ExtensionArray, +} + + +cdef inline shared_ptr[CBuffer] c_mask_inverted_from_obj(object mask, MemoryPool pool) except *: + """ + Convert mask array obj to c_mask while also inverting to signify 1 for valid and 0 for 
null + """ + cdef shared_ptr[CBuffer] c_mask + if mask is None: + c_mask = shared_ptr[CBuffer]() + elif isinstance(mask, Array): + if mask.type.id != Type_BOOL: + raise TypeError('Mask must be a pyarrow.Array of type boolean') + if mask.null_count != 0: + raise ValueError('Mask must not contain nulls') + inverted_mask = _pc().invert(mask, memory_pool=pool) + c_mask = pyarrow_unwrap_buffer(inverted_mask.buffers()[1]) + else: + raise TypeError('Mask must be a pyarrow.Array of type boolean') + return c_mask + + +cdef object get_array_class_from_type( + const shared_ptr[CDataType]& sp_data_type): + cdef CDataType* data_type = sp_data_type.get() + if data_type == NULL: + raise ValueError('Array data type was NULL') + + if data_type.id() == _Type_EXTENSION: + py_ext_data_type = pyarrow_wrap_data_type(sp_data_type) + return py_ext_data_type.__arrow_ext_class__() + else: + return _array_classes[data_type.id()] + + +cdef object get_values(object obj, bint* is_series): + if pandas_api.is_series(obj) or pandas_api.is_index(obj): + result = pandas_api.get_values(obj) + is_series[0] = True + elif isinstance(obj, np.ndarray): + result = obj + is_series[0] = False + else: + result = pandas_api.series(obj, copy=False).values + is_series[0] = False + + return result + + +def concat_arrays(arrays, MemoryPool memory_pool=None): + """ + Concatenate the given arrays. + + The contents of the input arrays are copied into the returned array. + + Raises + ------ + ArrowInvalid + If not all of the arrays have the same type. + + Parameters + ---------- + arrays : iterable of pyarrow.Array + Arrays to concatenate, must be identically typed. + memory_pool : MemoryPool, default None + For memory allocations. If None, the default pool is used. + + Examples + -------- + >>> import pyarrow as pa + >>> arr1 = pa.array([2, 4, 5, 100]) + >>> arr2 = pa.array([2, 4]) + >>> pa.concat_arrays([arr1, arr2]) + + [ + 2, + 4, + 5, + 100, + 2, + 4 + ] + + """ + cdef: + vector[shared_ptr[CArray]] c_arrays + shared_ptr[CArray] c_concatenated + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + for array in arrays: + if not isinstance(array, Array): + raise TypeError("Iterable should contain Array objects, " + "got {0} instead".format(type(array))) + c_arrays.push_back(pyarrow_unwrap_array(array)) + + with nogil: + c_concatenated = GetResultValue(Concatenate(c_arrays, pool)) + + return pyarrow_wrap_array(c_concatenated) + + +def _empty_array(DataType type): + """ + Create empty array of the given type. + """ + if type.id == Type_DICTIONARY: + arr = DictionaryArray.from_arrays( + _empty_array(type.index_type), _empty_array(type.value_type), + ordered=type.ordered) + else: + arr = array([], type=type) + return arr diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.pxi new file mode 100644 index 0000000000000000000000000000000000000000..ab251017db78706c97c7dee8044636c55c80167e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/benchmark.pxi @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +def benchmark_PandasObjectIsNull(list obj): + Benchmark_PandasObjectIsNull(obj) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/builder.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/builder.pxi new file mode 100644 index 0000000000000000000000000000000000000000..a34ea5412e14a8e5e7433a8b640f31fd41be1107 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/builder.pxi @@ -0,0 +1,82 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +cdef class StringBuilder(_Weakrefable): + """ + Builder class for UTF8 strings. + + This class exposes facilities for incrementally adding string values and + building the null bitmap for a pyarrow.Array (type='string'). + """ + cdef: + unique_ptr[CStringBuilder] builder + + def __cinit__(self, MemoryPool memory_pool=None): + cdef CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + self.builder.reset(new CStringBuilder(pool)) + + def append(self, value): + """ + Append a single value to the builder. + + The value can either be a string/bytes object or a null value + (np.nan or None). + + Parameters + ---------- + value : string/bytes or np.nan/None + The value to append to the string array builder. + """ + if value is None or value is np.nan: + self.builder.get().AppendNull() + elif isinstance(value, (bytes, str)): + self.builder.get().Append(tobytes(value)) + else: + raise TypeError('StringBuilder only accepts string objects') + + def append_values(self, values): + """ + Append all the values from an iterable. + + Parameters + ---------- + values : iterable of string/bytes or np.nan/None values + The values to append to the string array builder. + """ + for value in values: + self.append(value) + + def finish(self): + """ + Return result of builder as an Array object; also resets the builder. 
+ + Returns + ------- + array : pyarrow.Array + """ + cdef shared_ptr[CArray] out + with nogil: + self.builder.get().Finish(&out) + return pyarrow_wrap_array(out) + + @property + def null_count(self): + return self.builder.get().null_count() + + def __len__(self): + return self.builder.get().length() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/cffi.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..961b61dee59fd2d211723fff0d288b2db668a335 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/cffi.py @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import + +import cffi + +c_source = """ + struct ArrowSchema { + // Array type description + const char* format; + const char* name; + const char* metadata; + int64_t flags; + int64_t n_children; + struct ArrowSchema** children; + struct ArrowSchema* dictionary; + + // Release callback + void (*release)(struct ArrowSchema*); + // Opaque producer-specific data + void* private_data; + }; + + struct ArrowArray { + // Array data description + int64_t length; + int64_t null_count; + int64_t offset; + int64_t n_buffers; + int64_t n_children; + const void** buffers; + struct ArrowArray** children; + struct ArrowArray* dictionary; + + // Release callback + void (*release)(struct ArrowArray*); + // Opaque producer-specific data + void* private_data; + }; + + struct ArrowArrayStream { + int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out); + int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out); + + const char* (*get_last_error)(struct ArrowArrayStream*); + + // Release callback + void (*release)(struct ArrowArrayStream*); + // Opaque producer-specific data + void* private_data; + }; + """ + +# TODO use out-of-line mode for faster import and avoid C parsing +ffi = cffi.FFI() +ffi.cdef(c_source) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/compat.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/compat.pxi new file mode 100644 index 0000000000000000000000000000000000000000..8cf106d5609b50dd84c082dcfd36aee5b16fbee4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/compat.pxi @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +def encode_file_path(path): + if isinstance(path, str): + # POSIX systems can handle utf-8. UTF8 is converted to utf16-le in + # libarrow + encoded_path = path.encode('utf-8') + else: + encoded_path = path + + # Windows file system requires utf-16le for file names; Arrow C++ libraries + # will convert utf8 to utf16 + return encoded_path + + +# Starting with Python 3.7, dicts are guaranteed to be insertion-ordered. +ordered_dict = dict + + +try: + import cloudpickle as pickle +except ImportError: + import pickle + + +def tobytes(o): + """ + Encode a unicode or bytes string to bytes. + + Parameters + ---------- + o : str or bytes + Input string. + """ + if isinstance(o, str): + return o.encode('utf8') + else: + return o + + +def frombytes(o, *, safe=False): + """ + Decode the given bytestring to unicode. + + Parameters + ---------- + o : bytes-like + Input object. + safe : bool, default False + If true, raise on encoding errors. + """ + if safe: + return o.decode('utf8', errors='replace') + else: + return o.decode('utf8') diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/compute.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/compute.py new file mode 100644 index 0000000000000000000000000000000000000000..205ab393b8b099bab03c3b19d5b57f985c5f5a2a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/compute.py @@ -0,0 +1,731 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
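+
+# This module re-exports the compute kernels implemented in pyarrow._compute
+# and, via _make_global_functions() further below, generates a Python-level
+# wrapper for every function in the registry; some of the generated wrappers
+# are then overridden by hand-written versions such as cast().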
+ +from pyarrow._compute import ( # noqa + Function, + FunctionOptions, + FunctionRegistry, + HashAggregateFunction, + HashAggregateKernel, + Kernel, + ScalarAggregateFunction, + ScalarAggregateKernel, + ScalarFunction, + ScalarKernel, + VectorFunction, + VectorKernel, + # Option classes + ArraySortOptions, + AssumeTimezoneOptions, + CastOptions, + CountOptions, + CumulativeOptions, + CumulativeSumOptions, + DayOfWeekOptions, + DictionaryEncodeOptions, + RunEndEncodeOptions, + ElementWiseAggregateOptions, + ExtractRegexOptions, + FilterOptions, + IndexOptions, + JoinOptions, + ListSliceOptions, + MakeStructOptions, + MapLookupOptions, + MatchSubstringOptions, + ModeOptions, + NullOptions, + PadOptions, + PairwiseOptions, + PartitionNthOptions, + QuantileOptions, + RandomOptions, + RankOptions, + ReplaceSliceOptions, + ReplaceSubstringOptions, + RoundBinaryOptions, + RoundOptions, + RoundTemporalOptions, + RoundToMultipleOptions, + ScalarAggregateOptions, + SelectKOptions, + SetLookupOptions, + SliceOptions, + SortOptions, + SplitOptions, + SplitPatternOptions, + StrftimeOptions, + StrptimeOptions, + StructFieldOptions, + TakeOptions, + TDigestOptions, + TrimOptions, + Utf8NormalizeOptions, + VarianceOptions, + WeekOptions, + # Functions + call_function, + function_registry, + get_function, + list_functions, + # Udf + call_tabular_function, + register_scalar_function, + register_tabular_function, + register_aggregate_function, + register_vector_function, + UdfContext, + # Expressions + Expression, +) + +from collections import namedtuple +import inspect +from textwrap import dedent +import warnings + +import pyarrow as pa +from pyarrow import _compute_docstrings +from pyarrow.vendored import docscrape + + +def _get_arg_names(func): + return func._doc.arg_names + + +_OptionsClassDoc = namedtuple('_OptionsClassDoc', ('params',)) + + +def _scrape_options_class_doc(options_class): + if not options_class.__doc__: + return None + doc = docscrape.NumpyDocString(options_class.__doc__) + return _OptionsClassDoc(doc['Parameters']) + + +def _decorate_compute_function(wrapper, exposed_name, func, options_class): + # Decorate the given compute function wrapper with useful metadata + # and documentation. + cpp_doc = func._doc + + wrapper.__arrow_compute_function__ = dict( + name=func.name, + arity=func.arity, + options_class=cpp_doc.options_class, + options_required=cpp_doc.options_required) + wrapper.__name__ = exposed_name + wrapper.__qualname__ = exposed_name + + doc_pieces = [] + + # 1. One-line summary + summary = cpp_doc.summary + if not summary: + arg_str = "arguments" if func.arity > 1 else "argument" + summary = ("Call compute function {!r} with the given {}" + .format(func.name, arg_str)) + + doc_pieces.append(f"{summary}.\n\n") + + # 2. Multi-line description + description = cpp_doc.description + if description: + doc_pieces.append(f"{description}\n\n") + + doc_addition = _compute_docstrings.function_doc_additions.get(func.name) + + # 3. Parameter description + doc_pieces.append(dedent("""\ + Parameters + ---------- + """)) + + # 3a. Compute function parameters + arg_names = _get_arg_names(func) + for arg_name in arg_names: + if func.kind in ('vector', 'scalar_aggregate'): + arg_type = 'Array-like' + else: + arg_type = 'Array-like or scalar-like' + doc_pieces.append(f"{arg_name} : {arg_type}\n") + doc_pieces.append(" Argument to compute function.\n") + + # 3b. 
Compute function option values + if options_class is not None: + options_class_doc = _scrape_options_class_doc(options_class) + if options_class_doc: + for p in options_class_doc.params: + doc_pieces.append(f"{p.name} : {p.type}\n") + for s in p.desc: + doc_pieces.append(f" {s}\n") + else: + warnings.warn(f"Options class {options_class.__name__} " + f"does not have a docstring", RuntimeWarning) + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + doc_pieces.append(dedent("""\ + {0} : optional + Parameter for {1} constructor. Either `options` + or `{0}` can be passed, but not both at the same time. + """.format(p.name, options_class.__name__))) + doc_pieces.append(dedent(f"""\ + options : pyarrow.compute.{options_class.__name__}, optional + Alternative way of passing options. + """)) + + doc_pieces.append(dedent("""\ + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """)) + + # 4. Custom addition (e.g. examples) + if doc_addition is not None: + doc_pieces.append("\n{}\n".format(dedent(doc_addition).strip("\n"))) + + wrapper.__doc__ = "".join(doc_pieces) + return wrapper + + +def _get_options_class(func): + class_name = func._doc.options_class + if not class_name: + return None + try: + return globals()[class_name] + except KeyError: + warnings.warn("Python binding for {} not exposed" + .format(class_name), RuntimeWarning) + return None + + +def _handle_options(name, options_class, options, args, kwargs): + if args or kwargs: + if options is not None: + raise TypeError( + "Function {!r} called with both an 'options' argument " + "and additional arguments" + .format(name)) + return options_class(*args, **kwargs) + + if options is not None: + if isinstance(options, dict): + return options_class(**options) + elif isinstance(options, options_class): + return options + raise TypeError( + "Function {!r} expected a {} parameter, got {}" + .format(name, options_class, type(options))) + + return None + + +def _make_generic_wrapper(func_name, func, options_class, arity): + if options_class is None: + def wrapper(*args, memory_pool=None): + if arity is not Ellipsis and len(args) != arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args)) + return func.call(args, None, memory_pool) + else: + def wrapper(*args, memory_pool=None, options=None, **kwargs): + if arity is not Ellipsis: + if len(args) < arity: + raise TypeError( + f"{func_name} takes {arity} positional argument(s), " + f"but {len(args)} were given" + ) + option_args = args[arity:] + args = args[:arity] + else: + option_args = () + options = _handle_options(func_name, options_class, options, + option_args, kwargs) + if args and isinstance(args[0], Expression): + return Expression._call(func_name, list(args), options) + return func.call(args, options, memory_pool) + return wrapper + + +def _make_signature(arg_names, var_arg_names, options_class): + from inspect import Parameter + params = [] + for name in arg_names: + params.append(Parameter(name, Parameter.POSITIONAL_ONLY)) + for name in var_arg_names: + params.append(Parameter(name, Parameter.VAR_POSITIONAL)) + if options_class is not None: + options_sig = inspect.signature(options_class) + for p in options_sig.parameters.values(): + assert p.kind in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.KEYWORD_ONLY) + if var_arg_names: + # 
Cannot have a positional argument after a *args + p = p.replace(kind=Parameter.KEYWORD_ONLY) + params.append(p) + params.append(Parameter("options", Parameter.KEYWORD_ONLY, + default=None)) + params.append(Parameter("memory_pool", Parameter.KEYWORD_ONLY, + default=None)) + return inspect.Signature(params) + + +def _wrap_function(name, func): + options_class = _get_options_class(func) + arg_names = _get_arg_names(func) + has_vararg = arg_names and arg_names[-1].startswith('*') + if has_vararg: + var_arg_names = [arg_names.pop().lstrip('*')] + else: + var_arg_names = [] + + wrapper = _make_generic_wrapper( + name, func, options_class, arity=func.arity) + wrapper.__signature__ = _make_signature(arg_names, var_arg_names, + options_class) + return _decorate_compute_function(wrapper, name, func, options_class) + + +def _make_global_functions(): + """ + Make global functions wrapping each compute function. + + Note that some of the automatically-generated wrappers may be overridden + by custom versions below. + """ + g = globals() + reg = function_registry() + + # Avoid clashes with Python keywords + rewrites = {'and': 'and_', + 'or': 'or_'} + + for cpp_name in reg.list_functions(): + name = rewrites.get(cpp_name, cpp_name) + func = reg.get_function(cpp_name) + if func.kind == "hash_aggregate": + # Hash aggregate functions are not callable, + # so let's not expose them at module level. + continue + if func.kind == "scalar_aggregate" and func.arity == 0: + # Nullary scalar aggregate functions are not callable + # directly so let's not expose them at module level. + continue + assert name not in g, name + g[cpp_name] = g[name] = _wrap_function(name, func) + + +_make_global_functions() + + +def cast(arr, target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast array values to another data type. Can also be invoked as an array + instance method. + + Parameters + ---------- + arr : Array-like + target_type : DataType or str + Type to cast to + safe : bool, default True + Check for overflows or other unsafe conversions + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. 
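The wrappers generated by `_make_global_functions` accept compute options either as keyword arguments or as a prebuilt options object, but not both at once (see `_handle_options` above). A minimal sketch, assuming a standard pyarrow build in which the `round` kernel is registered:

    import pyarrow as pa
    import pyarrow.compute as pc

    arr = pa.array([1.234, 5.678])

    # Keyword arguments are forwarded to the options class constructor ...
    r1 = pc.round(arr, ndigits=2)
    # ... which is equivalent to passing the options object explicitly.
    r2 = pc.round(arr, options=pc.RoundOptions(ndigits=2))
    assert r1.equals(r2)

    # Mixing both forms is rejected by _handle_options.
    try:
        pc.round(arr, ndigits=2, options=pc.RoundOptions(ndigits=2))
    except TypeError:
        pass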
+ + Examples + -------- + >>> from datetime import datetime + >>> import pyarrow as pa + >>> arr = pa.array([datetime(2010, 1, 1), datetime(2015, 1, 1)]) + >>> arr.type + TimestampType(timestamp[us]) + + You can use ``pyarrow.DataType`` objects to specify the target type: + + >>> cast(arr, pa.timestamp('ms')) + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + + >>> cast(arr, pa.timestamp('ms')).type + TimestampType(timestamp[ms]) + + Alternatively, it is also supported to use the string aliases for these + types: + + >>> arr.cast('timestamp[ms]') + + [ + 2010-01-01 00:00:00.000, + 2015-01-01 00:00:00.000 + ] + >>> arr.cast('timestamp[ms]').type + TimestampType(timestamp[ms]) + + Returns + ------- + casted : Array + The cast result as a new Array + """ + safe_vars_passed = (safe is not None) or (target_type is not None) + + if safe_vars_passed and (options is not None): + raise ValueError("Must either pass values for 'target_type' and 'safe'" + " or pass a value for 'options'") + + if options is None: + target_type = pa.types.lib.ensure_type(target_type) + if safe is False: + options = CastOptions.unsafe(target_type) + else: + options = CastOptions.safe(target_type) + return call_function("cast", [arr], options, memory_pool) + + +def index(data, value, start=None, end=None, *, memory_pool=None): + """ + Find the index of the first occurrence of a given value. + + Parameters + ---------- + data : Array-like + value : Scalar-like object + The value to search for. + start : int, optional + end : int, optional + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + index : int + the index, or -1 if not found + """ + if start is not None: + if end is not None: + data = data.slice(start, end - start) + else: + data = data.slice(start) + elif end is not None: + data = data.slice(0, end) + + if not isinstance(value, pa.Scalar): + value = pa.scalar(value, type=data.type) + elif data.type != value.type: + value = pa.scalar(value.as_py(), type=data.type) + options = IndexOptions(value=value) + result = call_function('index', [data], options, memory_pool) + if start is not None and result.as_py() >= 0: + result = pa.scalar(result.as_py() + start, type=pa.int64()) + return result + + +def take(data, indices, *, boundscheck=True, memory_pool=None): + """ + Select values (or records) from array- or table-like data given integer + selection indices. + + The result will be of the same type(s) as the input, with elements taken + from the input array (or record batch / table fields) at the given + indices. If an index is null then the corresponding value in the output + will be null. + + Parameters + ---------- + data : Array, ChunkedArray, RecordBatch, or Table + indices : Array, ChunkedArray + Must be of integer type + boundscheck : boolean, default True + Whether to boundscheck the indices. If False and there is an out of + bounds index, will likely cause the process to crash. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. 
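The `index` wrapper above slices the input when `start`/`end` are given and then adds `start` back onto a non-negative result, so the returned position always refers to the original array. A small sketch of that behaviour:

    import pyarrow as pa
    import pyarrow.compute as pc

    arr = pa.array([1, 2, 3, 2])

    assert pc.index(arr, 2).as_py() == 1             # first occurrence
    assert pc.index(arr, 2, start=2).as_py() == 3    # offset is added back
    assert pc.index(arr, 9).as_py() == -1            # not found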
+ + Returns + ------- + result : depends on inputs + Selected values for the given indices + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> indices = pa.array([0, None, 4, 3]) + >>> arr.take(indices) + + [ + "a", + null, + "e", + null + ] + """ + options = TakeOptions(boundscheck=boundscheck) + return call_function('take', [data, indices], options, memory_pool) + + +def fill_null(values, fill_value): + """Replace each null element in values with a corresponding + element from fill_value. + + If fill_value is scalar-like, then every null element in values + will be replaced with fill_value. If fill_value is array-like, + then the i-th element in values will be replaced with the i-th + element in fill_value. + + The fill_value's type must be the same as that of values, or it + must be able to be implicitly casted to the array's type. + + This is an alias for :func:`coalesce`. + + Parameters + ---------- + values : Array, ChunkedArray, or Scalar-like object + Each null element is replaced with the corresponding value + from fill_value. + fill_value : Array, ChunkedArray, or Scalar-like object + If not same type as values, will attempt to cast. + + Returns + ------- + result : depends on inputs + Values with all null elements replaced + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array([1, 2, None, 3], type=pa.int8()) + >>> fill_value = pa.scalar(5, type=pa.int8()) + >>> arr.fill_null(fill_value) + + [ + 1, + 2, + 5, + 3 + ] + >>> arr = pa.array([1, 2, None, 4, None]) + >>> arr.fill_null(pa.array([10, 20, 30, 40, 50])) + + [ + 1, + 2, + 30, + 4, + 50 + ] + """ + if not isinstance(fill_value, (pa.Array, pa.ChunkedArray, pa.Scalar)): + fill_value = pa.scalar(fill_value, type=values.type) + elif values.type != fill_value.type: + fill_value = pa.scalar(fill_value.as_py(), type=values.type) + + return call_function("coalesce", [values, fill_value]) + + +def top_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the top-k ordered elements from array- or table-like + data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. + + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get top indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + result : Array + Indices of the top-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.top_k_unstable(arr, k=3) + + [ + 5, + 4, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "descending")) + else: + sort_keys = map(lambda key_name: (key_name, "descending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def bottom_k_unstable(values, k, sort_keys=None, *, memory_pool=None): + """ + Select the indices of the bottom-k ordered elements from + array- or table-like data. + + This is a specialization for :func:`select_k_unstable`. Output is not + guaranteed to be stable. 
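As the `fill_null` implementation above shows, a plain Python fill value is wrapped as a scalar of the input's type before the underlying `coalesce` kernel runs, while an array-like fill value is matched element-wise. A minimal sketch:

    import pyarrow as pa
    import pyarrow.compute as pc

    arr = pa.array([1.5, None, 3.0])

    # The integer 0 is promoted to a float64 scalar to match arr.type.
    assert pc.fill_null(arr, 0).to_pylist() == [1.5, 0.0, 3.0]

    # An array-like fill value replaces nulls position by position.
    fill = pa.array([10.0, 20.0, 30.0])
    assert pc.fill_null(arr, fill).to_pylist() == [1.5, 20.0, 3.0]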
+ + Parameters + ---------- + values : Array, ChunkedArray, RecordBatch, or Table + Data to sort and get bottom indices from. + k : int + The number of `k` elements to keep. + sort_keys : List-like + Column key names to order by when input is table-like data. + memory_pool : MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Returns + ------- + result : Array of indices + Indices of the bottom-k ordered elements + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.compute as pc + >>> arr = pa.array(["a", "b", "c", None, "e", "f"]) + >>> pc.bottom_k_unstable(arr, k=3) + + [ + 0, + 1, + 2 + ] + """ + if sort_keys is None: + sort_keys = [] + if isinstance(values, (pa.Array, pa.ChunkedArray)): + sort_keys.append(("dummy", "ascending")) + else: + sort_keys = map(lambda key_name: (key_name, "ascending"), sort_keys) + options = SelectKOptions(k, sort_keys) + return call_function("select_k_unstable", [values], options, memory_pool) + + +def random(n, *, initializer='system', options=None, memory_pool=None): + """ + Generate numbers in the range [0, 1). + + Generated values are uniformly-distributed, double-precision + in range [0, 1). Algorithm and seed can be changed via RandomOptions. + + Parameters + ---------- + n : int + Number of values to generate, must be greater than or equal to 0 + initializer : int or str + How to initialize the underlying random generator. + If an integer is given, it is used as a seed. + If "system" is given, the random generator is initialized with + a system-specific source of (hopefully true) randomness. + Other values are invalid. + options : pyarrow.compute.RandomOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """ + options = RandomOptions(initializer=initializer) + return call_function("random", [], options, memory_pool, length=n) + + +def field(*name_or_index): + """Reference a column of the dataset. + + Stores only the field's name. Type and other information is known only when + the expression is bound to a dataset having an explicit scheme. + + Nested references are allowed by passing multiple names or a tuple of + names. For example ``('foo', 'bar')`` references the field named "bar" + inside the field named "foo". + + Parameters + ---------- + *name_or_index : string, multiple strings, tuple or int + The name or index of the (possibly nested) field the expression + references to. + + Returns + ------- + field_expr : Expression + Reference to the given field + + Examples + -------- + >>> import pyarrow.compute as pc + >>> pc.field("a") + + >>> pc.field(1) + + >>> pc.field(("a", "b")) + >> pc.field("a", "b") + tobytes(path) + + check_status(Initialize(options)) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/csv.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/csv.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae197f9f200f44d8a8a65851a89025f61c4d842 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/csv.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from pyarrow._csv import ( # noqa + ReadOptions, ParseOptions, ConvertOptions, ISO8601, + open_csv, read_csv, CSVStreamingReader, write_csv, + WriteOptions, CSVWriter, InvalidRow) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/cuda.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..18c530d4afe406366b6ff7c12cbc1c6813081e04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/cuda.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + + +from pyarrow._cuda import (Context, IpcMemHandle, CudaBuffer, + HostBuffer, BufferReader, BufferWriter, + new_host_buffer, + serialize_record_batch, read_message, + read_record_batch) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/dataset.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9301a5fee5ade572772d376d10a7d0d735bbdd80 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/dataset.py @@ -0,0 +1,1023 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Dataset is currently unstable. 
APIs subject to change without notice.""" + +import pyarrow as pa +from pyarrow.util import _is_iterable, _stringify_path, _is_path_like + +try: + from pyarrow._dataset import ( # noqa + CsvFileFormat, + CsvFragmentScanOptions, + JsonFileFormat, + JsonFragmentScanOptions, + Dataset, + DatasetFactory, + DirectoryPartitioning, + FeatherFileFormat, + FilenamePartitioning, + FileFormat, + FileFragment, + FileSystemDataset, + FileSystemDatasetFactory, + FileSystemFactoryOptions, + FileWriteOptions, + Fragment, + FragmentScanOptions, + HivePartitioning, + IpcFileFormat, + IpcFileWriteOptions, + InMemoryDataset, + Partitioning, + PartitioningFactory, + Scanner, + TaggedRecordBatch, + UnionDataset, + UnionDatasetFactory, + WrittenFile, + get_partition_keys, + get_partition_keys as _get_partition_keys, # keep for backwards compatibility + _filesystemdataset_write, + ) +except ImportError as exc: + raise ImportError( + f"The pyarrow installation is not built with support for 'dataset' ({str(exc)})" + ) from None + +# keep Expression functionality exposed here for backwards compatibility +from pyarrow.compute import Expression, scalar, field # noqa + + +_orc_available = False +_orc_msg = ( + "The pyarrow installation is not built with support for the ORC file " + "format." +) + +try: + from pyarrow._dataset_orc import OrcFileFormat + _orc_available = True +except ImportError: + pass + +_parquet_available = False +_parquet_msg = ( + "The pyarrow installation is not built with support for the Parquet file " + "format." +) + +try: + from pyarrow._dataset_parquet import ( # noqa + ParquetDatasetFactory, + ParquetFactoryOptions, + ParquetFileFormat, + ParquetFileFragment, + ParquetFileWriteOptions, + ParquetFragmentScanOptions, + ParquetReadOptions, + RowGroupInfo, + ) + _parquet_available = True +except ImportError: + pass + + +try: + from pyarrow._dataset_parquet_encryption import ( # noqa + ParquetDecryptionConfig, + ParquetEncryptionConfig, + ) +except ImportError: + pass + + +def __getattr__(name): + if name == "OrcFileFormat" and not _orc_available: + raise ImportError(_orc_msg) + + if name == "ParquetFileFormat" and not _parquet_available: + raise ImportError(_parquet_msg) + + raise AttributeError( + "module 'pyarrow.dataset' has no attribute '{0}'".format(name) + ) + + +def partitioning(schema=None, field_names=None, flavor=None, + dictionaries=None): + """ + Specify a partitioning scheme. + + The supported schemes include: + + - "DirectoryPartitioning": this scheme expects one segment in the file path + for each field in the specified schema (all fields are required to be + present). For example given schema the path + "/2009/11" would be parsed to ("year"_ == 2009 and "month"_ == 11). + - "HivePartitioning": a scheme for "/$key=$value/" nested directories as + found in Apache Hive. This is a multi-level, directory based partitioning + scheme. Data is partitioned by static values of a particular column in + the schema. Partition keys are represented in the form $key=$value in + directory names. Field order is ignored, as are missing or unrecognized + field names. + For example, given schema, a possible + path would be "/year=2009/month=11/day=15" (but the field order does not + need to match). + - "FilenamePartitioning": this scheme expects the partitions will have + filenames containing the field values separated by "_". + For example, given schema, a possible + partition filename "2009_11_part-0.parquet" would be parsed + to ("year"_ == 2009 and "month"_ == 11). 
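A compact sketch of how the three flavors described above map paths to field values, assuming the `Partitioning.parse` helper of current pyarrow; the concrete paths are illustrative:

    import pyarrow as pa
    import pyarrow.dataset as ds

    schema = pa.schema([("year", pa.int16()), ("month", pa.int8())])

    # Directory flavor: one path segment per schema field, in order.
    ds.partitioning(schema).parse("/2009/11")
    # Hive flavor: key=value segments, order-insensitive.
    ds.partitioning(schema, flavor="hive").parse("/month=11/year=2009")
    # Filename flavor: values joined by "_" in the file name.
    ds.partitioning(schema, flavor="filename").parse("2009_11_part-0.parquet")
    # Each call returns an expression like ((year == 2009) and (month == 11)).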
+ + Parameters + ---------- + schema : pyarrow.Schema, default None + The schema that describes the partitions present in the file path. + If not specified, and `field_names` and/or `flavor` are specified, + the schema will be inferred from the file path (and a + PartitioningFactory is returned). + field_names : list of str, default None + A list of strings (field names). If specified, the schema's types are + inferred from the file paths (only valid for DirectoryPartitioning). + flavor : str, default None + The default is DirectoryPartitioning. Specify ``flavor="hive"`` for + a HivePartitioning, and ``flavor="filename"`` for a + FilenamePartitioning. + dictionaries : dict[str, Array] + If the type of any field of `schema` is a dictionary type, the + corresponding entry of `dictionaries` must be an array containing + every value which may be taken by the corresponding column or an + error will be raised in parsing. Alternatively, pass `infer` to have + Arrow discover the dictionary values, in which case a + PartitioningFactory is returned. + + Returns + ------- + Partitioning or PartitioningFactory + The partitioning scheme + + Examples + -------- + + Specify the Schema for paths like "/2009/June": + + >>> import pyarrow as pa + >>> import pyarrow.dataset as ds + >>> part = ds.partitioning(pa.schema([("year", pa.int16()), + ... ("month", pa.string())])) + + or let the types be inferred by only specifying the field names: + + >>> part = ds.partitioning(field_names=["year", "month"]) + + For paths like "/2009/June", the year will be inferred as int32 while month + will be inferred as string. + + Specify a Schema with dictionary encoding, providing dictionary values: + + >>> part = ds.partitioning( + ... pa.schema([ + ... ("year", pa.int16()), + ... ("month", pa.dictionary(pa.int8(), pa.string())) + ... ]), + ... dictionaries={ + ... "month": pa.array(["January", "February", "March"]), + ... }) + + Alternatively, specify a Schema with dictionary encoding, but have Arrow + infer the dictionary values: + + >>> part = ds.partitioning( + ... pa.schema([ + ... ("year", pa.int16()), + ... ("month", pa.dictionary(pa.int8(), pa.string())) + ... ]), + ... dictionaries="infer") + + Create a Hive scheme for a path like "/year=2009/month=11": + + >>> part = ds.partitioning( + ... pa.schema([("year", pa.int16()), ("month", pa.int8())]), + ... 
flavor="hive") + + A Hive scheme can also be discovered from the directory structure (and + types will be inferred): + + >>> part = ds.partitioning(flavor="hive") + """ + if flavor is None: + # default flavor + if schema is not None: + if field_names is not None: + raise ValueError( + "Cannot specify both 'schema' and 'field_names'") + if dictionaries == 'infer': + return DirectoryPartitioning.discover(schema=schema) + return DirectoryPartitioning(schema, dictionaries) + elif field_names is not None: + if isinstance(field_names, list): + return DirectoryPartitioning.discover(field_names) + else: + raise ValueError( + "Expected list of field names, got {}".format( + type(field_names))) + else: + raise ValueError( + "For the default directory flavor, need to specify " + "a Schema or a list of field names") + if flavor == "filename": + if schema is not None: + if field_names is not None: + raise ValueError( + "Cannot specify both 'schema' and 'field_names'") + if dictionaries == 'infer': + return FilenamePartitioning.discover(schema=schema) + return FilenamePartitioning(schema, dictionaries) + elif field_names is not None: + if isinstance(field_names, list): + return FilenamePartitioning.discover(field_names) + else: + raise ValueError( + "Expected list of field names, got {}".format( + type(field_names))) + else: + raise ValueError( + "For the filename flavor, need to specify " + "a Schema or a list of field names") + elif flavor == 'hive': + if field_names is not None: + raise ValueError("Cannot specify 'field_names' for flavor 'hive'") + elif schema is not None: + if isinstance(schema, pa.Schema): + if dictionaries == 'infer': + return HivePartitioning.discover(schema=schema) + return HivePartitioning(schema, dictionaries) + else: + raise ValueError( + "Expected Schema for 'schema', got {}".format( + type(schema))) + else: + return HivePartitioning.discover() + else: + raise ValueError("Unsupported flavor") + + +def _ensure_partitioning(scheme): + """ + Validate input and return a Partitioning(Factory). + + It passes None through if no partitioning scheme is defined. + """ + if scheme is None: + pass + elif isinstance(scheme, str): + scheme = partitioning(flavor=scheme) + elif isinstance(scheme, list): + scheme = partitioning(field_names=scheme) + elif isinstance(scheme, (Partitioning, PartitioningFactory)): + pass + else: + ValueError("Expected Partitioning or PartitioningFactory, got {}" + .format(type(scheme))) + return scheme + + +def _ensure_format(obj): + if isinstance(obj, FileFormat): + return obj + elif obj == "parquet": + if not _parquet_available: + raise ValueError(_parquet_msg) + return ParquetFileFormat() + elif obj in {"ipc", "arrow"}: + return IpcFileFormat() + elif obj == "feather": + return FeatherFileFormat() + elif obj == "csv": + return CsvFileFormat() + elif obj == "orc": + if not _orc_available: + raise ValueError(_orc_msg) + return OrcFileFormat() + elif obj == "json": + return JsonFileFormat() + else: + raise ValueError("format '{}' is not supported".format(obj)) + + +def _ensure_multiple_sources(paths, filesystem=None): + """ + Treat a list of paths as files belonging to a single file system + + If the file system is local then also validates that all paths + are referencing existing *files* otherwise any non-file paths will be + silently skipped (for example on a remote filesystem). + + Parameters + ---------- + paths : list of path-like + Note that URIs are not allowed. 
+ filesystem : FileSystem or str, optional + If an URI is passed, then its path component will act as a prefix for + the file paths. + + Returns + ------- + (FileSystem, list of str) + File system object and a list of normalized paths. + + Raises + ------ + TypeError + If the passed filesystem has wrong type. + IOError + If the file system is local and a referenced path is not available or + not a file. + """ + from pyarrow.fs import ( + LocalFileSystem, SubTreeFileSystem, _MockFileSystem, FileType, + _ensure_filesystem + ) + + if filesystem is None: + # fall back to local file system as the default + filesystem = LocalFileSystem() + else: + # construct a filesystem if it is a valid URI + filesystem = _ensure_filesystem(filesystem) + + is_local = ( + isinstance(filesystem, (LocalFileSystem, _MockFileSystem)) or + (isinstance(filesystem, SubTreeFileSystem) and + isinstance(filesystem.base_fs, LocalFileSystem)) + ) + + # allow normalizing irregular paths such as Windows local paths + paths = [filesystem.normalize_path(_stringify_path(p)) for p in paths] + + # validate that all of the paths are pointing to existing *files* + # possible improvement is to group the file_infos by type and raise for + # multiple paths per error category + if is_local: + for info in filesystem.get_file_info(paths): + file_type = info.type + if file_type == FileType.File: + continue + elif file_type == FileType.NotFound: + raise FileNotFoundError(info.path) + elif file_type == FileType.Directory: + raise IsADirectoryError( + 'Path {} points to a directory, but only file paths are ' + 'supported. To construct a nested or union dataset pass ' + 'a list of dataset objects instead.'.format(info.path) + ) + else: + raise IOError( + 'Path {} exists but its type is unknown (could be a ' + 'special file such as a Unix socket or character device, ' + 'or Windows NUL / CON / ...)'.format(info.path) + ) + + return filesystem, paths + + +def _ensure_single_source(path, filesystem=None): + """ + Treat path as either a recursively traversable directory or a single file. + + Parameters + ---------- + path : path-like + filesystem : FileSystem or str, optional + If an URI is passed, then its path component will act as a prefix for + the file paths. + + Returns + ------- + (FileSystem, list of str or fs.Selector) + File system object and either a single item list pointing to a file or + an fs.Selector object pointing to a directory. + + Raises + ------ + TypeError + If the passed filesystem has wrong type. + FileNotFoundError + If the referenced file or directory doesn't exist. 
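A short sketch of how these helpers shape the public `dataset()` entry point: a single path may be a file or a directory (traversed recursively via a FileSelector), while a list must contain concrete file paths living on one filesystem, with no URIs. The paths below are hypothetical:

    import pyarrow.dataset as ds

    # Single path: file or directory, discovered recursively.
    d1 = ds.dataset("events/", format="parquet")

    # List of paths: plain file paths only, validated on local filesystems
    # by _ensure_multiple_sources.
    d2 = ds.dataset(["events/a.parquet", "events/b.parquet"],
                    format="parquet")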
+ """ + from pyarrow.fs import FileType, FileSelector, _resolve_filesystem_and_path + + # at this point we already checked that `path` is a path-like + filesystem, path = _resolve_filesystem_and_path(path, filesystem) + + # ensure that the path is normalized before passing to dataset discovery + path = filesystem.normalize_path(path) + + # retrieve the file descriptor + file_info = filesystem.get_file_info(path) + + # depending on the path type either return with a recursive + # directory selector or as a list containing a single file + if file_info.type == FileType.Directory: + paths_or_selector = FileSelector(path, recursive=True) + elif file_info.type == FileType.File: + paths_or_selector = [path] + else: + raise FileNotFoundError(path) + + return filesystem, paths_or_selector + + +def _filesystem_dataset(source, schema=None, filesystem=None, + partitioning=None, format=None, + partition_base_dir=None, exclude_invalid_files=None, + selector_ignore_prefixes=None): + """ + Create a FileSystemDataset which can be used to build a Dataset. + + Parameters are documented in the dataset function. + + Returns + ------- + FileSystemDataset + """ + format = _ensure_format(format or 'parquet') + partitioning = _ensure_partitioning(partitioning) + + if isinstance(source, (list, tuple)): + fs, paths_or_selector = _ensure_multiple_sources(source, filesystem) + else: + fs, paths_or_selector = _ensure_single_source(source, filesystem) + + options = FileSystemFactoryOptions( + partitioning=partitioning, + partition_base_dir=partition_base_dir, + exclude_invalid_files=exclude_invalid_files, + selector_ignore_prefixes=selector_ignore_prefixes + ) + factory = FileSystemDatasetFactory(fs, paths_or_selector, format, options) + + return factory.finish(schema) + + +def _in_memory_dataset(source, schema=None, **kwargs): + if any(v is not None for v in kwargs.values()): + raise ValueError( + "For in-memory datasets, you cannot pass any additional arguments") + return InMemoryDataset(source, schema) + + +def _union_dataset(children, schema=None, **kwargs): + if any(v is not None for v in kwargs.values()): + raise ValueError( + "When passing a list of Datasets, you cannot pass any additional " + "arguments" + ) + + if schema is None: + # unify the children datasets' schemas + schema = pa.unify_schemas([child.schema for child in children]) + + for child in children: + if getattr(child, "_scan_options", None): + raise ValueError( + "Creating an UnionDataset from filtered or projected Datasets " + "is currently not supported. Union the unfiltered datasets " + "and apply the filter to the resulting union." + ) + + # create datasets with the requested schema + children = [child.replace_schema(schema) for child in children] + + return UnionDataset(schema, children) + + +def parquet_dataset(metadata_path, schema=None, filesystem=None, format=None, + partitioning=None, partition_base_dir=None): + """ + Create a FileSystemDataset from a `_metadata` file created via + `pyarrow.parquet.write_metadata`. + + Parameters + ---------- + metadata_path : path, + Path pointing to a single file parquet metadata file + schema : Schema, optional + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. + filesystem : FileSystem or URI string, default None + If a single path is given as source and filesystem is None, then the + filesystem will be inferred from the path. 
+ If an URI string is passed, then a filesystem object is constructed + using the URI's optional path component as a directory prefix. See the + examples below. + Note that the URIs on Windows must follow 'file:///C:...' or + 'file:/C:...' patterns. + format : ParquetFileFormat + An instance of a ParquetFileFormat if special options needs to be + passed. + partitioning : Partitioning, PartitioningFactory, str, list of str + The partitioning scheme specified with the ``partitioning()`` + function. A flavor string can be used as shortcut, and with a list of + field names a DirectoryPartitioning will be inferred. + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. + The ignored files will still be part of the Dataset, but will not + have partition information. + + Returns + ------- + FileSystemDataset + The dataset corresponding to the given metadata + """ + from pyarrow.fs import LocalFileSystem, _ensure_filesystem + + if format is None: + format = ParquetFileFormat() + elif not isinstance(format, ParquetFileFormat): + raise ValueError("format argument must be a ParquetFileFormat") + + if filesystem is None: + filesystem = LocalFileSystem() + else: + filesystem = _ensure_filesystem(filesystem) + + metadata_path = filesystem.normalize_path(_stringify_path(metadata_path)) + options = ParquetFactoryOptions( + partition_base_dir=partition_base_dir, + partitioning=_ensure_partitioning(partitioning) + ) + + factory = ParquetDatasetFactory( + metadata_path, filesystem, format, options=options) + return factory.finish(schema) + + +def dataset(source, schema=None, format=None, filesystem=None, + partitioning=None, partition_base_dir=None, + exclude_invalid_files=None, ignore_prefixes=None): + """ + Open a dataset. + + Datasets provides functionality to efficiently work with tabular, + potentially larger than memory and multi-file dataset. + + - A unified interface for different sources, like Parquet and Feather + - Discovery of sources (crawling directories, handle directory-based + partitioned datasets, basic schema normalization) + - Optimized reading with predicate pushdown (filtering rows), projection + (selecting columns), parallel reading or fine-grained managing of tasks. + + Note that this is the high-level API, to have more control over the dataset + construction use the low-level API classes (FileSystemDataset, + FilesystemDatasetFactory, etc.) + + Parameters + ---------- + source : path, list of paths, dataset, list of datasets, (list of) \ +RecordBatch or Table, iterable of RecordBatch, RecordBatchReader, or URI + Path pointing to a single file: + Open a FileSystemDataset from a single file. + Path pointing to a directory: + The directory gets discovered recursively according to a + partitioning scheme if given. + List of file paths: + Create a FileSystemDataset from explicitly given files. The files + must be located on the same filesystem given by the filesystem + parameter. + Note that in contrary of construction from a single file, passing + URIs as paths is not allowed. + List of datasets: + A nested UnionDataset gets constructed, it allows arbitrary + composition of other datasets. + Note that additional keyword arguments are not allowed. + (List of) batches or tables, iterable of batches, or RecordBatchReader: + Create an InMemoryDataset. 
If an iterable or empty list is given, + a schema must also be given. If an iterable or RecordBatchReader + is given, the resulting dataset can only be scanned once; further + attempts will raise an error. + schema : Schema, optional + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. + format : FileFormat or str + Currently "parquet", "ipc"/"arrow"/"feather", "csv", "json", and "orc" are + supported. For Feather, only version 2 files are supported. + filesystem : FileSystem or URI string, default None + If a single path is given as source and filesystem is None, then the + filesystem will be inferred from the path. + If an URI string is passed, then a filesystem object is constructed + using the URI's optional path component as a directory prefix. See the + examples below. + Note that the URIs on Windows must follow 'file:///C:...' or + 'file:/C:...' patterns. + partitioning : Partitioning, PartitioningFactory, str, list of str + The partitioning scheme specified with the ``partitioning()`` + function. A flavor string can be used as shortcut, and with a list of + field names a DirectoryPartitioning will be inferred. + partition_base_dir : str, optional + For the purposes of applying the partitioning, paths will be + stripped of the partition_base_dir. Files not matching the + partition_base_dir prefix will be skipped for partitioning discovery. + The ignored files will still be part of the Dataset, but will not + have partition information. + exclude_invalid_files : bool, optional (default True) + If True, invalid files will be excluded (file format specific check). + This will incur IO for each files in a serial and single threaded + fashion. Disabling this feature will skip the IO, but unsupported + files may be present in the Dataset (resulting in an error at scan + time). + ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. + + Returns + ------- + dataset : Dataset + Either a FileSystemDataset or a UnionDataset depending on the source + parameter. + + Examples + -------- + Creating an example Table: + + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, "file.parquet") + + Opening a single file: + + >>> import pyarrow.dataset as ds + >>> dataset = ds.dataset("file.parquet", format="parquet") + >>> dataset.to_table() + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2020,2022,2021,2022,2019,2021]] + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Opening a single file with an explicit schema: + + >>> myschema = pa.schema([ + ... ('n_legs', pa.int64()), + ... ('animal', pa.string())]) + >>> dataset = ds.dataset("file.parquet", schema=myschema, format="parquet") + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Opening a dataset for a single directory: + + >>> ds.write_dataset(table, "partitioned_dataset", format="parquet", + ... 
partitioning=['year']) + >>> dataset = ds.dataset("partitioned_dataset", format="parquet") + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[5],[2],[4,100],[2,4]] + animal: [["Brittle stars"],["Flamingo"],...["Parrot","Horse"]] + + For a single directory from a S3 bucket: + + >>> ds.dataset("s3://mybucket/nyc-taxi/", + ... format="parquet") # doctest: +SKIP + + Opening a dataset from a list of relatives local paths: + + >>> dataset = ds.dataset([ + ... "partitioned_dataset/2019/part-0.parquet", + ... "partitioned_dataset/2020/part-0.parquet", + ... "partitioned_dataset/2021/part-0.parquet", + ... ], format='parquet') + >>> dataset.to_table() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[5],[2],[4,100]] + animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"]] + + With filesystem provided: + + >>> paths = [ + ... 'part0/data.parquet', + ... 'part1/data.parquet', + ... 'part3/data.parquet', + ... ] + >>> ds.dataset(paths, filesystem='file:///directory/prefix, + ... format='parquet') # doctest: +SKIP + + Which is equivalent with: + + >>> fs = SubTreeFileSystem("/directory/prefix", + ... LocalFileSystem()) # doctest: +SKIP + >>> ds.dataset(paths, filesystem=fs, format='parquet') # doctest: +SKIP + + With a remote filesystem URI: + + >>> paths = [ + ... 'nested/directory/part0/data.parquet', + ... 'nested/directory/part1/data.parquet', + ... 'nested/directory/part3/data.parquet', + ... ] + >>> ds.dataset(paths, filesystem='s3://bucket/', + ... format='parquet') # doctest: +SKIP + + Similarly to the local example, the directory prefix may be included in the + filesystem URI: + + >>> ds.dataset(paths, filesystem='s3://bucket/nested/directory', + ... format='parquet') # doctest: +SKIP + + Construction of a nested dataset: + + >>> ds.dataset([ + ... dataset("s3://old-taxi-data", format="parquet"), + ... dataset("local/path/to/data", format="ipc") + ... ]) # doctest: +SKIP + """ + # collect the keyword arguments for later reuse + kwargs = dict( + schema=schema, + filesystem=filesystem, + partitioning=partitioning, + format=format, + partition_base_dir=partition_base_dir, + exclude_invalid_files=exclude_invalid_files, + selector_ignore_prefixes=ignore_prefixes + ) + + if _is_path_like(source): + return _filesystem_dataset(source, **kwargs) + elif isinstance(source, (tuple, list)): + if all(_is_path_like(elem) for elem in source): + return _filesystem_dataset(source, **kwargs) + elif all(isinstance(elem, Dataset) for elem in source): + return _union_dataset(source, **kwargs) + elif all(isinstance(elem, (pa.RecordBatch, pa.Table)) + for elem in source): + return _in_memory_dataset(source, **kwargs) + else: + unique_types = set(type(elem).__name__ for elem in source) + type_names = ', '.join('{}'.format(t) for t in unique_types) + raise TypeError( + 'Expected a list of path-like or dataset objects, or a list ' + 'of batches or tables. The given list contains the following ' + 'types: {}'.format(type_names) + ) + elif isinstance(source, (pa.RecordBatch, pa.Table)): + return _in_memory_dataset(source, **kwargs) + else: + raise TypeError( + 'Expected a path-like, list of path-likes or a list of Datasets ' + 'instead of the given type: {}'.format(type(source).__name__) + ) + + +def _ensure_write_partitioning(part, schema, flavor): + if isinstance(part, PartitioningFactory): + raise ValueError("A PartitioningFactory cannot be used. 
" + "Did you call the partitioning function " + "without supplying a schema?") + + if isinstance(part, Partitioning) and flavor: + raise ValueError( + "Providing a partitioning_flavor with " + "a Partitioning object is not supported" + ) + elif isinstance(part, (tuple, list)): + # Name of fields were provided instead of a partitioning object. + # Create a partitioning factory with those field names. + part = partitioning( + schema=pa.schema([schema.field(f) for f in part]), + flavor=flavor + ) + elif part is None: + part = partitioning(pa.schema([]), flavor=flavor) + + if not isinstance(part, Partitioning): + raise ValueError( + "partitioning must be a Partitioning object or " + "a list of column names" + ) + + return part + + +def write_dataset(data, base_dir, *, basename_template=None, format=None, + partitioning=None, partitioning_flavor=None, schema=None, + filesystem=None, file_options=None, use_threads=True, + max_partitions=None, max_open_files=None, + max_rows_per_file=None, min_rows_per_group=None, + max_rows_per_group=None, file_visitor=None, + existing_data_behavior='error', create_dir=True): + """ + Write a dataset to a given format and partitioning. + + Parameters + ---------- + data : Dataset, Table/RecordBatch, RecordBatchReader, list of \ +Table/RecordBatch, or iterable of RecordBatch + The data to write. This can be a Dataset instance or + in-memory Arrow data. If an iterable is given, the schema must + also be given. + base_dir : str + The root directory where to write the dataset. + basename_template : str, optional + A template string used to generate basenames of written data files. + The token '{i}' will be replaced with an automatically incremented + integer. If not specified, it defaults to + "part-{i}." + format.default_extname + format : FileFormat or str + The format in which to write the dataset. Currently supported: + "parquet", "ipc"/"arrow"/"feather", and "csv". If a FileSystemDataset + is being written and `format` is not specified, it defaults to the + same format as the specified FileSystemDataset. When writing a + Table or RecordBatch, this keyword is required. + partitioning : Partitioning or list[str], optional + The partitioning scheme specified with the ``partitioning()`` + function or a list of field names. When providing a list of + field names, you can use ``partitioning_flavor`` to drive which + partitioning type should be used. + partitioning_flavor : str, optional + One of the partitioning flavors supported by + ``pyarrow.dataset.partitioning``. If omitted will use the + default of ``partitioning()`` which is directory partitioning. + schema : Schema, optional + filesystem : FileSystem, optional + file_options : pyarrow.dataset.FileWriteOptions, optional + FileFormat specific write options, created using the + ``FileFormat.make_write_options()`` function. + use_threads : bool, default True + Write files in parallel. If enabled, then maximum parallelism will be + used determined by the number of available CPU cores. + max_partitions : int, default 1024 + Maximum number of partitions any batch may be written into. + max_open_files : int, default 1024 + If greater than 0 then this will limit the maximum number of + files that can be left open. If an attempt is made to open + too many files then the least recently used file will be closed. + If this setting is set too low you may end up fragmenting your + data into many small files. + max_rows_per_file : int, default 0 + Maximum number of rows per file. 
If greater than 0 then this will + limit how many rows are placed in any single file. Otherwise there + will be no limit and one file will be created in each output + directory unless files need to be closed to respect max_open_files + min_rows_per_group : int, default 0 + Minimum number of rows per group. When the value is greater than 0, + the dataset writer will batch incoming data and only write the row + groups to the disk when sufficient rows have accumulated. + max_rows_per_group : int, default 1024 * 1024 + Maximum number of rows per group. If the value is greater than 0, + then the dataset writer may split up large incoming batches into + multiple row groups. If this value is set, then min_rows_per_group + should also be set. Otherwise it could end up with very small row + groups. + file_visitor : function + If set, this function will be called with a WrittenFile instance + for each file created during the call. This object will have both + a path attribute and a metadata attribute. + + The path attribute will be a string containing the path to + the created file. + + The metadata attribute will be the parquet metadata of the file. + This metadata will have the file path attribute set and can be used + to build a _metadata file. The metadata attribute will be None if + the format is not parquet. + + Example visitor which simple collects the filenames created:: + + visited_paths = [] + + def file_visitor(written_file): + visited_paths.append(written_file.path) + existing_data_behavior : 'error' | 'overwrite_or_ignore' | \ +'delete_matching' + Controls how the dataset will handle data that already exists in + the destination. The default behavior ('error') is to raise an error + if any data exists in the destination. + + 'overwrite_or_ignore' will ignore any existing data and will + overwrite files with the same name as an output file. Other + existing files will be ignored. This behavior, in combination + with a unique basename_template for each write, will allow for + an append workflow. + + 'delete_matching' is useful when you are writing a partitioned + dataset. The first time each partition directory is encountered + the entire directory will be deleted. This allows you to overwrite + old partitions completely. + create_dir : bool, default True + If False, directories will not be created. This can be useful for + filesystems that do not require directories. + """ + from pyarrow.fs import _resolve_filesystem_and_path + + if isinstance(data, (list, tuple)): + schema = schema or data[0].schema + data = InMemoryDataset(data, schema=schema) + elif isinstance(data, (pa.RecordBatch, pa.Table)): + schema = schema or data.schema + data = InMemoryDataset(data, schema=schema) + elif isinstance(data, pa.ipc.RecordBatchReader) or _is_iterable(data): + data = Scanner.from_batches(data, schema=schema) + schema = None + elif not isinstance(data, (Dataset, Scanner)): + raise ValueError( + "Only Dataset, Scanner, Table/RecordBatch, RecordBatchReader, " + "a list of Tables/RecordBatches, or iterable of batches are " + "supported." + ) + + if format is None and isinstance(data, FileSystemDataset): + format = data.format + else: + format = _ensure_format(format) + + if file_options is None: + file_options = format.make_write_options() + + if format != file_options.format: + raise TypeError("Supplied FileWriteOptions have format {}, " + "which doesn't match supplied FileFormat {}".format( + format, file_options)) + + if basename_template is None: + basename_template = "part-{i}." 
+ format.default_extname + + if max_partitions is None: + max_partitions = 1024 + + if max_open_files is None: + max_open_files = 1024 + + if max_rows_per_file is None: + max_rows_per_file = 0 + + if max_rows_per_group is None: + max_rows_per_group = 1 << 20 + + if min_rows_per_group is None: + min_rows_per_group = 0 + + # at this point data is a Scanner or a Dataset, anything else + # was converted to one of those two. So we can grab the schema + # to build the partitioning object from Dataset. + if isinstance(data, Scanner): + partitioning_schema = data.projected_schema + else: + partitioning_schema = data.schema + partitioning = _ensure_write_partitioning(partitioning, + schema=partitioning_schema, + flavor=partitioning_flavor) + + filesystem, base_dir = _resolve_filesystem_and_path(base_dir, filesystem) + + if isinstance(data, Dataset): + scanner = data.scanner(use_threads=use_threads) + else: + # scanner was passed directly by the user, in which case a schema + # cannot be passed + if schema is not None: + raise ValueError("Cannot specify a schema when writing a Scanner") + scanner = data + + _filesystemdataset_write( + scanner, base_dir, basename_template, filesystem, partitioning, + file_options, max_partitions, file_visitor, existing_data_behavior, + max_open_files, max_rows_per_file, + min_rows_per_group, max_rows_per_group, create_dir + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/feather.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/feather.py new file mode 100644 index 0000000000000000000000000000000000000000..fbd0602597006734d66a9a965ea462fb35cbe178 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/feather.py @@ -0,0 +1,277 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import os + +from pyarrow.pandas_compat import _pandas_api # noqa +from pyarrow.lib import (Codec, Table, # noqa + concat_tables, schema) +import pyarrow.lib as ext +from pyarrow import _feather +from pyarrow._feather import FeatherError # noqa: F401 + + +class FeatherDataset: + """ + Encapsulates details of reading a list of Feather files. 
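A minimal usage sketch of the `write_dataset` routine defined above in pyarrow.dataset, assuming a writable local directory named `out`:

    import pyarrow as pa
    import pyarrow.dataset as ds

    table = pa.table({"year": [2020, 2021, 2021],
                      "n_legs": [2, 4, 100]})

    # With the hive flavor, partition columns become key=value directories,
    # e.g. out/year=2020/part-0.parquet, out/year=2021/part-0.parquet.
    ds.write_dataset(table, "out", format="parquet",
                     partitioning=["year"], partitioning_flavor="hive",
                     existing_data_behavior="overwrite_or_ignore")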
+ + Parameters + ---------- + path_or_paths : List[str] + A list of file names + validate_schema : bool, default True + Check that individual file schemas are all the same / compatible + """ + + def __init__(self, path_or_paths, validate_schema=True): + self.paths = path_or_paths + self.validate_schema = validate_schema + + def read_table(self, columns=None): + """ + Read multiple feather files as a single pyarrow.Table + + Parameters + ---------- + columns : List[str] + Names of columns to read from the file + + Returns + ------- + pyarrow.Table + Content of the file as a table (of columns) + """ + _fil = read_table(self.paths[0], columns=columns) + self._tables = [_fil] + self.schema = _fil.schema + + for path in self.paths[1:]: + table = read_table(path, columns=columns) + if self.validate_schema: + self.validate_schemas(path, table) + self._tables.append(table) + return concat_tables(self._tables) + + def validate_schemas(self, piece, table): + if not self.schema.equals(table.schema): + raise ValueError('Schema in {!s} was different. \n' + '{!s}\n\nvs\n\n{!s}' + .format(piece, self.schema, + table.schema)) + + def read_pandas(self, columns=None, use_threads=True): + """ + Read multiple Parquet files as a single pandas DataFrame + + Parameters + ---------- + columns : List[str] + Names of columns to read from the file + use_threads : bool, default True + Use multiple threads when converting to pandas + + Returns + ------- + pandas.DataFrame + Content of the file as a pandas DataFrame (of columns) + """ + return self.read_table(columns=columns).to_pandas( + use_threads=use_threads) + + +def check_chunked_overflow(name, col): + if col.num_chunks == 1: + return + + if col.type in (ext.binary(), ext.string()): + raise ValueError("Column '{}' exceeds 2GB maximum capacity of " + "a Feather binary column. This restriction may be " + "lifted in the future".format(name)) + else: + # TODO(wesm): Not sure when else this might be reached + raise ValueError("Column '{}' of type {} was chunked on conversion " + "to Arrow and cannot be currently written to " + "Feather format".format(name, str(col.type))) + + +_FEATHER_SUPPORTED_CODECS = {'lz4', 'zstd', 'uncompressed'} + + +def write_feather(df, dest, compression=None, compression_level=None, + chunksize=None, version=2): + """ + Write a pandas.DataFrame to Feather format. + + Parameters + ---------- + df : pandas.DataFrame or pyarrow.Table + Data to write out as Feather format. + dest : str + Local destination path. + compression : string, default None + Can be one of {"zstd", "lz4", "uncompressed"}. The default of None uses + LZ4 for V2 files if it is available, otherwise uncompressed. + compression_level : int, default None + Use a compression level particular to the chosen compressor. If None + use the default compression level + chunksize : int, default None + For V2 files, the internal maximum size of Arrow RecordBatch chunks + when writing the Arrow IPC file format. None means use the default, + which is currently 64K + version : int, default 2 + Feather file version. Version 2 is the current. 
Version 1 is the more + limited legacy format + """ + if _pandas_api.have_pandas: + if (_pandas_api.has_sparse and + isinstance(df, _pandas_api.pd.SparseDataFrame)): + df = df.to_dense() + + if _pandas_api.is_data_frame(df): + # Feather v1 creates a new column in the resultant Table to + # store index information if index type is not RangeIndex + + if version == 1: + preserve_index = False + elif version == 2: + preserve_index = None + else: + raise ValueError("Version value should either be 1 or 2") + + table = Table.from_pandas(df, preserve_index=preserve_index) + + if version == 1: + # Version 1 does not chunking + for i, name in enumerate(table.schema.names): + col = table[i] + check_chunked_overflow(name, col) + else: + table = df + + if version == 1: + if len(table.column_names) > len(set(table.column_names)): + raise ValueError("cannot serialize duplicate column names") + + if compression is not None: + raise ValueError("Feather V1 files do not support compression " + "option") + + if chunksize is not None: + raise ValueError("Feather V1 files do not support chunksize " + "option") + else: + if compression is None and Codec.is_available('lz4_frame'): + compression = 'lz4' + elif (compression is not None and + compression not in _FEATHER_SUPPORTED_CODECS): + raise ValueError('compression="{}" not supported, must be ' + 'one of {}'.format(compression, + _FEATHER_SUPPORTED_CODECS)) + + try: + _feather.write_feather(table, dest, compression=compression, + compression_level=compression_level, + chunksize=chunksize, version=version) + except Exception: + if isinstance(dest, str): + try: + os.remove(dest) + except os.error: + pass + raise + + +def read_feather(source, columns=None, use_threads=True, + memory_map=False, **kwargs): + """ + Read a pandas.DataFrame from Feather format. To read as pyarrow.Table use + feather.read_table. + + Parameters + ---------- + source : str file path, or file-like object + You can use MemoryMappedFile as source, for explicitly use memory map. + columns : sequence, optional + Only read a specific set of columns. If not provided, all columns are + read. + use_threads : bool, default True + Whether to parallelize reading using multiple threads. If false the + restriction is used in the conversion to Pandas as well as in the + reading from Feather format. + memory_map : boolean, default False + Use memory mapping when opening file on disk, when source is a str. + **kwargs + Additional keyword arguments passed on to `pyarrow.Table.to_pandas`. + + Returns + ------- + df : pandas.DataFrame + The contents of the Feather file as a pandas.DataFrame + """ + return (read_table( + source, columns=columns, memory_map=memory_map, + use_threads=use_threads).to_pandas(use_threads=use_threads, **kwargs)) + + +def read_table(source, columns=None, memory_map=False, use_threads=True): + """ + Read a pyarrow.Table from Feather format + + Parameters + ---------- + source : str file path, or file-like object + You can use MemoryMappedFile as source, for explicitly use memory map. + columns : sequence, optional + Only read a specific set of columns. If not provided, all columns are + read. + memory_map : boolean, default False + Use memory mapping when opening file on disk, when source is a str + use_threads : bool, default True + Whether to parallelize reading using multiple threads. 
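A hedged round trip through the helpers in this module, using a hypothetical path. `read_table` accepts either column names or integer indices (the dispatch appears just below), and `FeatherDataset` concatenates several files with a compatible schema:

import pyarrow as pa
import pyarrow.feather as feather

table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# Hypothetical destination; with the V2 default, compression falls back to
# LZ4 when the codec is available, as the logic above shows.
feather.write_feather(table, "/tmp/example.feather")

by_name = feather.read_table("/tmp/example.feather", columns=["a"])
by_index = feather.read_table("/tmp/example.feather", columns=[0])

# Several files with the same schema can be read back as one Table.
dataset = feather.FeatherDataset(["/tmp/example.feather"])
combined = dataset.read_table()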
+ + Returns + ------- + table : pyarrow.Table + The contents of the Feather file as a pyarrow.Table + """ + reader = _feather.FeatherReader( + source, use_memory_map=memory_map, use_threads=use_threads) + + if columns is None: + return reader.read() + + column_types = [type(column) for column in columns] + if all(map(lambda t: t == int, column_types)): + table = reader.read_indices(columns) + elif all(map(lambda t: t == str, column_types)): + table = reader.read_names(columns) + else: + column_type_names = [t.__name__ for t in column_types] + raise TypeError("Columns must be indices or names. " + "Got columns {} of types {}" + .format(columns, column_type_names)) + + # Feather v1 already respects the column selection + if reader.version < 3: + return table + # Feather v2 reads with sorted / deduplicated selection + elif sorted(set(columns)) == columns: + return table + else: + # follow exact order / selection of names + return table.select(columns) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/fs.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/fs.py new file mode 100644 index 0000000000000000000000000000000000000000..ead750ca44ef8858472796d31fb6f4b4470d3eb6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/fs.py @@ -0,0 +1,444 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +FileSystem abstraction to interact with various local and remote filesystems. +""" + +from pyarrow.util import _is_path_like, _stringify_path + +from pyarrow._fs import ( # noqa + FileSelector, + FileType, + FileInfo, + FileSystem, + LocalFileSystem, + SubTreeFileSystem, + _MockFileSystem, + FileSystemHandler, + PyFileSystem, + _copy_files, + _copy_files_selector, +) + +# For backward compatibility. +FileStats = FileInfo + +_not_imported = [] + +try: + from pyarrow._hdfs import HadoopFileSystem # noqa +except ImportError: + _not_imported.append("HadoopFileSystem") + +try: + from pyarrow._gcsfs import GcsFileSystem # noqa +except ImportError: + _not_imported.append("GcsFileSystem") + +try: + from pyarrow._s3fs import ( # noqa + AwsDefaultS3RetryStrategy, AwsStandardS3RetryStrategy, + S3FileSystem, S3LogLevel, S3RetryStrategy, ensure_s3_initialized, + finalize_s3, ensure_s3_finalized, initialize_s3, resolve_s3_region) +except ImportError: + _not_imported.append("S3FileSystem") +else: + # GH-38364: we don't initialize S3 eagerly as that could lead + # to crashes at shutdown even when S3 isn't used. + # Instead, S3 is initialized lazily using `ensure_s3_initialized` + # in assorted places. 
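With the imports above in place, the most common entry points of this module are `LocalFileSystem`, `FileSystem.from_uri` and `SubTreeFileSystem`. A small hedged sketch, with hypothetical local paths:

from pyarrow import fs

local = fs.LocalFileSystem()
info = local.get_file_info("/tmp")        # single path -> FileInfo
print(info.type)                          # e.g. FileType.Directory

# URI parsing returns a (filesystem, path) pair; the scheme picks the backend.
filesystem, path = fs.FileSystem.from_uri("file:///tmp")

# A SubTreeFileSystem pins every operation under a prefix, which is what
# _filesystem_from_str below builds when a URI carries a path component.
subtree = fs.SubTreeFileSystem("/tmp", local)
listing = local.get_file_info(fs.FileSelector("/tmp", recursive=False))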
+ import atexit + atexit.register(ensure_s3_finalized) + + +def __getattr__(name): + if name in _not_imported: + raise ImportError( + "The pyarrow installation is not built with support for " + "'{0}'".format(name) + ) + + raise AttributeError( + "module 'pyarrow.fs' has no attribute '{0}'".format(name) + ) + + +def _filesystem_from_str(uri): + # instantiate the file system from an uri, if the uri has a path + # component then it will be treated as a path prefix + filesystem, prefix = FileSystem.from_uri(uri) + prefix = filesystem.normalize_path(prefix) + if prefix: + # validate that the prefix is pointing to a directory + prefix_info = filesystem.get_file_info([prefix])[0] + if prefix_info.type != FileType.Directory: + raise ValueError( + "The path component of the filesystem URI must point to a " + "directory but it has a type: `{}`. The path component " + "is `{}` and the given filesystem URI is `{}`".format( + prefix_info.type.name, prefix_info.path, uri + ) + ) + filesystem = SubTreeFileSystem(prefix, filesystem) + return filesystem + + +def _ensure_filesystem( + filesystem, use_mmap=False, allow_legacy_filesystem=False +): + if isinstance(filesystem, FileSystem): + return filesystem + elif isinstance(filesystem, str): + if use_mmap: + raise ValueError( + "Specifying to use memory mapping not supported for " + "filesystem specified as an URI string" + ) + return _filesystem_from_str(filesystem) + + # handle fsspec-compatible filesystems + try: + import fsspec + except ImportError: + pass + else: + if isinstance(filesystem, fsspec.AbstractFileSystem): + if type(filesystem).__name__ == 'LocalFileSystem': + # In case its a simple LocalFileSystem, use native arrow one + return LocalFileSystem(use_mmap=use_mmap) + return PyFileSystem(FSSpecHandler(filesystem)) + + # map old filesystems to new ones + import pyarrow.filesystem as legacyfs + + if isinstance(filesystem, legacyfs.LocalFileSystem): + return LocalFileSystem(use_mmap=use_mmap) + # TODO handle HDFS? + if allow_legacy_filesystem and isinstance(filesystem, legacyfs.FileSystem): + return filesystem + + raise TypeError( + "Unrecognized filesystem: {}. `filesystem` argument must be a " + "FileSystem instance or a valid file system URI'".format( + type(filesystem)) + ) + + +def _resolve_filesystem_and_path( + path, filesystem=None, allow_legacy_filesystem=False, memory_map=False +): + """ + Return filesystem/path from path which could be an URI or a plain + filesystem path. + """ + if not _is_path_like(path): + if filesystem is not None: + raise ValueError( + "'filesystem' passed but the specified path is file-like, so" + " there is nothing to open with 'filesystem'." 
+ ) + return filesystem, path + + if filesystem is not None: + filesystem = _ensure_filesystem( + filesystem, use_mmap=memory_map, + allow_legacy_filesystem=allow_legacy_filesystem + ) + if isinstance(filesystem, LocalFileSystem): + path = _stringify_path(path) + elif not isinstance(path, str): + raise TypeError( + "Expected string path; path-like objects are only allowed " + "with a local filesystem" + ) + if not allow_legacy_filesystem: + path = filesystem.normalize_path(path) + return filesystem, path + + path = _stringify_path(path) + + # if filesystem is not given, try to automatically determine one + # first check if the file exists as a local (relative) file path + # if not then try to parse the path as an URI + filesystem = LocalFileSystem(use_mmap=memory_map) + + try: + file_info = filesystem.get_file_info(path) + except ValueError: # ValueError means path is likely an URI + file_info = None + exists_locally = False + else: + exists_locally = (file_info.type != FileType.NotFound) + + # if the file or directory doesn't exists locally, then assume that + # the path is an URI describing the file system as well + if not exists_locally: + try: + filesystem, path = FileSystem.from_uri(path) + except ValueError as e: + # neither an URI nor a locally existing path, so assume that + # local path was given and propagate a nicer file not found error + # instead of a more confusing scheme parsing error + if "empty scheme" not in str(e) \ + and "Cannot parse URI" not in str(e): + raise + else: + path = filesystem.normalize_path(path) + + return filesystem, path + + +def copy_files(source, destination, + source_filesystem=None, destination_filesystem=None, + *, chunk_size=1024*1024, use_threads=True): + """ + Copy files between FileSystems. + + This functions allows you to recursively copy directories of files from + one file system to another, such as from S3 to your local machine. + + Parameters + ---------- + source : string + Source file path or URI to a single file or directory. + If a directory, files will be copied recursively from this path. + destination : string + Destination file path or URI. If `source` is a file, `destination` + is also interpreted as the destination file (not directory). + Directories will be created as necessary. + source_filesystem : FileSystem, optional + Source filesystem, needs to be specified if `source` is not a URI, + otherwise inferred. + destination_filesystem : FileSystem, optional + Destination filesystem, needs to be specified if `destination` is not + a URI, otherwise inferred. + chunk_size : int, default 1MB + The maximum size of block to read before flushing to the + destination file. A larger chunk_size will use more memory while + copying but may help accommodate high latency FileSystems. + use_threads : bool, default True + Whether to use multiple threads to accelerate copying. + + Examples + -------- + Inspect an S3 bucket's files: + + >>> s3, path = fs.FileSystem.from_uri( + ... "s3://registry.opendata.aws/roda/ndjson/") + >>> selector = fs.FileSelector(path) + >>> s3.get_file_info(selector) + [>> fs.copy_files("s3://registry.opendata.aws/roda/ndjson/index.ndjson", + ... "file:///{}/index_copy.ndjson".format(local_path)) + + >>> fs.LocalFileSystem().get_file_info(str(local_path)+ + ... '/index_copy.ndjson') + + + Copy file using a FileSystem object: + + >>> fs.copy_files("registry.opendata.aws/roda/ndjson/index.ndjson", + ... "file:///{}/index_copy.ndjson".format(local_path), + ... 
source_filesystem=fs.S3FileSystem()) + """ + source_fs, source_path = _resolve_filesystem_and_path( + source, source_filesystem + ) + destination_fs, destination_path = _resolve_filesystem_and_path( + destination, destination_filesystem + ) + + file_info = source_fs.get_file_info(source_path) + if file_info.type == FileType.Directory: + source_sel = FileSelector(source_path, recursive=True) + _copy_files_selector(source_fs, source_sel, + destination_fs, destination_path, + chunk_size, use_threads) + else: + _copy_files(source_fs, source_path, + destination_fs, destination_path, + chunk_size, use_threads) + + +class FSSpecHandler(FileSystemHandler): + """ + Handler for fsspec-based Python filesystems. + + https://filesystem-spec.readthedocs.io/en/latest/index.html + + Parameters + ---------- + fs : FSSpec-compliant filesystem instance + + Examples + -------- + >>> PyFileSystem(FSSpecHandler(fsspec_fs)) # doctest: +SKIP + """ + + def __init__(self, fs): + self.fs = fs + + def __eq__(self, other): + if isinstance(other, FSSpecHandler): + return self.fs == other.fs + return NotImplemented + + def __ne__(self, other): + if isinstance(other, FSSpecHandler): + return self.fs != other.fs + return NotImplemented + + def get_type_name(self): + protocol = self.fs.protocol + if isinstance(protocol, list): + protocol = protocol[0] + return "fsspec+{0}".format(protocol) + + def normalize_path(self, path): + return path + + @staticmethod + def _create_file_info(path, info): + size = info["size"] + if info["type"] == "file": + ftype = FileType.File + elif info["type"] == "directory": + ftype = FileType.Directory + # some fsspec filesystems include a file size for directories + size = None + else: + ftype = FileType.Unknown + return FileInfo(path, ftype, size=size, mtime=info.get("mtime", None)) + + def get_file_info(self, paths): + infos = [] + for path in paths: + try: + info = self.fs.info(path) + except FileNotFoundError: + infos.append(FileInfo(path, FileType.NotFound)) + else: + infos.append(self._create_file_info(path, info)) + return infos + + def get_file_info_selector(self, selector): + if not self.fs.isdir(selector.base_dir): + if self.fs.exists(selector.base_dir): + raise NotADirectoryError(selector.base_dir) + else: + if selector.allow_not_found: + return [] + else: + raise FileNotFoundError(selector.base_dir) + + if selector.recursive: + maxdepth = None + else: + maxdepth = 1 + + infos = [] + selected_files = self.fs.find( + selector.base_dir, maxdepth=maxdepth, withdirs=True, detail=True + ) + for path, info in selected_files.items(): + _path = path.strip("/") + base_dir = selector.base_dir.strip("/") + # Need to exclude base directory from selected files if present + # (fsspec filesystems, see GH-37555) + if _path != base_dir: + infos.append(self._create_file_info(path, info)) + + return infos + + def create_dir(self, path, recursive): + # mkdir also raises FileNotFoundError when base directory is not found + try: + self.fs.mkdir(path, create_parents=recursive) + except FileExistsError: + pass + + def delete_dir(self, path): + self.fs.rm(path, recursive=True) + + def _delete_dir_contents(self, path, missing_dir_ok): + try: + subpaths = self.fs.listdir(path, detail=False) + except FileNotFoundError: + if missing_dir_ok: + return + raise + for subpath in subpaths: + if self.fs.isdir(subpath): + self.fs.rm(subpath, recursive=True) + elif self.fs.isfile(subpath): + self.fs.rm(subpath) + + def delete_dir_contents(self, path, missing_dir_ok): + if path.strip("/") == "": + raise ValueError( + 
"delete_dir_contents called on path '", path, "'") + self._delete_dir_contents(path, missing_dir_ok) + + def delete_root_dir_contents(self): + self._delete_dir_contents("/") + + def delete_file(self, path): + # fs.rm correctly raises IsADirectoryError when `path` is a directory + # instead of a file and `recursive` is not set to True + if not self.fs.exists(path): + raise FileNotFoundError(path) + self.fs.rm(path) + + def move(self, src, dest): + self.fs.mv(src, dest, recursive=True) + + def copy_file(self, src, dest): + # fs.copy correctly raises IsADirectoryError when `src` is a directory + # instead of a file + self.fs.copy(src, dest) + + # TODO can we read/pass metadata (e.g. Content-Type) in the methods below? + + def open_input_stream(self, path): + from pyarrow import PythonFile + + if not self.fs.isfile(path): + raise FileNotFoundError(path) + + return PythonFile(self.fs.open(path, mode="rb"), mode="r") + + def open_input_file(self, path): + from pyarrow import PythonFile + + if not self.fs.isfile(path): + raise FileNotFoundError(path) + + return PythonFile(self.fs.open(path, mode="rb"), mode="r") + + def open_output_stream(self, path, metadata): + from pyarrow import PythonFile + + return PythonFile(self.fs.open(path, mode="wb"), mode="w") + + def open_append_stream(self, path, metadata): + from pyarrow import PythonFile + + return PythonFile(self.fs.open(path, mode="ab"), mode="w") diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/gandiva.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/gandiva.pyx new file mode 100644 index 0000000000000000000000000000000000000000..2202ec64f29628d76143759220eb61102d1bea97 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/gandiva.pyx @@ -0,0 +1,760 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp.memory cimport shared_ptr +from libcpp.string cimport string as c_string +from libcpp.vector cimport vector as c_vector +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch, + Schema, check_status, pyarrow_wrap_array, + pyarrow_wrap_data_type, ensure_type, _Weakrefable, + pyarrow_wrap_field) + +from pyarrow.includes.libgandiva cimport ( + CCondition, CGandivaExpression, + CNode, CProjector, CFilter, + CSelectionVector, + _ensure_selection_mode, + CConfiguration, + CConfigurationBuilder, + TreeExprBuilder_MakeExpression, + TreeExprBuilder_MakeFunction, + TreeExprBuilder_MakeBoolLiteral, + TreeExprBuilder_MakeUInt8Literal, + TreeExprBuilder_MakeUInt16Literal, + TreeExprBuilder_MakeUInt32Literal, + TreeExprBuilder_MakeUInt64Literal, + TreeExprBuilder_MakeInt8Literal, + TreeExprBuilder_MakeInt16Literal, + TreeExprBuilder_MakeInt32Literal, + TreeExprBuilder_MakeInt64Literal, + TreeExprBuilder_MakeFloatLiteral, + TreeExprBuilder_MakeDoubleLiteral, + TreeExprBuilder_MakeStringLiteral, + TreeExprBuilder_MakeBinaryLiteral, + TreeExprBuilder_MakeField, + TreeExprBuilder_MakeIf, + TreeExprBuilder_MakeAnd, + TreeExprBuilder_MakeOr, + TreeExprBuilder_MakeCondition, + TreeExprBuilder_MakeInExpressionInt32, + TreeExprBuilder_MakeInExpressionInt64, + TreeExprBuilder_MakeInExpressionTime32, + TreeExprBuilder_MakeInExpressionTime64, + TreeExprBuilder_MakeInExpressionDate32, + TreeExprBuilder_MakeInExpressionDate64, + TreeExprBuilder_MakeInExpressionTimeStamp, + TreeExprBuilder_MakeInExpressionString, + SelectionVector_MakeInt16, + SelectionVector_MakeInt32, + SelectionVector_MakeInt64, + Projector_Make, + Filter_Make, + CFunctionSignature, + GetRegisteredFunctionSignatures) + + +cdef class Node(_Weakrefable): + cdef: + shared_ptr[CNode] node + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use the " + "TreeExprBuilder API directly" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CNode] node): + cdef Node self = Node.__new__(Node) + self.node = node + return self + + def __str__(self): + return self.node.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def return_type(self): + return pyarrow_wrap_data_type(self.node.get().return_type()) + + +cdef class Expression(_Weakrefable): + cdef: + shared_ptr[CGandivaExpression] expression + + cdef void init(self, shared_ptr[CGandivaExpression] expression): + self.expression = expression + + def __str__(self): + return self.expression.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def root(self): + return Node.create(self.expression.get().root()) + + def result(self): + return pyarrow_wrap_field(self.expression.get().result()) + + +cdef class Condition(_Weakrefable): + cdef: + shared_ptr[CCondition] condition + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use the " + "TreeExprBuilder API instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CCondition] condition): + cdef Condition self = Condition.__new__(Condition) + self.condition = condition + return self + + def __str__(self): + 
return self.condition.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def root(self): + return Node.create(self.condition.get().root()) + + def result(self): + return pyarrow_wrap_field(self.condition.get().result()) + + +cdef class SelectionVector(_Weakrefable): + cdef: + shared_ptr[CSelectionVector] selection_vector + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly." + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CSelectionVector] selection_vector): + cdef SelectionVector self = SelectionVector.__new__(SelectionVector) + self.selection_vector = selection_vector + return self + + def to_array(self): + cdef shared_ptr[CArray] result = self.selection_vector.get().ToArray() + return pyarrow_wrap_array(result) + + +cdef class Projector(_Weakrefable): + cdef: + shared_ptr[CProjector] projector + MemoryPool pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "make_projector instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CProjector] projector, MemoryPool pool): + cdef Projector self = Projector.__new__(Projector) + self.projector = projector + self.pool = pool + return self + + @property + def llvm_ir(self): + return self.projector.get().DumpIR().decode() + + def evaluate(self, RecordBatch batch, SelectionVector selection=None): + """ + Evaluate the specified record batch and return the arrays at the + filtered positions. + + Parameters + ---------- + batch : pyarrow.RecordBatch + selection : pyarrow.gandiva.SelectionVector + + Returns + ------- + list[pyarrow.Array] + """ + cdef vector[shared_ptr[CArray]] results + if selection is None: + check_status(self.projector.get().Evaluate( + batch.sp_batch.get()[0], self.pool.pool, &results)) + else: + check_status( + self.projector.get().Evaluate( + batch.sp_batch.get()[0], selection.selection_vector.get(), + self.pool.pool, &results)) + cdef shared_ptr[CArray] result + arrays = [] + for result in results: + arrays.append(pyarrow_wrap_array(result)) + return arrays + + +cdef class Filter(_Weakrefable): + cdef: + shared_ptr[CFilter] filter + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "make_filter instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CFilter] filter): + cdef Filter self = Filter.__new__(Filter) + self.filter = filter + return self + + @property + def llvm_ir(self): + return self.filter.get().DumpIR().decode() + + def evaluate(self, RecordBatch batch, MemoryPool pool, dtype='int32'): + """ + Evaluate the specified record batch and return a selection vector. 
+ + Parameters + ---------- + batch : pyarrow.RecordBatch + pool : MemoryPool + dtype : DataType or str, default int32 + + Returns + ------- + pyarrow.gandiva.SelectionVector + """ + cdef: + DataType type = ensure_type(dtype) + shared_ptr[CSelectionVector] selection + + if type.id == _Type_INT16: + check_status(SelectionVector_MakeInt16( + batch.num_rows, pool.pool, &selection)) + elif type.id == _Type_INT32: + check_status(SelectionVector_MakeInt32( + batch.num_rows, pool.pool, &selection)) + elif type.id == _Type_INT64: + check_status(SelectionVector_MakeInt64( + batch.num_rows, pool.pool, &selection)) + else: + raise ValueError("'dtype' of the selection vector should be " + "one of 'int16', 'int32' and 'int64'.") + + check_status(self.filter.get().Evaluate( + batch.sp_batch.get()[0], selection)) + return SelectionVector.create(selection) + + +cdef class TreeExprBuilder(_Weakrefable): + + def make_literal(self, value, dtype): + """ + Create a node on a literal. + + Parameters + ---------- + value : a literal value + dtype : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef: + DataType type = ensure_type(dtype) + shared_ptr[CNode] r + + if type.id == _Type_BOOL: + r = TreeExprBuilder_MakeBoolLiteral(value) + elif type.id == _Type_UINT8: + r = TreeExprBuilder_MakeUInt8Literal(value) + elif type.id == _Type_UINT16: + r = TreeExprBuilder_MakeUInt16Literal(value) + elif type.id == _Type_UINT32: + r = TreeExprBuilder_MakeUInt32Literal(value) + elif type.id == _Type_UINT64: + r = TreeExprBuilder_MakeUInt64Literal(value) + elif type.id == _Type_INT8: + r = TreeExprBuilder_MakeInt8Literal(value) + elif type.id == _Type_INT16: + r = TreeExprBuilder_MakeInt16Literal(value) + elif type.id == _Type_INT32: + r = TreeExprBuilder_MakeInt32Literal(value) + elif type.id == _Type_INT64: + r = TreeExprBuilder_MakeInt64Literal(value) + elif type.id == _Type_FLOAT: + r = TreeExprBuilder_MakeFloatLiteral(value) + elif type.id == _Type_DOUBLE: + r = TreeExprBuilder_MakeDoubleLiteral(value) + elif type.id == _Type_STRING: + r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8')) + elif type.id == _Type_BINARY: + r = TreeExprBuilder_MakeBinaryLiteral(value) + else: + raise TypeError("Didn't recognize dtype " + str(dtype)) + + return Node.create(r) + + def make_expression(self, Node root_node not None, + Field return_field not None): + """ + Create an expression with the specified root_node, + and the result written to result_field. + + Parameters + ---------- + root_node : pyarrow.gandiva.Node + return_field : pyarrow.Field + + Returns + ------- + pyarrow.gandiva.Expression + """ + cdef shared_ptr[CGandivaExpression] r = TreeExprBuilder_MakeExpression( + root_node.node, return_field.sp_field) + cdef Expression expression = Expression() + expression.init(r) + return expression + + def make_function(self, name, children, DataType return_type): + """ + Create a node with a function. + + Parameters + ---------- + name : str + children : pyarrow.gandiva.NodeVector + return_type : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeFunction( + name.encode(), c_children, return_type.sp_type) + return Node.create(r) + + def make_field(self, Field field not None): + """ + Create a node with an Arrow field. 
+ + Parameters + ---------- + field : pyarrow.Field + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeField(field.sp_field) + return Node.create(r) + + def make_if(self, Node condition not None, Node this_node not None, + Node else_node not None, DataType return_type not None): + """ + Create a node with an if-else expression. + + Parameters + ---------- + condition : pyarrow.gandiva.Node + this_node : pyarrow.gandiva.Node + else_node : pyarrow.gandiva.Node + return_type : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeIf( + condition.node, this_node.node, else_node.node, + return_type.sp_type) + return Node.create(r) + + def make_and(self, children): + """ + Create a Node with a boolean AND expression. + + Parameters + ---------- + children : list[pyarrow.gandiva.Node] + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children) + return Node.create(r) + + def make_or(self, children): + """ + Create a Node with a boolean OR expression. + + Parameters + ---------- + children : list[pyarrow.gandiva.Node] + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeOr(c_children) + return Node.create(r) + + def _make_in_expression_int32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionInt32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_int64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionInt64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_time32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTime32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_time64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTime64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_date32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionDate32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_date64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionDate64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_timestamp(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] 
c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values) + return Node.create(r) + + def _make_in_expression_binary(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[c_string] c_values + cdef c_string v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionString(node.node, c_values) + return Node.create(r) + + def _make_in_expression_string(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[c_string] c_values + cdef c_string _v + for v in values: + _v = v.encode('UTF-8') + c_values.insert(_v) + r = TreeExprBuilder_MakeInExpressionString(node.node, c_values) + return Node.create(r) + + def make_in_expression(self, Node node not None, values, dtype): + """ + Create a Node with an IN expression. + + Parameters + ---------- + node : pyarrow.gandiva.Node + values : iterable + dtype : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef DataType type = ensure_type(dtype) + + if type.id == _Type_INT32: + return self._make_in_expression_int32(node, values) + elif type.id == _Type_INT64: + return self._make_in_expression_int64(node, values) + elif type.id == _Type_TIME32: + return self._make_in_expression_time32(node, values) + elif type.id == _Type_TIME64: + return self._make_in_expression_time64(node, values) + elif type.id == _Type_TIMESTAMP: + return self._make_in_expression_timestamp(node, values) + elif type.id == _Type_DATE32: + return self._make_in_expression_date32(node, values) + elif type.id == _Type_DATE64: + return self._make_in_expression_date64(node, values) + elif type.id == _Type_BINARY: + return self._make_in_expression_binary(node, values) + elif type.id == _Type_STRING: + return self._make_in_expression_string(node, values) + else: + raise TypeError("Data type " + str(dtype) + " not supported.") + + def make_condition(self, Node condition not None): + """ + Create a condition with the specified node. + + Parameters + ---------- + condition : pyarrow.gandiva.Node + + Returns + ------- + pyarrow.gandiva.Condition + """ + cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition( + condition.node) + return Condition.create(r) + +cdef class Configuration(_Weakrefable): + cdef: + shared_ptr[CConfiguration] configuration + + def __cinit__(self, bint optimize=True, bint dump_ir=False): + """ + Initialize the configuration with specified options. + + Parameters + ---------- + optimize : bool, default True + Whether to enable optimizations. + dump_ir : bool, default False + Whether to dump LLVM IR. + """ + self.configuration = CConfigurationBuilder().build() + self.configuration.get().set_optimize(optimize) + self.configuration.get().set_dump_ir(dump_ir) + + @staticmethod + cdef create(shared_ptr[CConfiguration] configuration): + """ + Create a Configuration instance from an existing CConfiguration pointer. + + Parameters + ---------- + configuration : shared_ptr[CConfiguration] + Existing CConfiguration pointer. + + Returns + ------- + Configuration instance + """ + cdef Configuration self = Configuration.__new__(Configuration) + self.configuration = configuration + return self + + +cpdef make_projector(Schema schema, children, MemoryPool pool, + str selection_mode="NONE", + Configuration configuration=None): + """ + Construct a projection using expressions. + + A projector is built for a specific schema and vector of expressions. 
+ Once the projector is built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the expressions. + children : list[pyarrow.gandiva.Expression] + List of projectable expression objects. + pool : pyarrow.MemoryPool + Memory pool used to allocate output arrays. + selection_mode : str, default "NONE" + Possible values are NONE, UINT16, UINT32, UINT64. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the projector. + + Returns + ------- + Projector instance + """ + cdef: + Expression child + c_vector[shared_ptr[CGandivaExpression]] c_children + shared_ptr[CProjector] result + + if configuration is None: + configuration = Configuration() + + for child in children: + if child is None: + raise TypeError("Expressions must not be None") + c_children.push_back(child.expression) + + check_status( + Projector_Make(schema.sp_schema, c_children, + _ensure_selection_mode(selection_mode), + configuration.configuration, + &result)) + return Projector.create(result, pool) + + +cpdef make_filter(Schema schema, Condition condition, + Configuration configuration=None): + """ + Construct a filter based on a condition. + + A filter is built for a specific schema and condition. Once the filter is + built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the condition. + condition : pyarrow.gandiva.Condition + Filter condition. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the filter. + + Returns + ------- + Filter instance + """ + cdef shared_ptr[CFilter] result + if condition is None: + raise TypeError("Condition must not be None") + + if configuration is None: + configuration = Configuration() + + check_status( + Filter_Make(schema.sp_schema, condition.condition, configuration.configuration, &result)) + return Filter.create(result) + + +cdef class FunctionSignature(_Weakrefable): + """ + Signature of a Gandiva function including name, parameter types + and return type. + """ + + cdef: + shared_ptr[CFunctionSignature] signature + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly." + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CFunctionSignature] signature): + cdef FunctionSignature self = FunctionSignature.__new__( + FunctionSignature) + self.signature = signature + return self + + def return_type(self): + return pyarrow_wrap_data_type(self.signature.get().ret_type()) + + def param_types(self): + result = [] + cdef vector[shared_ptr[CDataType]] types = \ + self.signature.get().param_types() + for t in types: + result.append(pyarrow_wrap_data_type(t)) + return result + + def name(self): + return self.signature.get().base_name().decode() + + def __repr__(self): + signature = self.signature.get().ToString().decode() + return "FunctionSignature(" + signature + ")" + + +def get_registered_function_signatures(): + """ + Return the function in Gandiva's ExpressionRegistry. 
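Putting the pieces above together, a hedged end-to-end sketch: build nodes with TreeExprBuilder, compile them with make_projector / make_filter, and evaluate a record batch. This assumes pyarrow was built with Gandiva enabled:

import pyarrow as pa
import pyarrow.gandiva as gandiva

batch = pa.RecordBatch.from_pydict({"a": [1.0, 3.0, 5.0]})
pool = pa.default_memory_pool()

builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(batch.schema.field("a"))
two = builder.make_literal(2.0, pa.float64())

# Projector: compute a + 2.0 into a result field.
add = builder.make_function("add", [node_a, two], pa.float64())
expr = builder.make_expression(add, pa.field("a_plus_two", pa.float64()))
projector = gandiva.make_projector(batch.schema, [expr], pool)
(projected,) = projector.evaluate(batch)

# Filter: keep rows where a > 2.0, returned as a selection vector of indices.
cond = builder.make_condition(
    builder.make_function("greater_than", [node_a, two], pa.bool_()))
fltr = gandiva.make_filter(batch.schema, cond)
selection = fltr.evaluate(batch, pool)     # dtype defaults to 'int32'
print(selection.to_array())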
+ + Returns + ------- + registry: a list of registered function signatures + """ + results = [] + + cdef vector[shared_ptr[CFunctionSignature]] signatures = \ + GetRegisteredFunctionSignatures() + + for signature in signatures: + results.append(FunctionSignature.create(signature)) + + return results diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/hdfs.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/hdfs.py new file mode 100644 index 0000000000000000000000000000000000000000..2e6c387a8fde31a005550732a8e5c6396ec06d6f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/hdfs.py @@ -0,0 +1,240 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import os +import posixpath +import sys +import warnings + +from pyarrow.util import doc, _DEPR_MSG +from pyarrow.filesystem import FileSystem +import pyarrow._hdfsio as _hdfsio + + +class HadoopFileSystem(_hdfsio.HadoopFileSystem, FileSystem): + """ + DEPRECATED: FileSystem interface for HDFS cluster. + + See pyarrow.hdfs.connect for full connection details + + .. deprecated:: 2.0 + ``pyarrow.hdfs.HadoopFileSystem`` is deprecated, + please use ``pyarrow.fs.HadoopFileSystem`` instead. + """ + + def __init__(self, host="default", port=0, user=None, kerb_ticket=None, + driver='libhdfs', extra_conf=None): + warnings.warn( + _DEPR_MSG.format( + "hdfs.HadoopFileSystem", "2.0.0", "fs.HadoopFileSystem"), + FutureWarning, stacklevel=2) + if driver == 'libhdfs': + _maybe_set_hadoop_classpath() + + self._connect(host, port, user, kerb_ticket, extra_conf) + + def __reduce__(self): + return (HadoopFileSystem, (self.host, self.port, self.user, + self.kerb_ticket, self.extra_conf)) + + def _isfilestore(self): + """ + Return True if this is a Unix-style file store with directories. + """ + return True + + @doc(FileSystem.isdir) + def isdir(self, path): + return super().isdir(path) + + @doc(FileSystem.isfile) + def isfile(self, path): + return super().isfile(path) + + @doc(FileSystem.delete) + def delete(self, path, recursive=False): + return super().delete(path, recursive) + + def mkdir(self, path, **kwargs): + """ + Create directory in HDFS. + + Parameters + ---------- + path : str + Directory path to create, including any parent directories. + + Notes + ----- + libhdfs does not support create_parents=False, so we ignore this here + """ + return super().mkdir(path) + + @doc(FileSystem.rename) + def rename(self, path, new_path): + return super().rename(path, new_path) + + @doc(FileSystem.exists) + def exists(self, path): + return super().exists(path) + + def ls(self, path, detail=False): + """ + Retrieve directory contents and metadata, if requested. + + Parameters + ---------- + path : str + HDFS path to retrieve contents of. + detail : bool, default False + If False, only return list of paths. 
+ + Returns + ------- + result : list of dicts (detail=True) or strings (detail=False) + """ + return super().ls(path, detail) + + def walk(self, top_path): + """ + Directory tree generator for HDFS, like os.walk. + + Parameters + ---------- + top_path : str + Root directory for tree traversal. + + Returns + ------- + Generator yielding 3-tuple (dirpath, dirnames, filename) + """ + contents = self.ls(top_path, detail=True) + + directories, files = _libhdfs_walk_files_dirs(top_path, contents) + yield top_path, directories, files + for dirname in directories: + yield from self.walk(self._path_join(top_path, dirname)) + + +def _maybe_set_hadoop_classpath(): + import re + + if re.search(r'hadoop-common[^/]+.jar', os.environ.get('CLASSPATH', '')): + return + + if 'HADOOP_HOME' in os.environ: + if sys.platform != 'win32': + classpath = _derive_hadoop_classpath() + else: + hadoop_bin = '{}/bin/hadoop'.format(os.environ['HADOOP_HOME']) + classpath = _hadoop_classpath_glob(hadoop_bin) + else: + classpath = _hadoop_classpath_glob('hadoop') + + os.environ['CLASSPATH'] = classpath.decode('utf-8') + + +def _derive_hadoop_classpath(): + import subprocess + + find_args = ('find', '-L', os.environ['HADOOP_HOME'], '-name', '*.jar') + find = subprocess.Popen(find_args, stdout=subprocess.PIPE) + xargs_echo = subprocess.Popen(('xargs', 'echo'), + stdin=find.stdout, + stdout=subprocess.PIPE) + jars = subprocess.check_output(('tr', "' '", "':'"), + stdin=xargs_echo.stdout) + hadoop_conf = os.environ["HADOOP_CONF_DIR"] \ + if "HADOOP_CONF_DIR" in os.environ \ + else os.environ["HADOOP_HOME"] + "/etc/hadoop" + return (hadoop_conf + ":").encode("utf-8") + jars + + +def _hadoop_classpath_glob(hadoop_bin): + import subprocess + + hadoop_classpath_args = (hadoop_bin, 'classpath', '--glob') + return subprocess.check_output(hadoop_classpath_args) + + +def _libhdfs_walk_files_dirs(top_path, contents): + files = [] + directories = [] + for c in contents: + scrubbed_name = posixpath.split(c['name'])[1] + if c['kind'] == 'file': + files.append(scrubbed_name) + else: + directories.append(scrubbed_name) + + return directories, files + + +def connect(host="default", port=0, user=None, kerb_ticket=None, + extra_conf=None): + """ + DEPRECATED: Connect to an HDFS cluster. + + All parameters are optional and should only be set if the defaults need + to be overridden. + + Authentication should be automatic if the HDFS cluster uses Kerberos. + However, if a username is specified, then the ticket cache will likely + be required. + + .. deprecated:: 2.0 + ``pyarrow.hdfs.connect`` is deprecated, + please use ``pyarrow.fs.HadoopFileSystem`` instead. + + Parameters + ---------- + host : NameNode. Set to "default" for fs.defaultFS from core-site.xml. + port : NameNode's port. Set to 0 for default or logical (HA) nodes. + user : Username when connecting to HDFS; None implies login user. + kerb_ticket : Path to Kerberos ticket cache. + extra_conf : dict, default None + extra Key/Value pairs for config; Will override any + hdfs-site.xml properties + + Notes + ----- + The first time you call this method, it will take longer than usual due + to JNI spin-up time. 
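Since everything in this module is deprecated in favour of `pyarrow.fs`, the hedged sketch below puts the legacy call next to its replacement. Host, port and paths are hypothetical and assume a reachable HDFS cluster with libhdfs on the classpath:

import pyarrow.hdfs as hdfs
from pyarrow import fs

# Legacy API: still works but emits a FutureWarning, as documented above.
legacy = hdfs.connect(host="namenode", port=8020)     # hypothetical host

# Recommended replacement using the new filesystem layer.
hadoop = fs.HadoopFileSystem(host="namenode", port=8020)
infos = hadoop.get_file_info(fs.FileSelector("/user/data", recursive=True))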
+ + Returns + ------- + filesystem : HadoopFileSystem + """ + warnings.warn( + _DEPR_MSG.format("hdfs.connect", "2.0.0", "fs.HadoopFileSystem"), + FutureWarning, stacklevel=2 + ) + return _connect( + host=host, port=port, user=user, kerb_ticket=kerb_ticket, + extra_conf=extra_conf + ) + + +def _connect(host="default", port=0, user=None, kerb_ticket=None, + extra_conf=None): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fs = HadoopFileSystem(host=host, port=port, user=user, + kerb_ticket=kerb_ticket, + extra_conf=extra_conf) + return fs diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array.h new file mode 100644 index 0000000000000000000000000000000000000000..4d72ea9506a414fd6e50d5c7d0af437084045e05 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array.h @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Kitchen-sink public API for arrow::Array data structures. C++ library code +// (especially header files) in Apache Arrow should use more specific headers +// unless it's a file that uses most or all Array types in which case using +// arrow/array.h is fine. + +#pragma once + +/// \defgroup numeric-arrays Concrete classes for numeric arrays +/// @{ +/// @} + +/// \defgroup binary-arrays Concrete classes for binary/string arrays +/// @{ +/// @} + +/// \defgroup nested-arrays Concrete classes for nested arrays +/// @{ +/// @} + +/// \defgroup run-end-encoded-arrays Concrete classes for run-end encoded arrays +/// @{ +/// @} + +#include "arrow/array/array_base.h" // IWYU pragma: keep +#include "arrow/array/array_binary.h" // IWYU pragma: keep +#include "arrow/array/array_decimal.h" // IWYU pragma: keep +#include "arrow/array/array_dict.h" // IWYU pragma: keep +#include "arrow/array/array_nested.h" // IWYU pragma: keep +#include "arrow/array/array_primitive.h" // IWYU pragma: keep +#include "arrow/array/array_run_end.h" // IWYU pragma: keep +#include "arrow/array/data.h" // IWYU pragma: keep +#include "arrow/array/util.h" // IWYU pragma: keep diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a84c98b6b24917faf53a821c5c3e5f62471bb9aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h @@ -0,0 +1,484 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_generate.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Buffer builder classes + +/// \class BufferBuilder +/// \brief A class for incrementally building a contiguous chunk of in-memory +/// data +class ARROW_EXPORT BufferBuilder { + public: + explicit BufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : pool_(pool), + data_(/*ensure never null to make ubsan happy and avoid check penalties below*/ + util::MakeNonNull()), + capacity_(0), + size_(0), + alignment_(alignment) {} + + /// \brief Constructs new Builder that will start using + /// the provided buffer until Finish/Reset are called. + /// The buffer is not resized. + explicit BufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : buffer_(std::move(buffer)), + pool_(pool), + data_(buffer_->mutable_data()), + capacity_(buffer_->capacity()), + size_(buffer_->size()), + alignment_(alignment) {} + + /// \brief Resize the buffer to the nearest multiple of 64 bytes + /// + /// \param new_capacity the new capacity of the of the builder. Will be + /// rounded up to a multiple of 64 bytes for padding + /// \param shrink_to_fit if new capacity is smaller than the existing, + /// reallocate internal buffer. Set to false to avoid reallocations when + /// shrinking the builder. + /// \return Status + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + if (buffer_ == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(buffer_, + AllocateResizableBuffer(new_capacity, alignment_, pool_)); + } else { + ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit)); + } + capacity_ = buffer_->capacity(); + data_ = buffer_->mutable_data(); + return Status::OK(); + } + + /// \brief Ensure that builder can accommodate the additional number of bytes + /// without the need to perform allocations + /// + /// \param[in] additional_bytes number of additional bytes to make space for + /// \return Status + Status Reserve(const int64_t additional_bytes) { + auto min_capacity = size_ + additional_bytes; + if (min_capacity <= capacity_) { + return Status::OK(); + } + return Resize(GrowByFactor(capacity_, min_capacity), false); + } + + /// \brief Return a capacity expanded by the desired growth factor + static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) { + // Doubling capacity except for large Reserve requests. 
2x growth strategy + // (versus 1.5x) seems to have slightly better performance when using + // jemalloc, but significantly better performance when using the system + // allocator. See ARROW-6450 for further discussion + return std::max(new_capacity, current_capacity * 2); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(const void* data, const int64_t length) { + if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) { + ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false)); + } + UnsafeAppend(data, length); + return Status::OK(); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(std::string_view v) { return Append(v.data(), v.size()); } + + /// \brief Append copies of a value to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(const int64_t num_copies, uint8_t value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + // Advance pointer and zero out memory + Status Advance(const int64_t length) { return Append(length, 0); } + + // Advance pointer, but don't allocate or zero memory + void UnsafeAdvance(const int64_t length) { size_ += length; } + + // Unsafe methods don't check existing size + void UnsafeAppend(const void* data, const int64_t length) { + memcpy(data_ + size_, data, static_cast(length)); + size_ += length; + } + + void UnsafeAppend(std::string_view v) { + UnsafeAppend(v.data(), static_cast(v.size())); + } + + void UnsafeAppend(const int64_t num_copies, uint8_t value) { + memset(data_ + size_, value, static_cast(num_copies)); + size_ += num_copies; + } + + /// \brief Return result of builder as a Buffer object. + /// + /// The builder is reset and can be reused afterwards. + /// + /// \param[out] out the finalized Buffer object + /// \param shrink_to_fit if the buffer size is smaller than its capacity, + /// reallocate to fit more tightly in memory. Set to false to avoid + /// a reallocation, at the expense of potentially more memory consumption. + /// \return Status + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit)); + if (size_ != 0) buffer_->ZeroPadding(); + *out = buffer_; + if (*out == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_)); + } + Reset(); + return Status::OK(); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using BufferBuilder + /// mostly for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + size_ = final_length; + return Finish(shrink_to_fit); + } + + void Reset() { + buffer_ = NULLPTR; + capacity_ = size_ = 0; + } + + /// \brief Set size to a smaller value without modifying builder + /// contents. 
For reusable BufferBuilder classes + /// \param[in] position must be non-negative and less than or equal + /// to the current length() + void Rewind(int64_t position) { size_ = position; } + + int64_t capacity() const { return capacity_; } + int64_t length() const { return size_; } + const uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return data_; } + template + const T* data_as() const { + return reinterpret_cast(data_); + } + template + T* mutable_data_as() { + return reinterpret_cast(data_); + } + + private: + std::shared_ptr buffer_; + MemoryPool* pool_; + uint8_t* data_; + int64_t capacity_; + int64_t size_; + int64_t alignment_; +}; + +template +class TypedBufferBuilder; + +/// \brief A BufferBuilder for building a buffer of arithmetic elements +template +class TypedBufferBuilder< + T, typename std::enable_if::value || + std::is_standard_layout::value>::type> { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool()) + : bytes_builder_(std::move(buffer), pool) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(T value) { + return bytes_builder_.Append(reinterpret_cast(&value), sizeof(T)); + } + + Status Append(const T* values, int64_t num_elements) { + return bytes_builder_.Append(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + Status Append(const int64_t num_copies, T value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies + length())); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(T value) { + bytes_builder_.UnsafeAppend(reinterpret_cast(&value), sizeof(T)); + } + + void UnsafeAppend(const T* values, int64_t num_elements) { + bytes_builder_.UnsafeAppend(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + template + void UnsafeAppend(Iter values_begin, Iter values_end) { + auto num_elements = static_cast(std::distance(values_begin, values_end)); + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_elements * sizeof(T)); + std::copy(values_begin, values_end, data); + } + + void UnsafeAppend(const int64_t num_copies, T value) { + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_copies * sizeof(T)); + std::fill(data, data + num_copies, value); + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit); + } + + Status Reserve(const int64_t additional_elements) { + return bytes_builder_.Reserve(additional_elements * sizeof(T)); + } + + Status Advance(const int64_t length) { + return bytes_builder_.Advance(length * sizeof(T)); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). 
+ Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit); + } + + void Reset() { bytes_builder_.Reset(); } + + int64_t length() const { return bytes_builder_.length() / sizeof(T); } + int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); } + const T* data() const { return reinterpret_cast(bytes_builder_.data()); } + T* mutable_data() { return reinterpret_cast(bytes_builder_.mutable_data()); } + + private: + BufferBuilder bytes_builder_; +}; + +/// \brief A BufferBuilder for building a buffer containing a bitmap +template <> +class TypedBufferBuilder { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(bool value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const uint8_t* valid_bytes, int64_t num_elements) { + ARROW_RETURN_NOT_OK(Reserve(num_elements)); + UnsafeAppend(valid_bytes, num_elements); + return Status::OK(); + } + + Status Append(const int64_t num_copies, bool value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(bool value) { + bit_util::SetBitTo(mutable_data(), bit_length_, value); + if (!value) { + ++false_count_; + } + ++bit_length_; + } + + /// \brief Append bits from an array of bytes (one value per byte) + void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) { + if (num_elements == 0) return; + int64_t i = 0; + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = bytes[i++]; + false_count_ += !value; + return value; + }); + bit_length_ += num_elements; + } + + /// \brief Append bits from a packed bitmap + void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) { + if (num_elements == 0) return; + internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_); + false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements); + bit_length_ += num_elements; + } + + void UnsafeAppend(const int64_t num_copies, bool value) { + bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value); + false_count_ += num_copies * !value; + bit_length_ += num_copies; + } + + template + void UnsafeAppend(const int64_t num_elements, Generator&& gen) { + if (num_elements == 0) return; + + if (count_falses) { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = gen(); + false_count_ += !value; + return value; + }); + } else { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, + std::forward(gen)); + } + bit_length_ += num_elements; + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + const int64_t old_byte_capacity = bytes_builder_.capacity(); + ARROW_RETURN_NOT_OK( + bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit)); + // Resize() may have chosen a larger capacity (e.g. for padding), + // so ask it again before calling memset(). 
+ const int64_t new_byte_capacity = bytes_builder_.capacity(); + if (new_byte_capacity > old_byte_capacity) { + // The additional buffer space is 0-initialized for convenience, + // so that other methods can simply bump the length. + memset(mutable_data() + old_byte_capacity, 0, + static_cast(new_byte_capacity - old_byte_capacity)); + } + return Status::OK(); + } + + Status Reserve(const int64_t additional_elements) { + return Resize( + BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements), + false); + } + + Status Advance(const int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(length)); + bit_length_ += length; + false_count_ += length; + return Status::OK(); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + // set bytes_builder_.size_ == byte size of data + bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) - + bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + const auto final_byte_length = bit_util::BytesForBits(final_length); + bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit); + } + + void Reset() { + bytes_builder_.Reset(); + bit_length_ = false_count_ = 0; + } + + int64_t length() const { return bit_length_; } + int64_t capacity() const { return bytes_builder_.capacity() * 8; } + const uint8_t* data() const { return bytes_builder_.data(); } + uint8_t* mutable_data() { return bytes_builder_.mutable_data(); } + int64_t false_count() const { return false_count_; } + + private: + BufferBuilder bytes_builder_; + int64_t bit_length_ = 0; + int64_t false_count_ = 0; +}; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h new file mode 100644 index 0000000000000000000000000000000000000000..f0aa14c1e0612d1872a5959998651a12668f449f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
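+
+// The concrete array builders aggregated by this header (see the includes
+// below) follow a common pattern: append values, then call Finish() to obtain
+// an immutable Array.  A minimal sketch, assuming the usual Int64Builder API
+// from builder_primitive.h:
+//
+//   arrow::Int64Builder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append(1));
+//   ARROW_RETURN_NOT_OK(builder.AppendNull());
+//   std::shared_ptr<arrow::Array> array;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&array));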
+ +#pragma once + +#include + +#include "arrow/array/builder_adaptive.h" // IWYU pragma: keep +#include "arrow/array/builder_base.h" // IWYU pragma: keep +#include "arrow/array/builder_binary.h" // IWYU pragma: keep +#include "arrow/array/builder_decimal.h" // IWYU pragma: keep +#include "arrow/array/builder_dict.h" // IWYU pragma: keep +#include "arrow/array/builder_nested.h" // IWYU pragma: keep +#include "arrow/array/builder_primitive.h" // IWYU pragma: keep +#include "arrow/array/builder_run_end.h" // IWYU pragma: keep +#include "arrow/array/builder_time.h" // IWYU pragma: keep +#include "arrow/array/builder_union.h" // IWYU pragma: keep +#include "arrow/status.h" +#include "arrow/util/visibility.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/device.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/device.h new file mode 100644 index 0000000000000000000000000000000000000000..efb0a5ab400a1f34c17258ea112e983097159dbf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/device.h @@ -0,0 +1,366 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief EXPERIMENTAL: Device type enum which matches up with C Data Device types +enum class DeviceAllocationType : char { + kCPU = 1, + kCUDA = 2, + kCUDA_HOST = 3, + kOPENCL = 4, + kVULKAN = 7, + kMETAL = 8, + kVPI = 9, + kROCM = 10, + kROCM_HOST = 11, + kEXT_DEV = 12, + kCUDA_MANAGED = 13, + kONEAPI = 14, + kWEBGPU = 15, + kHEXAGON = 16, +}; + +class MemoryManager; + +/// \brief EXPERIMENTAL: Abstract interface for hardware devices +/// +/// This object represents a device with access to some memory spaces. +/// When handling a Buffer or raw memory address, it allows deciding in which +/// context the raw memory address should be interpreted +/// (e.g. CPU-accessible memory, or embedded memory on some particular GPU). +class ARROW_EXPORT Device : public std::enable_shared_from_this, + public util::EqualityComparable { + public: + virtual ~Device(); + + /// \brief A shorthand for this device's type. + /// + /// The returned value is different for each device class, but is the + /// same for all instances of a given class. It can be used as a replacement + /// for RTTI. + virtual const char* type_name() const = 0; + + /// \brief A human-readable description of the device. + /// + /// The returned value should be detailed enough to distinguish between + /// different instances, where necessary. 
+ virtual std::string ToString() const = 0; + + /// \brief Whether this instance points to the same device as another one. + virtual bool Equals(const Device&) const = 0; + + /// \brief A device ID to identify this device if there are multiple of this type. + /// + /// If there is no "device_id" equivalent (such as for the main CPU device on + /// non-numa systems) returns -1. + virtual int64_t device_id() const { return -1; } + + /// \brief Whether this device is the main CPU device. + /// + /// This shorthand method is very useful when deciding whether a memory address + /// is CPU-accessible. + bool is_cpu() const { return is_cpu_; } + + /// \brief Return a MemoryManager instance tied to this device + /// + /// The returned instance uses default parameters for this device type's + /// MemoryManager implementation. Some devices also allow constructing + /// MemoryManager instances with non-default parameters. + virtual std::shared_ptr default_memory_manager() = 0; + + /// \brief Return the DeviceAllocationType of this device + virtual DeviceAllocationType device_type() const = 0; + + class SyncEvent; + + /// \brief EXPERIMENTAL: An opaque wrapper for Device-specific streams + /// + /// In essence this is just a wrapper around a void* to represent the + /// standard concept of a stream/queue on a device. Derived classes + /// should be trivially constructible from it's device-specific counterparts. + class ARROW_EXPORT Stream { + public: + using release_fn_t = std::function; + + virtual ~Stream() = default; + + virtual const void* get_raw() const { return stream_.get(); } + + /// \brief Make the stream wait on the provided event. + /// + /// Tells the stream that it should wait until the synchronization + /// event is completed without blocking the CPU. + virtual Status WaitEvent(const SyncEvent&) = 0; + + /// \brief Blocks the current thread until a stream's remaining tasks are completed + virtual Status Synchronize() const = 0; + + protected: + explicit Stream(void* stream, release_fn_t release_stream) + : stream_{stream, release_stream} {} + + std::unique_ptr stream_; + }; + + virtual Result> MakeStream() { return NULLPTR; } + + /// \brief Create a new device stream + /// + /// This should create the appropriate stream type for the device, + /// derived from Device::Stream to allow for stream ordered events + /// and memory allocations. + virtual Result> MakeStream(unsigned int flags) { + return NULLPTR; + } + + /// @brief Wrap an existing device stream alongside a release function + /// + /// @param device_stream a pointer to the stream to wrap + /// @param release_fn a function to call during destruction, `nullptr` or + /// a no-op function can be passed to indicate ownership is maintained + /// externally + virtual Result> WrapStream(void* device_stream, + Stream::release_fn_t release_fn) { + return NULLPTR; + } + + /// \brief EXPERIMENTAL: An object that provides event/stream sync primitives + class ARROW_EXPORT SyncEvent { + public: + using release_fn_t = std::function; + + virtual ~SyncEvent() = default; + + void* get_raw() { return sync_event_.get(); } + + /// @brief Block until sync event is completed. + virtual Status Wait() = 0; + + /// @brief Record the wrapped event on the stream so it triggers + /// the event when the stream gets to that point in its queue. + virtual Status Record(const Stream&) = 0; + + protected: + /// If creating this with a passed in event, the caller must ensure + /// that the event lives until clear_event is called on this as it + /// won't own it. 
+ explicit SyncEvent(void* sync_event, release_fn_t release_sync_event) + : sync_event_{sync_event, release_sync_event} {} + + std::unique_ptr sync_event_; + }; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Device); + explicit Device(bool is_cpu = false) : is_cpu_(is_cpu) {} + + bool is_cpu_; +}; + +/// \brief EXPERIMENTAL: An object that provides memory management primitives +/// +/// A MemoryManager is always tied to a particular Device instance. +/// It can also have additional parameters (such as a MemoryPool to +/// allocate CPU memory). +class ARROW_EXPORT MemoryManager : public std::enable_shared_from_this { + public: + virtual ~MemoryManager(); + + /// \brief The device this MemoryManager is tied to + const std::shared_ptr& device() const { return device_; } + + /// \brief Whether this MemoryManager is tied to the main CPU device. + /// + /// This shorthand method is very useful when deciding whether a memory address + /// is CPU-accessible. + bool is_cpu() const { return device_->is_cpu(); } + + /// \brief Create a RandomAccessFile to read a particular buffer. + /// + /// The given buffer must be tied to this MemoryManager. + /// + /// See also the Buffer::GetReader shorthand. + virtual Result> GetBufferReader( + std::shared_ptr buf) = 0; + + /// \brief Create a OutputStream to write to a particular buffer. + /// + /// The given buffer must be mutable and tied to this MemoryManager. + /// The returned stream object writes into the buffer's underlying memory + /// (but it won't resize it). + /// + /// See also the Buffer::GetWriter shorthand. + virtual Result> GetBufferWriter( + std::shared_ptr buf) = 0; + + /// \brief Allocate a (mutable) Buffer + /// + /// The buffer will be allocated in the device's memory. + virtual Result> AllocateBuffer(int64_t size) = 0; + + /// \brief Copy a Buffer to a destination MemoryManager + /// + /// See also the Buffer::Copy shorthand. + static Result> CopyBuffer( + const std::shared_ptr& source, const std::shared_ptr& to); + + /// \brief Copy a non-owned Buffer to a destination MemoryManager + /// + /// This is useful for cases where the source memory area is externally managed + /// (its lifetime not tied to the source Buffer), otherwise please use CopyBuffer(). + static Result> CopyNonOwned( + const Buffer& source, const std::shared_ptr& to); + + /// \brief Make a no-copy Buffer view in a destination MemoryManager + /// + /// See also the Buffer::View shorthand. + static Result> ViewBuffer( + const std::shared_ptr& source, const std::shared_ptr& to); + + /// \brief Create a new SyncEvent. + /// + /// This version should construct the appropriate event for the device and + /// provide the unique_ptr with the correct deleter for the event type. + /// If the device does not require or work with any synchronization, it is + /// allowed for it to return a nullptr. + virtual Result> MakeDeviceSyncEvent(); + + /// \brief Wrap an event into a SyncEvent. + /// + /// @param sync_event passed in sync_event (should be a pointer to the appropriate type) + /// @param release_sync_event destructor to free sync_event. 
`nullptr` may be + /// passed to indicate that no destruction/freeing is necessary + virtual Result> WrapDeviceSyncEvent( + void* sync_event, Device::SyncEvent::release_fn_t release_sync_event); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(MemoryManager); + + explicit MemoryManager(const std::shared_ptr& device) : device_(device) {} + + // Default implementations always return nullptr, should be overridden + // by subclasses that support data transfer. + // (returning nullptr means unsupported copy / view) + // In CopyBufferFrom and ViewBufferFrom, the `from` parameter is guaranteed to + // be equal to `buf->memory_manager()`. + virtual Result> CopyBufferFrom( + const std::shared_ptr& buf, const std::shared_ptr& from); + virtual Result> CopyBufferTo( + const std::shared_ptr& buf, const std::shared_ptr& to); + virtual Result> CopyNonOwnedFrom( + const Buffer& buf, const std::shared_ptr& from); + virtual Result> CopyNonOwnedTo( + const Buffer& buf, const std::shared_ptr& to); + virtual Result> ViewBufferFrom( + const std::shared_ptr& buf, const std::shared_ptr& from); + virtual Result> ViewBufferTo( + const std::shared_ptr& buf, const std::shared_ptr& to); + + std::shared_ptr device_; +}; + +// ---------------------------------------------------------------------- +// CPU backend implementation + +class ARROW_EXPORT CPUDevice : public Device { + public: + const char* type_name() const override; + std::string ToString() const override; + bool Equals(const Device&) const override; + DeviceAllocationType device_type() const override { return DeviceAllocationType::kCPU; } + + std::shared_ptr default_memory_manager() override; + + /// \brief Return the global CPUDevice instance + static std::shared_ptr Instance(); + + /// \brief Create a MemoryManager + /// + /// The returned MemoryManager will use the given MemoryPool for allocations. + static std::shared_ptr memory_manager(MemoryPool* pool); + + protected: + CPUDevice() : Device(true) {} +}; + +class ARROW_EXPORT CPUMemoryManager : public MemoryManager { + public: + Result> GetBufferReader( + std::shared_ptr buf) override; + Result> GetBufferWriter( + std::shared_ptr buf) override; + + Result> AllocateBuffer(int64_t size) override; + + /// \brief Return the MemoryPool associated with this MemoryManager. + MemoryPool* pool() const { return pool_; } + + protected: + CPUMemoryManager(const std::shared_ptr& device, MemoryPool* pool) + : MemoryManager(device), pool_(pool) {} + + static std::shared_ptr Make(const std::shared_ptr& device, + MemoryPool* pool = default_memory_pool()); + + Result> CopyBufferFrom( + const std::shared_ptr& buf, + const std::shared_ptr& from) override; + Result> CopyBufferTo( + const std::shared_ptr& buf, + const std::shared_ptr& to) override; + Result> CopyNonOwnedFrom( + const Buffer& buf, const std::shared_ptr& from) override; + Result> CopyNonOwnedTo( + const Buffer& buf, const std::shared_ptr& to) override; + Result> ViewBufferFrom( + const std::shared_ptr& buf, + const std::shared_ptr& from) override; + Result> ViewBufferTo( + const std::shared_ptr& buf, + const std::shared_ptr& to) override; + + MemoryPool* pool_; + + friend std::shared_ptr CPUDevice::memory_manager(MemoryPool* pool); + ARROW_FRIEND_EXPORT friend std::shared_ptr default_cpu_memory_manager(); +}; + +/// \brief Return the default CPU MemoryManager instance +/// +/// The returned singleton instance uses the default MemoryPool. +/// This function is a faster spelling of +/// `CPUDevice::Instance()->default_memory_manager()`. 
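+///
+/// For example (a minimal sketch; the buffer size is illustrative):
+///
+/// \code{.cpp}
+/// std::shared_ptr<arrow::MemoryManager> mm = arrow::default_cpu_memory_manager();
+/// ARROW_ASSIGN_OR_RAISE(auto buffer, mm->AllocateBuffer(4096));
+/// \endcode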
+ARROW_EXPORT +std::shared_ptr default_cpu_memory_manager(); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..712a828041c7664b823077f23b41d4610f8aa8f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h @@ -0,0 +1,272 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { + +/////////////////////////////////////////////////////////////////////// +// Helper tracking memory statistics + +class MemoryPoolStats { + public: + MemoryPoolStats() : bytes_allocated_(0), max_memory_(0) {} + + int64_t max_memory() const { return max_memory_.load(); } + + int64_t bytes_allocated() const { return bytes_allocated_.load(); } + + int64_t total_bytes_allocated() const { return total_allocated_bytes_.load(); } + + int64_t num_allocations() const { return num_allocs_.load(); } + + inline void UpdateAllocatedBytes(int64_t diff, bool is_free = false) { + auto allocated = bytes_allocated_.fetch_add(diff) + diff; + // "maximum" allocated memory is ill-defined in multi-threaded code, + // so don't try to be too rigorous here + if (diff > 0 && allocated > max_memory_) { + max_memory_ = allocated; + } + + // Reallocations might just expand/contract the allocation in place or might + // copy to a new location. We can't really know, so we just represent the + // optimistic case. + if (diff > 0) { + total_allocated_bytes_ += diff; + } + + // We count any reallocation as a allocation. + if (!is_free) { + num_allocs_ += 1; + } + } + + protected: + std::atomic bytes_allocated_ = 0; + std::atomic max_memory_ = 0; + std::atomic total_allocated_bytes_ = 0; + std::atomic num_allocs_ = 0; +}; + +} // namespace internal + +/// Base class for memory allocation on the CPU. +/// +/// Besides tracking the number of allocated bytes, the allocator also should +/// take care of the required 64-byte alignment. +class ARROW_EXPORT MemoryPool { + public: + virtual ~MemoryPool() = default; + + /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool + static std::unique_ptr CreateDefault(); + + /// Allocate a new memory region of at least size bytes. + /// + /// The allocated region shall be 64-byte aligned. + Status Allocate(int64_t size, uint8_t** out) { + return Allocate(size, kDefaultBufferAlignment, out); + } + + /// Allocate a new memory region of at least size bytes aligned to alignment. 
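+  ///
+  /// A minimal allocate/free sketch against the default pool (size and
+  /// alignment values are illustrative):
+  ///
+  /// \code{.cpp}
+  /// arrow::MemoryPool* pool = arrow::default_memory_pool();
+  /// uint8_t* data = nullptr;
+  /// ARROW_RETURN_NOT_OK(pool->Allocate(4096, /*alignment=*/64, &data));
+  /// // ... use the region ...
+  /// pool->Free(data, 4096, /*alignment=*/64);
+  /// \endcode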
+ virtual Status Allocate(int64_t size, int64_t alignment, uint8_t** out) = 0; + + /// Resize an already allocated memory section. + /// + /// As by default most default allocators on a platform don't support aligned + /// reallocation, this function can involve a copy of the underlying data. + virtual Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) = 0; + Status Reallocate(int64_t old_size, int64_t new_size, uint8_t** ptr) { + return Reallocate(old_size, new_size, kDefaultBufferAlignment, ptr); + } + + /// Free an allocated region. + /// + /// @param buffer Pointer to the start of the allocated memory region + /// @param size Allocated size located at buffer. An allocator implementation + /// may use this for tracking the amount of allocated bytes as well as for + /// faster deallocation if supported by its backend. + /// @param alignment The alignment of the allocation. Defaults to 64 bytes. + virtual void Free(uint8_t* buffer, int64_t size, int64_t alignment) = 0; + void Free(uint8_t* buffer, int64_t size) { + Free(buffer, size, kDefaultBufferAlignment); + } + + /// Return unused memory to the OS + /// + /// Only applies to allocators that hold onto unused memory. This will be + /// best effort, a memory pool may not implement this feature or may be + /// unable to fulfill the request due to fragmentation. + virtual void ReleaseUnused() {} + + /// The number of bytes that were allocated and not yet free'd through + /// this allocator. + virtual int64_t bytes_allocated() const = 0; + + /// Return peak memory allocation in this memory pool + /// + /// \return Maximum bytes allocated. If not known (or not implemented), + /// returns -1 + virtual int64_t max_memory() const; + + /// The number of bytes that were allocated. + virtual int64_t total_bytes_allocated() const = 0; + + /// The number of allocations or reallocations that were requested. + virtual int64_t num_allocations() const = 0; + + /// The name of the backend used by this MemoryPool (e.g. "system" or "jemalloc"). + virtual std::string backend_name() const = 0; + + protected: + MemoryPool() = default; +}; + +class ARROW_EXPORT LoggingMemoryPool : public MemoryPool { + public: + explicit LoggingMemoryPool(MemoryPool* pool); + ~LoggingMemoryPool() override = default; + + using MemoryPool::Allocate; + using MemoryPool::Free; + using MemoryPool::Reallocate; + + Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override; + Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) override; + void Free(uint8_t* buffer, int64_t size, int64_t alignment) override; + + int64_t bytes_allocated() const override; + + int64_t max_memory() const override; + + int64_t total_bytes_allocated() const override; + + int64_t num_allocations() const override; + + std::string backend_name() const override; + + private: + MemoryPool* pool_; +}; + +/// Derived class for memory allocation. +/// +/// Tracks the number of bytes and maximum memory allocated through its direct +/// calls. Actual allocation is delegated to MemoryPool class. 
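+///
+/// A small tracking sketch (the wrapped pool and sizes are illustrative):
+///
+/// \code{.cpp}
+/// arrow::ProxyMemoryPool tracked(arrow::default_memory_pool());
+/// uint8_t* data = nullptr;
+/// ARROW_RETURN_NOT_OK(tracked.Allocate(1024, &data));
+/// int64_t in_use = tracked.bytes_allocated();  // counts only this proxy's allocations
+/// tracked.Free(data, 1024);
+/// \endcode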
+class ARROW_EXPORT ProxyMemoryPool : public MemoryPool { + public: + explicit ProxyMemoryPool(MemoryPool* pool); + ~ProxyMemoryPool() override; + + using MemoryPool::Allocate; + using MemoryPool::Free; + using MemoryPool::Reallocate; + + Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override; + Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) override; + void Free(uint8_t* buffer, int64_t size, int64_t alignment) override; + + int64_t bytes_allocated() const override; + + int64_t max_memory() const override; + + int64_t total_bytes_allocated() const override; + + int64_t num_allocations() const override; + + std::string backend_name() const override; + + private: + class ProxyMemoryPoolImpl; + std::unique_ptr impl_; +}; + +/// \brief Return a process-wide memory pool based on the system allocator. +ARROW_EXPORT MemoryPool* system_memory_pool(); + +/// \brief Return a process-wide memory pool based on jemalloc. +/// +/// May return NotImplemented if jemalloc is not available. +ARROW_EXPORT Status jemalloc_memory_pool(MemoryPool** out); + +/// \brief Set jemalloc memory page purging behavior for future-created arenas +/// to the indicated number of milliseconds. See dirty_decay_ms and +/// muzzy_decay_ms options in jemalloc for a description of what these do. The +/// default is configured to 1000 (1 second) which releases memory more +/// aggressively to the operating system than the jemalloc default of 10 +/// seconds. If you set the value to 0, dirty / muzzy pages will be released +/// immediately rather than with a time decay, but this may reduce application +/// performance. +ARROW_EXPORT +Status jemalloc_set_decay_ms(int ms); + +/// \brief Get basic statistics from jemalloc's mallctl. +/// See the MALLCTL NAMESPACE section in jemalloc project documentation for +/// available stats. +ARROW_EXPORT +Result jemalloc_get_stat(const char* name); + +/// \brief Reset the counter for peak bytes allocated in the calling thread to zero. +/// This affects subsequent calls to thread.peak.read, but not the values returned by +/// thread.allocated or thread.deallocated. +ARROW_EXPORT +Status jemalloc_peak_reset(); + +/// \brief Print summary statistics in human-readable form to stderr. +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Status jemalloc_stats_print(const char* opts = ""); + +/// \brief Print summary statistics in human-readable form using a callback +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Status jemalloc_stats_print(std::function write_cb, + const char* opts = ""); + +/// \brief Get summary statistics in human-readable form. +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Result jemalloc_stats_string(const char* opts = ""); + +/// \brief Return a process-wide memory pool based on mimalloc. +/// +/// May return NotImplemented if mimalloc is not available. +ARROW_EXPORT Status mimalloc_memory_pool(MemoryPool** out); + +/// \brief Return the names of the backends supported by this Arrow build. 
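+///
+/// For example, to log which allocators this build can use (a sketch):
+///
+/// \code{.cpp}
+/// for (const std::string& name : arrow::SupportedMemoryBackendNames()) {
+///   std::cerr << "memory backend: " << name << std::endl;
+/// }
+/// \endcode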
+ARROW_EXPORT std::vector SupportedMemoryBackendNames(); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h new file mode 100644 index 0000000000000000000000000000000000000000..1a66fc3fb5629db2350de3139dd01e3a62196df1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h @@ -0,0 +1,367 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \class RecordBatch +/// \brief Collection of equal-length arrays matching a particular Schema +/// +/// A record batch is table-like data structure that is semantically a sequence +/// of fields, each a contiguous Arrow array +class ARROW_EXPORT RecordBatch { + public: + virtual ~RecordBatch() = default; + + /// \param[in] schema The record batch schema + /// \param[in] num_rows length of fields in the record batch. Each array + /// should have the same length as num_rows + /// \param[in] columns the record batch fields as vector of arrays + static std::shared_ptr Make(std::shared_ptr schema, + int64_t num_rows, + std::vector> columns); + + /// \brief Construct record batch from vector of internal data structures + /// \since 0.5.0 + /// + /// This class is intended for internal use, or advanced users. + /// + /// \param schema the record batch schema + /// \param num_rows the number of semantic rows in the record batch. This + /// should be equal to the length of each field + /// \param columns the data for the batch's columns + static std::shared_ptr Make( + std::shared_ptr schema, int64_t num_rows, + std::vector> columns); + + /// \brief Create an empty RecordBatch of a given schema + /// + /// The output RecordBatch will be created with DataTypes from + /// the given schema. + /// + /// \param[in] schema the schema of the empty RecordBatch + /// \param[in] pool the memory pool to allocate memory from + /// \return the resulting RecordBatch + static Result> MakeEmpty( + std::shared_ptr schema, MemoryPool* pool = default_memory_pool()); + + /// \brief Convert record batch to struct array + /// + /// Create a struct array whose child arrays are the record batch's columns. + /// Note that the record batch's top-level field metadata cannot be reflected + /// in the resulting struct array. 
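+  ///
+  /// For example (a sketch, given an existing batch):
+  ///
+  /// \code{.cpp}
+  /// // std::shared_ptr<arrow::RecordBatch> batch = ...;
+  /// ARROW_ASSIGN_OR_RAISE(auto as_struct, batch->ToStructArray());
+  /// \endcode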
+ Result> ToStructArray() const; + + /// \brief Construct record batch from struct array + /// + /// This constructs a record batch using the child arrays of the given + /// array, which must be a struct array. + /// + /// \param[in] array the source array, must be a StructArray + /// \param[in] pool the memory pool to allocate new validity bitmaps + /// + /// This operation will usually be zero-copy. However, if the struct array has an + /// offset or a validity bitmap then these will need to be pushed into the child arrays. + /// Pushing the offset is zero-copy but pushing the validity bitmap is not. + static Result> FromStructArray( + const std::shared_ptr& array, MemoryPool* pool = default_memory_pool()); + + /// \brief Determine if two record batches are exactly equal + /// + /// \param[in] other the RecordBatch to compare with + /// \param[in] check_metadata if true, check that Schema metadata is the same + /// \param[in] opts the options for equality comparisons + /// \return true if batches are equal + bool Equals(const RecordBatch& other, bool check_metadata = false, + const EqualOptions& opts = EqualOptions::Defaults()) const; + + /// \brief Determine if two record batches are approximately equal + /// + /// \param[in] other the RecordBatch to compare with + /// \param[in] opts the options for equality comparisons + /// \return true if batches are approximately equal + bool ApproxEquals(const RecordBatch& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + + /// \return the record batch's schema + const std::shared_ptr& schema() const { return schema_; } + + /// \brief Replace the schema with another schema with the same types, but potentially + /// different field names and/or metadata. + Result> ReplaceSchema( + std::shared_ptr schema) const; + + /// \brief Retrieve all columns at once + virtual const std::vector>& columns() const = 0; + + /// \brief Retrieve an array from the record batch + /// \param[in] i field index, does not boundscheck + /// \return an Array object + virtual std::shared_ptr column(int i) const = 0; + + /// \brief Retrieve an array from the record batch + /// \param[in] name field name + /// \return an Array or null if no field was found + std::shared_ptr GetColumnByName(const std::string& name) const; + + /// \brief Retrieve an array's internal data from the record batch + /// \param[in] i field index, does not boundscheck + /// \return an internal ArrayData object + virtual std::shared_ptr column_data(int i) const = 0; + + /// \brief Retrieve all arrays' internal data from the record batch. + virtual const ArrayDataVector& column_data() const = 0; + + /// \brief Add column to the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, which will be boundschecked + /// \param[in] field field to be added + /// \param[in] column column to be added + virtual Result> AddColumn( + int i, const std::shared_ptr& field, + const std::shared_ptr& column) const = 0; + + /// \brief Add new nullable column to the record batch, producing a new + /// RecordBatch. + /// + /// For non-nullable columns, use the Field-based version of this method. 
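+  ///
+  /// A usage sketch (the field name and appended column are illustrative):
+  ///
+  /// \code{.cpp}
+  /// // std::shared_ptr<arrow::Array> ids = ...;  // same length as the batch
+  /// ARROW_ASSIGN_OR_RAISE(auto with_ids, batch->AddColumn(0, "id", ids));
+  /// \endcode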
+ /// + /// \param[in] i field index, which will be boundschecked + /// \param[in] field_name name of field to be added + /// \param[in] column column to be added + virtual Result> AddColumn( + int i, std::string field_name, const std::shared_ptr& column) const; + + /// \brief Replace a column in the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, does boundscheck + /// \param[in] field field to be replaced + /// \param[in] column column to be replaced + virtual Result> SetColumn( + int i, const std::shared_ptr& field, + const std::shared_ptr& column) const = 0; + + /// \brief Remove column from the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, does boundscheck + virtual Result> RemoveColumn(int i) const = 0; + + virtual std::shared_ptr ReplaceSchemaMetadata( + const std::shared_ptr& metadata) const = 0; + + /// \brief Name in i-th column + const std::string& column_name(int i) const; + + /// \return the number of columns in the table + int num_columns() const; + + /// \return the number of rows (the corresponding length of each column) + int64_t num_rows() const { return num_rows_; } + + /// \brief Slice each of the arrays in the record batch + /// \param[in] offset the starting offset to slice, through end of batch + /// \return new record batch + virtual std::shared_ptr Slice(int64_t offset) const; + + /// \brief Slice each of the arrays in the record batch + /// \param[in] offset the starting offset to slice + /// \param[in] length the number of elements to slice from offset + /// \return new record batch + virtual std::shared_ptr Slice(int64_t offset, int64_t length) const = 0; + + /// \return PrettyPrint representation suitable for debugging + std::string ToString() const; + + /// \brief Return new record batch with specified columns + Result> SelectColumns( + const std::vector& indices) const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the record batch's schema and internal data. + /// + /// This is O(k) where k is the total number of fields and array descendents. + /// + /// \return Status + virtual Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the record batch's schema and internal data. + /// + /// This is potentially O(k*n) where n is the number of rows. + /// + /// \return Status + virtual Status ValidateFull() const; + + protected: + RecordBatch(const std::shared_ptr& schema, int64_t num_rows); + + std::shared_ptr schema_; + int64_t num_rows_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(RecordBatch); +}; + +struct ARROW_EXPORT RecordBatchWithMetadata { + std::shared_ptr batch; + std::shared_ptr custom_metadata; +}; + +/// \brief Abstract interface for reading stream of record batches +class ARROW_EXPORT RecordBatchReader { + public: + using ValueType = std::shared_ptr; + + virtual ~RecordBatchReader(); + + /// \return the shared schema of the record batches in the stream + virtual std::shared_ptr schema() const = 0; + + /// \brief Read the next record batch in the stream. 
Return null for batch + /// when reaching end of stream + /// + /// \param[out] batch the next loaded batch, null at end of stream + /// \return Status + virtual Status ReadNext(std::shared_ptr* batch) = 0; + + virtual Result ReadNext() { + return Status::NotImplemented("ReadNext with custom metadata"); + } + + /// \brief Iterator interface + Result> Next() { + std::shared_ptr batch; + ARROW_RETURN_NOT_OK(ReadNext(&batch)); + return batch; + } + + /// \brief finalize reader + virtual Status Close() { return Status::OK(); } + + class RecordBatchReaderIterator { + public: + using iterator_category = std::input_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = std::shared_ptr; + using pointer = value_type const*; + using reference = value_type const&; + + RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(NULLPTR) {} + + explicit RecordBatchReaderIterator(RecordBatchReader* reader) + : batch_(RecordBatchEnd()), reader_(reader) { + Next(); + } + + bool operator==(const RecordBatchReaderIterator& other) const { + return batch_ == other.batch_; + } + + bool operator!=(const RecordBatchReaderIterator& other) const { + return !(*this == other); + } + + Result> operator*() { + ARROW_RETURN_NOT_OK(batch_.status()); + + return batch_; + } + + RecordBatchReaderIterator& operator++() { + Next(); + return *this; + } + + RecordBatchReaderIterator operator++(int) { + RecordBatchReaderIterator tmp(*this); + Next(); + return tmp; + } + + private: + std::shared_ptr RecordBatchEnd() { + return std::shared_ptr(NULLPTR); + } + + void Next() { + if (reader_ == NULLPTR) { + batch_ = RecordBatchEnd(); + return; + } + batch_ = reader_->Next(); + } + + Result> batch_; + RecordBatchReader* reader_; + }; + /// \brief Return an iterator to the first record batch in the stream + RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); } + + /// \brief Return an iterator to the end of the stream + RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); } + + /// \brief Consume entire stream as a vector of record batches + Result ToRecordBatches(); + + /// \brief Read all batches and concatenate as arrow::Table + Result> ToTable(); + + /// \brief Create a RecordBatchReader from a vector of RecordBatch. + /// + /// \param[in] batches the vector of RecordBatch to read from + /// \param[in] schema schema to conform to. Will be inferred from the first + /// element if not provided. + static Result> Make( + RecordBatchVector batches, std::shared_ptr schema = NULLPTR); + + /// \brief Create a RecordBatchReader from an Iterator of RecordBatch. + /// + /// \param[in] batches an iterator of RecordBatch to read from. + /// \param[in] schema schema that each record batch in iterator will conform to. + static Result> MakeFromIterator( + Iterator> batches, std::shared_ptr schema); +}; + +/// \brief Concatenate record batches +/// +/// The columns of the new batch are formed by concatenate the same columns of each input +/// batch. Concatenate multiple batches into a new batch requires that the schema must be +/// consistent. It supports merging batches without columns (only length, scenarios such +/// as count(*)). 
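+///
+/// For example (a sketch; the input batches are assumed to share a schema):
+///
+/// \code{.cpp}
+/// ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatch> combined,
+///                       arrow::ConcatenateRecordBatches({batch1, batch2}));
+/// \endcode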
+/// +/// \param[in] batches a vector of record batches to be concatenated +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return the concatenated record batch +ARROW_EXPORT +Result> ConcatenateRecordBatches( + const RecordBatchVector& batches, MemoryPool* pool = default_memory_pool()); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/status.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/status.h new file mode 100644 index 0000000000000000000000000000000000000000..ac384fc389a491515ef2790666ea740f74b9c5c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/status.h @@ -0,0 +1,471 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// A Status encapsulates the result of an operation. It may indicate success, +// or it may indicate an error with an associated error message. +// +// Multiple threads can invoke const methods on a Status without +// external synchronization, but if any of the threads may call a +// non-const method, all threads accessing the same Status must use +// external synchronization. + +// Adapted from Apache Kudu, TensorFlow + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/visibility.h" + +#ifdef ARROW_EXTRA_ERROR_CONTEXT + +/// \brief Return with given status if condition is met. +#define ARROW_RETURN_IF_(condition, status, expr) \ + do { \ + if (ARROW_PREDICT_FALSE(condition)) { \ + ::arrow::Status _st = (status); \ + _st.AddContextLine(__FILE__, __LINE__, expr); \ + return _st; \ + } \ + } while (0) + +#else + +#define ARROW_RETURN_IF_(condition, status, _) \ + do { \ + if (ARROW_PREDICT_FALSE(condition)) { \ + return (status); \ + } \ + } while (0) + +#endif // ARROW_EXTRA_ERROR_CONTEXT + +#define ARROW_RETURN_IF(condition, status) \ + ARROW_RETURN_IF_(condition, status, ARROW_STRINGIFY(status)) + +/// \brief Propagate any non-successful Status to the caller +#define ARROW_RETURN_NOT_OK(status) \ + do { \ + ::arrow::Status __s = ::arrow::internal::GenericToStatus(status); \ + ARROW_RETURN_IF_(!__s.ok(), __s, ARROW_STRINGIFY(status)); \ + } while (false) + +/// \brief Given `expr` and `warn_msg`; log `warn_msg` if `expr` is a non-ok status +#define ARROW_WARN_NOT_OK(expr, warn_msg) \ + do { \ + ::arrow::Status _s = (expr); \ + if (ARROW_PREDICT_FALSE(!_s.ok())) { \ + _s.Warn(warn_msg); \ + } \ + } while (false) + +#define RETURN_NOT_OK_ELSE(s, else_) \ + do { \ + ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \ + if (!_s.ok()) { \ + else_; \ + return _s; \ + } \ + } while (false) + +// This is an internal-use macro and should not be used in public headers. +#ifndef RETURN_NOT_OK +#define RETURN_NOT_OK(s) ARROW_RETURN_NOT_OK(s) +#endif + +namespace arrow { + +enum class StatusCode : char { + OK = 0, + OutOfMemory = 1, + KeyError = 2, + TypeError = 3, + Invalid = 4, + IOError = 5, + CapacityError = 6, + IndexError = 7, + Cancelled = 8, + UnknownError = 9, + NotImplemented = 10, + SerializationError = 11, + RError = 13, + // Gandiva range of errors + CodeGenError = 40, + ExpressionValidationError = 41, + ExecutionError = 42, + // Continue generic codes. 
+ AlreadyExists = 45 +}; + +/// \brief An opaque class that allows subsystems to retain +/// additional information inside the Status. +class ARROW_EXPORT StatusDetail { + public: + virtual ~StatusDetail() = default; + /// \brief Return a unique id for the type of the StatusDetail + /// (effectively a poor man's substitute for RTTI). + virtual const char* type_id() const = 0; + /// \brief Produce a human-readable description of this status. + virtual std::string ToString() const = 0; + + bool operator==(const StatusDetail& other) const noexcept { + return std::string(type_id()) == other.type_id() && ToString() == other.ToString(); + } +}; + +/// \brief Status outcome object (success or error) +/// +/// The Status object is an object holding the outcome of an operation. +/// The outcome is represented as a StatusCode, either success +/// (StatusCode::OK) or an error (any other of the StatusCode enumeration values). +/// +/// Additionally, if an error occurred, a specific error message is generally +/// attached. +class ARROW_EXPORT [[nodiscard]] Status : public util::EqualityComparable, + public util::ToStringOstreamable { + public: + // Create a success status. + constexpr Status() noexcept : state_(NULLPTR) {} + ~Status() noexcept { + // ARROW-2400: On certain compilers, splitting off the slow path improves + // performance significantly. + if (ARROW_PREDICT_FALSE(state_ != NULL)) { + DeleteState(); + } + } + + Status(StatusCode code, const std::string& msg); + /// \brief Pluggable constructor for use by sub-systems. detail cannot be null. + Status(StatusCode code, std::string msg, std::shared_ptr detail); + + // Copy the specified status. + inline Status(const Status& s); + inline Status& operator=(const Status& s); + + // Move the specified status. + inline Status(Status&& s) noexcept; + inline Status& operator=(Status&& s) noexcept; + + inline bool Equals(const Status& s) const; + + // AND the statuses. + inline Status operator&(const Status& s) const noexcept; + inline Status operator&(Status&& s) const noexcept; + inline Status& operator&=(const Status& s) noexcept; + inline Status& operator&=(Status&& s) noexcept; + + /// Return a success status + static Status OK() { return Status(); } + + template + static Status FromArgs(StatusCode code, Args&&... args) { + return Status(code, util::StringBuilder(std::forward(args)...)); + } + + template + static Status FromDetailAndArgs(StatusCode code, std::shared_ptr detail, + Args&&... args) { + return Status(code, util::StringBuilder(std::forward(args)...), + std::move(detail)); + } + + /// Return an error status for out-of-memory conditions + template + static Status OutOfMemory(Args&&... args) { + return Status::FromArgs(StatusCode::OutOfMemory, std::forward(args)...); + } + + /// Return an error status for failed key lookups (e.g. column name in a table) + template + static Status KeyError(Args&&... args) { + return Status::FromArgs(StatusCode::KeyError, std::forward(args)...); + } + + /// Return an error status for type errors (such as mismatching data types) + template + static Status TypeError(Args&&... args) { + return Status::FromArgs(StatusCode::TypeError, std::forward(args)...); + } + + /// Return an error status for unknown errors + template + static Status UnknownError(Args&&... 
args) { + return Status::FromArgs(StatusCode::UnknownError, std::forward(args)...); + } + + /// Return an error status when an operation or a combination of operation and + /// data types is unimplemented + template + static Status NotImplemented(Args&&... args) { + return Status::FromArgs(StatusCode::NotImplemented, std::forward(args)...); + } + + /// Return an error status for invalid data (for example a string that fails parsing) + template + static Status Invalid(Args&&... args) { + return Status::FromArgs(StatusCode::Invalid, std::forward(args)...); + } + + /// Return an error status for cancelled operation + template + static Status Cancelled(Args&&... args) { + return Status::FromArgs(StatusCode::Cancelled, std::forward(args)...); + } + + /// Return an error status when an index is out of bounds + template + static Status IndexError(Args&&... args) { + return Status::FromArgs(StatusCode::IndexError, std::forward(args)...); + } + + /// Return an error status when a container's capacity would exceed its limits + template + static Status CapacityError(Args&&... args) { + return Status::FromArgs(StatusCode::CapacityError, std::forward(args)...); + } + + /// Return an error status when some IO-related operation failed + template + static Status IOError(Args&&... args) { + return Status::FromArgs(StatusCode::IOError, std::forward(args)...); + } + + /// Return an error status when some (de)serialization operation failed + template + static Status SerializationError(Args&&... args) { + return Status::FromArgs(StatusCode::SerializationError, std::forward(args)...); + } + + template + static Status RError(Args&&... args) { + return Status::FromArgs(StatusCode::RError, std::forward(args)...); + } + + template + static Status CodeGenError(Args&&... args) { + return Status::FromArgs(StatusCode::CodeGenError, std::forward(args)...); + } + + template + static Status ExpressionValidationError(Args&&... args) { + return Status::FromArgs(StatusCode::ExpressionValidationError, + std::forward(args)...); + } + + template + static Status ExecutionError(Args&&... args) { + return Status::FromArgs(StatusCode::ExecutionError, std::forward(args)...); + } + + template + static Status AlreadyExists(Args&&... args) { + return Status::FromArgs(StatusCode::AlreadyExists, std::forward(args)...); + } + + /// Return true iff the status indicates success. + constexpr bool ok() const { return (state_ == NULLPTR); } + + /// Return true iff the status indicates an out-of-memory error. + constexpr bool IsOutOfMemory() const { return code() == StatusCode::OutOfMemory; } + /// Return true iff the status indicates a key lookup error. + constexpr bool IsKeyError() const { return code() == StatusCode::KeyError; } + /// Return true iff the status indicates invalid data. + constexpr bool IsInvalid() const { return code() == StatusCode::Invalid; } + /// Return true iff the status indicates a cancelled operation. + constexpr bool IsCancelled() const { return code() == StatusCode::Cancelled; } + /// Return true iff the status indicates an IO-related failure. + constexpr bool IsIOError() const { return code() == StatusCode::IOError; } + /// Return true iff the status indicates a container reaching capacity limits. + constexpr bool IsCapacityError() const { return code() == StatusCode::CapacityError; } + /// Return true iff the status indicates an out of bounds index. + constexpr bool IsIndexError() const { return code() == StatusCode::IndexError; } + /// Return true iff the status indicates a type error. 
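+  ///
+  /// For example (a sketch):
+  ///
+  /// \code{.cpp}
+  /// arrow::Status st = arrow::Status::TypeError("expected int32, got utf8");
+  /// if (st.IsTypeError()) {
+  ///   // e.g. surface st.message() to the caller
+  /// }
+  /// \endcode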
+ constexpr bool IsTypeError() const { return code() == StatusCode::TypeError; } + /// Return true iff the status indicates an unknown error. + constexpr bool IsUnknownError() const { return code() == StatusCode::UnknownError; } + /// Return true iff the status indicates an unimplemented operation. + constexpr bool IsNotImplemented() const { return code() == StatusCode::NotImplemented; } + /// Return true iff the status indicates a (de)serialization failure + constexpr bool IsSerializationError() const { + return code() == StatusCode::SerializationError; + } + /// Return true iff the status indicates a R-originated error. + constexpr bool IsRError() const { return code() == StatusCode::RError; } + + constexpr bool IsCodeGenError() const { return code() == StatusCode::CodeGenError; } + + constexpr bool IsExpressionValidationError() const { + return code() == StatusCode::ExpressionValidationError; + } + + constexpr bool IsExecutionError() const { return code() == StatusCode::ExecutionError; } + constexpr bool IsAlreadyExists() const { return code() == StatusCode::AlreadyExists; } + + /// \brief Return a string representation of this status suitable for printing. + /// + /// The string "OK" is returned for success. + std::string ToString() const; + + /// \brief Return a string representation of this status without + /// context lines suitable for printing. + /// + /// The string "OK" is returned for success. + std::string ToStringWithoutContextLines() const; + + /// \brief Return a string representation of the status code, without the message + /// text or POSIX code information. + std::string CodeAsString() const; + static std::string CodeAsString(StatusCode); + + /// \brief Return the StatusCode value attached to this status. + constexpr StatusCode code() const { return ok() ? StatusCode::OK : state_->code; } + + /// \brief Return the specific error message attached to this status. + const std::string& message() const { + static const std::string no_message = ""; + return ok() ? no_message : state_->msg; + } + + /// \brief Return the status detail attached to this message. + const std::shared_ptr& detail() const { + static std::shared_ptr no_detail = NULLPTR; + return state_ ? state_->detail : no_detail; + } + + /// \brief Return a new Status copying the existing status, but + /// updating with the existing detail. + Status WithDetail(std::shared_ptr new_detail) const { + return Status(code(), message(), std::move(new_detail)); + } + + /// \brief Return a new Status with changed message, copying the + /// existing status code and detail. + template + Status WithMessage(Args&&... args) const { + return FromArgs(code(), std::forward(args)...).WithDetail(detail()); + } + + void Warn() const; + void Warn(const std::string& message) const; + + [[noreturn]] void Abort() const; + [[noreturn]] void Abort(const std::string& message) const; + +#ifdef ARROW_EXTRA_ERROR_CONTEXT + void AddContextLine(const char* filename, int line, const char* expr); +#endif + + private: + struct State { + StatusCode code; + std::string msg; + std::shared_ptr detail; + }; + // OK status has a `NULL` state_. 
Otherwise, `state_` points to + // a `State` structure containing the error code and message(s) + State* state_; + + void DeleteState() { + delete state_; + state_ = NULLPTR; + } + void CopyFrom(const Status& s); + inline void MoveFrom(Status& s); +}; + +void Status::MoveFrom(Status& s) { + delete state_; + state_ = s.state_; + s.state_ = NULLPTR; +} + +Status::Status(const Status& s) + : state_((s.state_ == NULLPTR) ? NULLPTR : new State(*s.state_)) {} + +Status& Status::operator=(const Status& s) { + // The following condition catches both aliasing (when this == &s), + // and the common case where both s and *this are ok. + if (state_ != s.state_) { + CopyFrom(s); + } + return *this; +} + +Status::Status(Status&& s) noexcept : state_(s.state_) { s.state_ = NULLPTR; } + +Status& Status::operator=(Status&& s) noexcept { + MoveFrom(s); + return *this; +} + +bool Status::Equals(const Status& s) const { + if (state_ == s.state_) { + return true; + } + + if (ok() || s.ok()) { + return false; + } + + if (detail() != s.detail()) { + if ((detail() && !s.detail()) || (!detail() && s.detail())) { + return false; + } + return *detail() == *s.detail(); + } + + return code() == s.code() && message() == s.message(); +} + +/// \cond FALSE +// (note: emits warnings on Doxygen < 1.8.15, +// see https://github.com/doxygen/doxygen/issues/6295) +Status Status::operator&(const Status& s) const noexcept { + if (ok()) { + return s; + } else { + return *this; + } +} + +Status Status::operator&(Status&& s) const noexcept { + if (ok()) { + return std::move(s); + } else { + return *this; + } +} + +Status& Status::operator&=(const Status& s) noexcept { + if (ok() && !s.ok()) { + CopyFrom(s); + } + return *this; +} + +Status& Status::operator&=(Status&& s) noexcept { + if (ok() && !s.ok()) { + MoveFrom(s); + } + return *this; +} +/// \endcond + +namespace internal { + +// Extract Status from Status or Result +// Useful for the status check macros such as RETURN_NOT_OK. +inline const Status& GenericToStatus(const Status& st) { return st; } +inline Status GenericToStatus(Status&& st) { return std::move(st); } + +} // namespace internal + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl.h new file mode 100644 index 0000000000000000000000000000000000000000..b542ee5c348683c622c500b5b3564ac5947e7840 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl.h @@ -0,0 +1,462 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_nested.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/chunked_array.h" +#include "arrow/compute/api.h" +#include "arrow/status.h" +#include "arrow/table.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class Schema; + +namespace stl { + +namespace internal { + +template +struct is_optional_like : public std::false_type {}; + +template +struct is_dereferencable : public std::false_type {}; + +template +struct is_dereferencable())>> + : public std::true_type {}; + +template +struct is_optional_like< + T, typename std::enable_if< + std::is_constructible::value && is_dereferencable::value && + !std::is_array::type>::value>::type> + : public std::true_type {}; + +template +using BareTupleElement = + typename std::decay::type>::type; + +} // namespace internal + +template +using enable_if_optional_like = + typename std::enable_if::value, R>::type; + +/// Traits meta class to map standard C/C++ types to equivalent Arrow types. +template +struct ConversionTraits {}; + +/// Returns builder type for given standard C/C++ type. +template +using CBuilderType = + typename TypeTraits::ArrowType>::BuilderType; + +/// Default implementation of AppendListValues. +/// +/// This function can be specialized by user to take advantage of appending +/// contiguous ranges while appending. This default implementation will call +/// ConversionTraits::AppendRow() for each value in the range. +template +inline Status AppendListValues(CBuilderType& value_builder, + Range&& cell_range) { + for (auto const& value : cell_range) { + ARROW_RETURN_NOT_OK(ConversionTraits::AppendRow(value_builder, value)); + } + return Status::OK(); +} + +#define ARROW_STL_CONVERSION(CType_, ArrowType_) \ + template <> \ + struct ConversionTraits : public CTypeTraits { \ + static Status AppendRow(typename TypeTraits::BuilderType& builder, \ + CType_ cell) { \ + return builder.Append(cell); \ + } \ + static CType_ GetEntry(const typename TypeTraits::ArrayType& array, \ + size_t j) { \ + return array.Value(j); \ + } \ + }; \ + \ + template <> \ + inline Status AppendListValues&>( \ + typename TypeTraits::BuilderType & value_builder, \ + const std::vector& cell_range) { \ + return value_builder.AppendValues(cell_range); \ + } + +ARROW_STL_CONVERSION(bool, BooleanType) +ARROW_STL_CONVERSION(int8_t, Int8Type) +ARROW_STL_CONVERSION(int16_t, Int16Type) +ARROW_STL_CONVERSION(int32_t, Int32Type) +ARROW_STL_CONVERSION(int64_t, Int64Type) +ARROW_STL_CONVERSION(uint8_t, UInt8Type) +ARROW_STL_CONVERSION(uint16_t, UInt16Type) +ARROW_STL_CONVERSION(uint32_t, UInt32Type) +ARROW_STL_CONVERSION(uint64_t, UInt64Type) +ARROW_STL_CONVERSION(float, FloatType) +ARROW_STL_CONVERSION(double, DoubleType) + +template <> +struct ConversionTraits : public CTypeTraits { + static Status AppendRow(StringBuilder& builder, const std::string& cell) { + return builder.Append(cell); + } + static std::string GetEntry(const StringArray& array, size_t j) { + return array.GetString(j); + } +}; + +/// Append cell range elements as a single value to the list builder. +/// +/// Cell range will be added to child builder using AppendListValues() +/// if provided. 
AppendListValues() has a default implementation, but +/// it can be specialized by users. +template +Status AppendCellRange(ListBuilderType& builder, Range&& cell_range) { + constexpr bool is_list_builder = std::is_same::value; + constexpr bool is_large_list_builder = + std::is_same::value; + static_assert( + is_list_builder || is_large_list_builder, + "Builder type must be either ListBuilder or LargeListBuilder for appending " + "multiple rows."); + + using ChildBuilderType = CBuilderType; + ARROW_RETURN_NOT_OK(builder.Append()); + auto& value_builder = + ::arrow::internal::checked_cast(*builder.value_builder()); + + // XXX: Remove appended value before returning if status isn't OK? + return AppendListValues(value_builder, std::forward(cell_range)); +} + +template +struct ConversionTraits> + : public CTypeTraits> { + static Status AppendRow(ListBuilder& builder, const std::vector& cell) { + return AppendCellRange(builder, cell); + } + + static std::vector GetEntry(const ListArray& array, size_t j) { + using ElementArrayType = + typename TypeTraits::ArrowType>::ArrayType; + + const ElementArrayType& value_array = + ::arrow::internal::checked_cast(*array.values()); + + std::vector vec(array.value_length(j)); + for (int64_t i = 0; i < array.value_length(j); i++) { + vec[i] = + ConversionTraits::GetEntry(value_array, array.value_offset(j) + i); + } + return vec; + } +}; + +template +struct ConversionTraits> + : public CTypeTraits())>::type> { + using OptionalInnerType = + typename std::decay())>::type; + using typename CTypeTraits::ArrowType; + using CTypeTraits::type_singleton; + + static Status AppendRow(typename TypeTraits::BuilderType& builder, + const Optional& cell) { + if (cell) { + return ConversionTraits::AppendRow(builder, *cell); + } else { + return builder.AppendNull(); + } + } +}; + +/// Build an arrow::Schema based upon the types defined in a std::tuple-like structure. +/// +/// While the type information is available at compile-time, we still need to add the +/// column names at runtime, thus these methods are not constexpr. +template ::value> +struct SchemaFromTuple { + using Element = internal::BareTupleElement; + + // Implementations that take a vector-like object for the column names. + + /// Recursively build a vector of arrow::Field from the defined types. + /// + /// In most cases MakeSchema is the better entrypoint for the Schema creation. + static std::vector> MakeSchemaRecursion( + const std::vector& names) { + std::vector> ret = + SchemaFromTuple::MakeSchemaRecursion(names); + auto type = ConversionTraits::type_singleton(); + ret.push_back(field(names[N - 1], type, internal::is_optional_like::value)); + return ret; + } + + /// Build a Schema from the types of the tuple-like structure passed in as template + /// parameter assign the column names at runtime. + /// + /// An example usage of this API can look like the following: + /// + /// \code{.cpp} + /// using TupleType = std::tuple>; + /// std::shared_ptr schema = + /// SchemaFromTuple::MakeSchema({"int_column", "list_of_strings_column"}); + /// \endcode + static std::shared_ptr MakeSchema(const std::vector& names) { + return std::make_shared(MakeSchemaRecursion(names)); + } + + // Implementations that take a tuple-like object for the column names. + + /// Recursively build a vector of arrow::Field from the defined types. + /// + /// In most cases MakeSchema is the better entrypoint for the Schema creation. 
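+  ///
+  /// As with the vector-of-names variant above, the usual entrypoint is
+  /// MakeSchema; a minimal sketch with the names supplied as a tuple
+  /// (the column names here are illustrative only):
+  ///
+  /// \code{.cpp}
+  /// using TupleType = std::tuple<int32_t, std::string>;
+  /// std::shared_ptr<Schema> schema = SchemaFromTuple<TupleType>::MakeSchema(
+  ///     std::make_tuple("int_column", "string_column"));
+  /// \endcode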
+ template + static std::vector> MakeSchemaRecursionT( + const NamesTuple& names) { + using std::get; + + std::vector> ret = + SchemaFromTuple::MakeSchemaRecursionT(names); + std::shared_ptr type = ConversionTraits::type_singleton(); + ret.push_back( + field(get(names), type, internal::is_optional_like::value)); + return ret; + } + + /// Build a Schema from the types of the tuple-like structure passed in as template + /// parameter assign the column names at runtime. + /// + /// An example usage of this API can look like the following: + /// + /// \code{.cpp} + /// using TupleType = std::tuple>; + /// std::shared_ptr schema = + /// SchemaFromTuple::MakeSchema({"int_column", "list_of_strings_column"}); + /// \endcode + template + static std::shared_ptr MakeSchema(const NamesTuple& names) { + return std::make_shared(MakeSchemaRecursionT(names)); + } +}; + +template +struct SchemaFromTuple { + static std::vector> MakeSchemaRecursion( + const std::vector& names) { + std::vector> ret; + ret.reserve(names.size()); + return ret; + } + + template + static std::vector> MakeSchemaRecursionT( + const NamesTuple& names) { + std::vector> ret; + ret.reserve(std::tuple_size::value); + return ret; + } +}; + +namespace internal { + +template ::value> +struct CreateBuildersRecursive { + static Status Make(MemoryPool* pool, + std::vector>* builders) { + using Element = BareTupleElement; + std::shared_ptr type = ConversionTraits::type_singleton(); + ARROW_RETURN_NOT_OK(MakeBuilder(pool, type, &builders->at(N - 1))); + + return CreateBuildersRecursive::Make(pool, builders); + } +}; + +template +struct CreateBuildersRecursive { + static Status Make(MemoryPool*, std::vector>*) { + return Status::OK(); + } +}; + +template ::value> +struct RowIterator { + static Status Append(const std::vector>& builders, + const Tuple& row) { + using std::get; + using Element = BareTupleElement; + using BuilderType = + typename TypeTraits::ArrowType>::BuilderType; + + BuilderType& builder = + ::arrow::internal::checked_cast(*builders[N - 1]); + ARROW_RETURN_NOT_OK(ConversionTraits::AppendRow(builder, get(row))); + + return RowIterator::Append(builders, row); + } +}; + +template +struct RowIterator { + static Status Append(const std::vector>& builders, + const Tuple& row) { + return Status::OK(); + } +}; + +template ::value> +struct EnsureColumnTypes { + static Status Cast(const Table& table, std::shared_ptr
* table_owner, + const compute::CastOptions& cast_options, compute::ExecContext* ctx, + std::reference_wrapper* result) { + using Element = BareTupleElement; + std::shared_ptr expected_type = ConversionTraits::type_singleton(); + + if (!table.schema()->field(N - 1)->type()->Equals(*expected_type)) { + ARROW_ASSIGN_OR_RAISE( + Datum casted, + compute::Cast(table.column(N - 1), expected_type, cast_options, ctx)); + auto new_field = table.schema()->field(N - 1)->WithType(expected_type); + ARROW_ASSIGN_OR_RAISE(*table_owner, + table.SetColumn(N - 1, new_field, casted.chunked_array())); + *result = **table_owner; + } + + return EnsureColumnTypes::Cast(result->get(), table_owner, cast_options, + ctx, result); + } +}; + +template +struct EnsureColumnTypes { + static Status Cast(const Table& table, std::shared_ptr
* table_owner, + const compute::CastOptions& cast_options, compute::ExecContext* ctx, + std::reference_wrapper* result) { + return Status::OK(); + } +}; + +template ::value> +struct TupleSetter { + static void Fill(const Table& table, Range* rows) { + using std::get; + using Element = typename std::tuple_element::type; + using ArrayType = + typename TypeTraits::ArrowType>::ArrayType; + + auto iter = rows->begin(); + const ChunkedArray& chunked_array = *table.column(N - 1); + for (int i = 0; i < chunked_array.num_chunks(); i++) { + const ArrayType& array = + ::arrow::internal::checked_cast(*chunked_array.chunk(i)); + for (int64_t j = 0; j < array.length(); j++) { + get(*iter++) = ConversionTraits::GetEntry(array, j); + } + } + + return TupleSetter::Fill(table, rows); + } +}; + +template +struct TupleSetter { + static void Fill(const Table& table, Range* rows) {} +}; + +} // namespace internal + +template +Status TableFromTupleRange(MemoryPool* pool, Range&& rows, + const std::vector& names, + std::shared_ptr
* table) { + using row_type = typename std::iterator_traits::value_type; + constexpr std::size_t n_columns = std::tuple_size::value; + + std::shared_ptr schema = SchemaFromTuple::MakeSchema(names); + + std::vector> builders(n_columns); + ARROW_RETURN_NOT_OK(internal::CreateBuildersRecursive::Make(pool, &builders)); + + for (auto const& row : rows) { + ARROW_RETURN_NOT_OK(internal::RowIterator::Append(builders, row)); + } + + std::vector> arrays; + for (auto const& builder : builders) { + std::shared_ptr array; + ARROW_RETURN_NOT_OK(builder->Finish(&array)); + arrays.emplace_back(array); + } + + *table = Table::Make(std::move(schema), std::move(arrays)); + + return Status::OK(); +} + +template +Status TupleRangeFromTable(const Table& table, const compute::CastOptions& cast_options, + compute::ExecContext* ctx, Range* rows) { + using row_type = typename std::decay::type; + constexpr std::size_t n_columns = std::tuple_size::value; + + if (table.schema()->num_fields() != n_columns) { + return Status::Invalid( + "Number of columns in the table does not match the width of the target: ", + table.schema()->num_fields(), " != ", n_columns); + } + + if (std::size(*rows) != static_cast(table.num_rows())) { + return Status::Invalid( + "Number of rows in the table does not match the size of the target: ", + table.num_rows(), " != ", std::size(*rows)); + } + + // Check that all columns have the correct type, otherwise cast them. + std::shared_ptr
table_owner; + std::reference_wrapper current_table(table); + + ARROW_RETURN_NOT_OK(internal::EnsureColumnTypes::Cast( + table, &table_owner, cast_options, ctx, ¤t_table)); + + internal::TupleSetter::Fill(current_table.get(), rows); + + return Status::OK(); +} + +} // namespace stl +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_allocator.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..a1f4ae9feb82b2bc4d9b067822fb7a5f07454d5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_allocator.h @@ -0,0 +1,164 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace stl { + +/// \brief A STL allocator delegating allocations to a Arrow MemoryPool +template +class allocator { + public: + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + template + struct rebind { + using other = allocator; + }; + + /// \brief Construct an allocator from the default MemoryPool + allocator() noexcept : pool_(default_memory_pool()) {} + /// \brief Construct an allocator from the given MemoryPool + explicit allocator(MemoryPool* pool) noexcept : pool_(pool) {} + + template + allocator(const allocator& rhs) noexcept : pool_(rhs.pool()) {} + + ~allocator() { pool_ = NULLPTR; } + + pointer address(reference r) const noexcept { return std::addressof(r); } + + const_pointer address(const_reference r) const noexcept { return std::addressof(r); } + + pointer allocate(size_type n, const void* /*hint*/ = NULLPTR) { + uint8_t* data; + Status s = pool_->Allocate(n * sizeof(T), &data); + if (!s.ok()) throw std::bad_alloc(); + return reinterpret_cast(data); + } + + void deallocate(pointer p, size_type n) { + pool_->Free(reinterpret_cast(p), n * sizeof(T)); + } + + size_type size_max() const noexcept { return size_type(-1) / sizeof(T); } + + template + void construct(U* p, Args&&... args) { + new (reinterpret_cast(p)) U(std::forward(args)...); + } + + template + void destroy(U* p) { + p->~U(); + } + + MemoryPool* pool() const noexcept { return pool_; } + + private: + MemoryPool* pool_; +}; + +/// \brief A MemoryPool implementation delegating allocations to a STL allocator +/// +/// Note that STL allocators don't provide a resizing operation, and therefore +/// any buffer resizes will do a full reallocation and copy. 
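+///
+/// A minimal usage sketch, explicitly wrapping std::allocator<uint8_t>
+/// (error handling is shown inline and the sizes here are arbitrary):
+///
+/// \code{.cpp}
+/// std::allocator<uint8_t> alloc;
+/// arrow::stl::STLMemoryPool<std::allocator<uint8_t>> pool(alloc);
+/// uint8_t* data = nullptr;
+/// arrow::Status st = pool.Allocate(64, &data);
+/// if (st.ok()) {
+///   pool.Free(data, 64);
+/// }
+/// \endcode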
+template > +class STLMemoryPool : public MemoryPool { + public: + /// \brief Construct a memory pool from the given allocator + explicit STLMemoryPool(const Allocator& alloc) : alloc_(alloc) {} + + using MemoryPool::Allocate; + using MemoryPool::Free; + using MemoryPool::Reallocate; + + Status Allocate(int64_t size, int64_t /*alignment*/, uint8_t** out) override { + try { + *out = alloc_.allocate(size); + } catch (std::bad_alloc& e) { + return Status::OutOfMemory(e.what()); + } + stats_.UpdateAllocatedBytes(size); + return Status::OK(); + } + + Status Reallocate(int64_t old_size, int64_t new_size, int64_t /*alignment*/, + uint8_t** ptr) override { + uint8_t* old_ptr = *ptr; + try { + *ptr = alloc_.allocate(new_size); + } catch (std::bad_alloc& e) { + return Status::OutOfMemory(e.what()); + } + memcpy(*ptr, old_ptr, std::min(old_size, new_size)); + alloc_.deallocate(old_ptr, old_size); + stats_.UpdateAllocatedBytes(new_size - old_size); + return Status::OK(); + } + + void Free(uint8_t* buffer, int64_t size, int64_t /*alignment*/) override { + alloc_.deallocate(buffer, size); + stats_.UpdateAllocatedBytes(-size, /*is_free=*/true); + } + + int64_t bytes_allocated() const override { return stats_.bytes_allocated(); } + + int64_t max_memory() const override { return stats_.max_memory(); } + + int64_t total_bytes_allocated() const override { + return stats_.total_bytes_allocated(); + } + + int64_t num_allocations() const override { return stats_.num_allocations(); } + + std::string backend_name() const override { return "stl"; } + + private: + Allocator alloc_; + arrow::internal::MemoryPoolStats stats_; +}; + +template +bool operator==(const allocator& lhs, const allocator& rhs) noexcept { + return lhs.pool() == rhs.pool(); +} + +template +bool operator!=(const allocator& lhs, const allocator& rhs) noexcept { + return !(lhs == rhs); +} + +} // namespace stl +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_iterator.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5f2acfb071b299db4ac46f5f69a1db31d60ccac3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/stl_iterator.h @@ -0,0 +1,304 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
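+
+// STL-style iterators over Array and ChunkedArray.
+//
+// A minimal usage sketch for the Iterate() helper declared below (the
+// chunked_array variable is an assumed arrow::ChunkedArray holding int64
+// values; each element is a std::optional that is empty for null slots):
+//
+//   for (std::optional<int64_t> value :
+//        arrow::stl::Iterate<arrow::Int64Type>(chunked_array)) {
+//     if (value.has_value()) {
+//       // use *value
+//     }
+//   }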
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/chunked_array.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace stl { + +namespace detail { + +template +struct DefaultValueAccessor { + using ValueType = decltype(std::declval().GetView(0)); + + ValueType operator()(const ArrayType& array, int64_t index) { + return array.GetView(index); + } +}; + +} // namespace detail + +template > +class ArrayIterator { + public: + using value_type = std::optional; + using difference_type = int64_t; + using pointer = value_type*; + using reference = value_type&; + using iterator_category = std::random_access_iterator_tag; + + // Some algorithms need to default-construct an iterator + ArrayIterator() : array_(NULLPTR), index_(0) {} + + explicit ArrayIterator(const ArrayType& array, int64_t index = 0) + : array_(&array), index_(index) {} + + // Value access + value_type operator*() const { + assert(array_); + return array_->IsNull(index_) ? value_type{} : array_->GetView(index_); + } + + value_type operator[](difference_type n) const { + assert(array_); + return array_->IsNull(index_ + n) ? value_type{} : array_->GetView(index_ + n); + } + + int64_t index() const { return index_; } + + // Forward / backward + ArrayIterator& operator++() { + ++index_; + return *this; + } + ArrayIterator& operator--() { + --index_; + return *this; + } + ArrayIterator operator++(int) { + ArrayIterator tmp(*this); + ++index_; + return tmp; + } + ArrayIterator operator--(int) { + ArrayIterator tmp(*this); + --index_; + return tmp; + } + + // Arithmetic + difference_type operator-(const ArrayIterator& other) const { + return index_ - other.index_; + } + ArrayIterator operator+(difference_type n) const { + return ArrayIterator(*array_, index_ + n); + } + ArrayIterator operator-(difference_type n) const { + return ArrayIterator(*array_, index_ - n); + } + friend inline ArrayIterator operator+(difference_type diff, + const ArrayIterator& other) { + return ArrayIterator(*other.array_, diff + other.index_); + } + friend inline ArrayIterator operator-(difference_type diff, + const ArrayIterator& other) { + return ArrayIterator(*other.array_, diff - other.index_); + } + ArrayIterator& operator+=(difference_type n) { + index_ += n; + return *this; + } + ArrayIterator& operator-=(difference_type n) { + index_ -= n; + return *this; + } + + // Comparisons + bool operator==(const ArrayIterator& other) const { return index_ == other.index_; } + bool operator!=(const ArrayIterator& other) const { return index_ != other.index_; } + bool operator<(const ArrayIterator& other) const { return index_ < other.index_; } + bool operator>(const ArrayIterator& other) const { return index_ > other.index_; } + bool operator<=(const ArrayIterator& other) const { return index_ <= other.index_; } + bool operator>=(const ArrayIterator& other) const { return index_ >= other.index_; } + + private: + const ArrayType* array_; + int64_t index_; +}; + +template > +class ChunkedArrayIterator { + public: + using value_type = std::optional; + using difference_type = int64_t; + using pointer = value_type*; + using reference = value_type&; + using iterator_category = std::random_access_iterator_tag; + + // Some algorithms need to default-construct an iterator + ChunkedArrayIterator() noexcept : chunked_array_(NULLPTR), index_(0) {} + + explicit ChunkedArrayIterator(const ChunkedArray& chunked_array, + int64_t index = 0) noexcept + : 
chunked_array_(&chunked_array), index_(index) {} + + // Value access + value_type operator*() const { + auto chunk_location = GetChunkLocation(index_); + ArrayIterator target_iterator{ + arrow::internal::checked_cast( + *chunked_array_->chunk(static_cast(chunk_location.chunk_index)))}; + return target_iterator[chunk_location.index_in_chunk]; + } + + value_type operator[](difference_type n) const { return *(*this + n); } + + int64_t index() const { return index_; } + + // Forward / backward + ChunkedArrayIterator& operator++() { + (*this) += 1; + return *this; + } + ChunkedArrayIterator& operator--() { + (*this) -= 1; + return *this; + } + + ChunkedArrayIterator operator++(int) { + ChunkedArrayIterator tmp(*this); + ++*this; + return tmp; + } + ChunkedArrayIterator operator--(int) { + ChunkedArrayIterator tmp(*this); + --*this; + return tmp; + } + + // Arithmetic + difference_type operator-(const ChunkedArrayIterator& other) const { + return index_ - other.index_; + } + ChunkedArrayIterator operator+(difference_type n) const { + assert(chunked_array_); + return ChunkedArrayIterator(*chunked_array_, index_ + n); + } + ChunkedArrayIterator operator-(difference_type n) const { + assert(chunked_array_); + return ChunkedArrayIterator(*chunked_array_, index_ - n); + } + friend inline ChunkedArrayIterator operator+(difference_type diff, + const ChunkedArrayIterator& other) { + assert(other.chunked_array_); + return ChunkedArrayIterator(*other.chunked_array_, diff + other.index_); + } + friend inline ChunkedArrayIterator operator-(difference_type diff, + const ChunkedArrayIterator& other) { + assert(other.chunked_array_); + return ChunkedArrayIterator(*other.chunked_array_, diff - other.index_); + } + ChunkedArrayIterator& operator+=(difference_type n) { + index_ += n; + return *this; + } + ChunkedArrayIterator& operator-=(difference_type n) { + (*this) += -n; + return *this; + } + + // Comparisons + bool operator==(const ChunkedArrayIterator& other) const { + return index_ == other.index_; + } + bool operator!=(const ChunkedArrayIterator& other) const { + return index_ != other.index_; + } + bool operator<(const ChunkedArrayIterator& other) const { + return index_ < other.index_; + } + bool operator>(const ChunkedArrayIterator& other) const { + return index_ > other.index_; + } + bool operator<=(const ChunkedArrayIterator& other) const { + return index_ <= other.index_; + } + bool operator>=(const ChunkedArrayIterator& other) const { + return index_ >= other.index_; + } + + private: + arrow::internal::ChunkLocation GetChunkLocation(int64_t index) const { + assert(chunked_array_); + return chunked_array_->chunk_resolver_.Resolve(index); + } + + const ChunkedArray* chunked_array_; + int64_t index_; +}; + +/// Return an iterator to the beginning of the chunked array +template ::ArrayType> +ChunkedArrayIterator Begin(const ChunkedArray& chunked_array) { + return ChunkedArrayIterator(chunked_array); +} + +/// Return an iterator to the end of the chunked array +template ::ArrayType> +ChunkedArrayIterator End(const ChunkedArray& chunked_array) { + return ChunkedArrayIterator(chunked_array, chunked_array.length()); +} + +template +struct ChunkedArrayRange { + const ChunkedArray* chunked_array; + + ChunkedArrayIterator begin() { + return stl::ChunkedArrayIterator(*chunked_array); + } + ChunkedArrayIterator end() { + return stl::ChunkedArrayIterator(*chunked_array, chunked_array->length()); + } +}; + +/// Return an iterable range over the chunked array +template ::ArrayType> +ChunkedArrayRange Iterate(const 
ChunkedArray& chunked_array) { + return stl::ChunkedArrayRange{&chunked_array}; +} + +} // namespace stl +} // namespace arrow + +namespace std { + +template +struct iterator_traits<::arrow::stl::ArrayIterator> { + using IteratorType = ::arrow::stl::ArrayIterator; + using difference_type = typename IteratorType::difference_type; + using value_type = typename IteratorType::value_type; + using pointer = typename IteratorType::pointer; + using reference = typename IteratorType::reference; + using iterator_category = typename IteratorType::iterator_category; +}; + +template +struct iterator_traits<::arrow::stl::ChunkedArrayIterator> { + using IteratorType = ::arrow::stl::ChunkedArrayIterator; + using difference_type = typename IteratorType::difference_type; + using value_type = typename IteratorType::value_type; + using pointer = typename IteratorType::pointer; + using reference = typename IteratorType::reference; + using iterator_category = typename IteratorType::iterator_category; +}; + +} // namespace std diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/table.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/table.h new file mode 100644 index 0000000000000000000000000000000000000000..551880f2375860dcbf3709ca69022eb59e863652 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/table.h @@ -0,0 +1,367 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/chunked_array.h" // IWYU pragma: keep +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class KeyValueMetadata; +class MemoryPool; + +/// \class Table +/// \brief Logical table as sequence of chunked arrays +class ARROW_EXPORT Table { + public: + virtual ~Table() = default; + + /// \brief Construct a Table from schema and columns + /// + /// If columns is zero-length, the table's number of rows is zero + /// + /// \param[in] schema The table schema (column types) + /// \param[in] columns The table's columns as chunked arrays + /// \param[in] num_rows number of rows in table, -1 (default) to infer from columns + static std::shared_ptr
Make(std::shared_ptr schema, + std::vector> columns, + int64_t num_rows = -1); + + /// \brief Construct a Table from schema and arrays + /// + /// \param[in] schema The table schema (column types) + /// \param[in] arrays The table's columns as arrays + /// \param[in] num_rows number of rows in table, -1 (default) to infer from columns + static std::shared_ptr
Make(std::shared_ptr schema, + const std::vector>& arrays, + int64_t num_rows = -1); + + /// \brief Create an empty Table of a given schema + /// + /// The output Table will be created with a single empty chunk per column. + /// + /// \param[in] schema the schema of the empty Table + /// \param[in] pool the memory pool to allocate memory from + /// \return the resulting Table + static Result> MakeEmpty( + std::shared_ptr schema, MemoryPool* pool = default_memory_pool()); + + /// \brief Construct a Table from a RecordBatchReader. + /// + /// \param[in] reader the arrow::RecordBatchReader that produces batches + static Result> FromRecordBatchReader(RecordBatchReader* reader); + + /// \brief Construct a Table from RecordBatches, using schema supplied by the first + /// RecordBatch. + /// + /// \param[in] batches a std::vector of record batches + static Result> FromRecordBatches( + const std::vector>& batches); + + /// \brief Construct a Table from RecordBatches, using supplied schema. There may be + /// zero record batches + /// + /// \param[in] schema the arrow::Schema for each batch + /// \param[in] batches a std::vector of record batches + static Result> FromRecordBatches( + std::shared_ptr schema, + const std::vector>& batches); + + /// \brief Construct a Table from a chunked StructArray. One column will be produced + /// for each field of the StructArray. + /// + /// \param[in] array a chunked StructArray + static Result> FromChunkedStructArray( + const std::shared_ptr& array); + + /// \brief Return the table schema + const std::shared_ptr& schema() const { return schema_; } + + /// \brief Return a column by index + virtual std::shared_ptr column(int i) const = 0; + + /// \brief Return vector of all columns for table + virtual const std::vector>& columns() const = 0; + + /// Return a column's field by index + std::shared_ptr field(int i) const { return schema_->field(i); } + + /// \brief Return vector of all fields for table + std::vector> fields() const; + + /// \brief Construct a zero-copy slice of the table with the + /// indicated offset and length + /// + /// \param[in] offset the index of the first row in the constructed + /// slice + /// \param[in] length the number of rows of the slice. If there are not enough + /// rows in the table, the length will be adjusted accordingly + /// + /// \return a new object wrapped in std::shared_ptr
+ virtual std::shared_ptr<Table>
Slice(int64_t offset, int64_t length) const = 0; + + /// \brief Slice from first row at offset until end of the table + std::shared_ptr
Slice(int64_t offset) const { return Slice(offset, num_rows_); } + + /// \brief Return a column by name + /// \param[in] name field name + /// \return an Array or null if no field was found + std::shared_ptr GetColumnByName(const std::string& name) const { + auto i = schema_->GetFieldIndex(name); + return i == -1 ? NULLPTR : column(i); + } + + /// \brief Remove column from the table, producing a new Table + virtual Result> RemoveColumn(int i) const = 0; + + /// \brief Add column to the table, producing a new Table + virtual Result> AddColumn( + int i, std::shared_ptr field_arg, + std::shared_ptr column) const = 0; + + /// \brief Replace a column in the table, producing a new Table + virtual Result> SetColumn( + int i, std::shared_ptr field_arg, + std::shared_ptr column) const = 0; + + /// \brief Return names of all columns + std::vector ColumnNames() const; + + /// \brief Rename columns with provided names + Result> RenameColumns( + const std::vector& names) const; + + /// \brief Return new table with specified columns + Result> SelectColumns(const std::vector& indices) const; + + /// \brief Replace schema key-value metadata with new metadata + /// \since 0.5.0 + /// + /// \param[in] metadata new KeyValueMetadata + /// \return new Table + virtual std::shared_ptr
ReplaceSchemaMetadata( + const std::shared_ptr& metadata) const = 0; + + /// \brief Flatten the table, producing a new Table. Any column with a + /// struct type will be flattened into multiple columns + /// + /// \param[in] pool The pool for buffer allocations, if any + virtual Result> Flatten( + MemoryPool* pool = default_memory_pool()) const = 0; + + /// \return PrettyPrint representation suitable for debugging + std::string ToString() const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the table's schema and internal data. + /// + /// This is O(k*m) where k is the total number of field descendents, + /// and m is the number of chunks. + /// + /// \return Status + virtual Status Validate() const = 0; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the table's schema and internal data. + /// + /// This is O(k*n) where k is the total number of field descendents, + /// and n is the number of rows. + /// + /// \return Status + virtual Status ValidateFull() const = 0; + + /// \brief Return the number of columns in the table + int num_columns() const { return schema_->num_fields(); } + + /// \brief Return the number of rows (equal to each column's logical length) + int64_t num_rows() const { return num_rows_; } + + /// \brief Determine if tables are equal + /// + /// Two tables can be equal only if they have equal schemas. + /// However, they may be equal even if they have different chunkings. + bool Equals(const Table& other, bool check_metadata = false) const; + + /// \brief Make a new table by combining the chunks this table has. + /// + /// All the underlying chunks in the ChunkedArray of each column are + /// concatenated into zero or one chunk. + /// + /// \param[in] pool The pool for buffer allocations + Result> CombineChunks( + MemoryPool* pool = default_memory_pool()) const; + + /// \brief Make a new record batch by combining the chunks this table has. + /// + /// All the underlying chunks in the ChunkedArray of each column are + /// concatenated into a single chunk. + /// + /// \param[in] pool The pool for buffer allocations + Result> CombineChunksToBatch( + MemoryPool* pool = default_memory_pool()) const; + + protected: + Table(); + + std::shared_ptr schema_; + int64_t num_rows_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(Table); +}; + +/// \brief Compute a stream of record batches from a (possibly chunked) Table +/// +/// The conversion is zero-copy: each record batch is a view over a slice +/// of the table's columns. +class ARROW_EXPORT TableBatchReader : public RecordBatchReader { + public: + /// \brief Construct a TableBatchReader for the given table + explicit TableBatchReader(const Table& table); + explicit TableBatchReader(std::shared_ptr
table); + + std::shared_ptr schema() const override; + + Status ReadNext(std::shared_ptr* out) override; + + /// \brief Set the desired maximum chunk size of record batches + /// + /// The actual chunk size of each record batch may be smaller, depending + /// on actual chunking characteristics of each table column. + void set_chunksize(int64_t chunksize); + + private: + std::shared_ptr
owned_table_; + const Table& table_; + std::vector column_data_; + std::vector chunk_numbers_; + std::vector chunk_offsets_; + int64_t absolute_row_position_; + int64_t max_chunksize_; +}; + +/// \defgroup concat-tables ConcatenateTables function. +/// +/// ConcatenateTables function. +/// @{ + +/// \brief Controls the behavior of ConcatenateTables(). +struct ARROW_EXPORT ConcatenateTablesOptions { + /// If true, the schemas of the tables will be first unified with fields of + /// the same name being merged, according to `field_merge_options`, then each + /// table will be promoted to the unified schema before being concatenated. + /// Otherwise, all tables should have the same schema. Each column in the output table + /// is the result of concatenating the corresponding columns in all input tables. + bool unify_schemas = false; + + /// options to control how fields are merged when unifying schemas + /// + /// This field will be ignored if unify_schemas is false + Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults(); + + static ConcatenateTablesOptions Defaults() { return {}; } +}; + +/// \brief Construct a new table from multiple input tables. +/// +/// The new table is assembled from existing column chunks without copying, +/// if schemas are identical. If schemas do not match exactly and +/// unify_schemas is enabled in options (off by default), an attempt is +/// made to unify them, and then column chunks are converted to their +/// respective unified datatype, which will probably incur a copy. +/// :func:`arrow::PromoteTableToSchema` is used to unify schemas. +/// +/// Tables are concatenated in order they are provided in and the order of +/// rows within tables will be preserved. +/// +/// \param[in] tables a std::vector of Tables to be concatenated +/// \param[in] options specify how to unify schema of input tables +/// \param[in] memory_pool MemoryPool to be used if null-filled arrays need to +/// be created or if existing column chunks need to endure type conversion +/// \return new Table + +ARROW_EXPORT +Result> ConcatenateTables( + const std::vector>& tables, + ConcatenateTablesOptions options = ConcatenateTablesOptions::Defaults(), + MemoryPool* memory_pool = default_memory_pool()); + +namespace compute { +class CastOptions; +} + +/// \brief Promotes a table to conform to the given schema. +/// +/// If a field in the schema does not have a corresponding column in +/// the table, a column of nulls will be added to the resulting table. +/// If the corresponding column is of type Null, it will be promoted +/// to the type specified by schema, with null values filled. The +/// column will be casted to the type specified by the schema. +/// +/// Returns an error: +/// - if the corresponding column's type is not compatible with the +/// schema. +/// - if there is a column in the table that does not exist in the schema. +/// - if the cast fails or casting would be required but is not available. +/// +/// \param[in] table the input Table +/// \param[in] schema the target schema to promote to +/// \param[in] pool The memory pool to be used if null-filled arrays need to +/// be created. +ARROW_EXPORT +Result> PromoteTableToSchema( + const std::shared_ptr
& table, const std::shared_ptr& schema, + MemoryPool* pool = default_memory_pool()); + +/// \brief Promotes a table to conform to the given schema. +/// +/// If a field in the schema does not have a corresponding column in +/// the table, a column of nulls will be added to the resulting table. +/// If the corresponding column is of type Null, it will be promoted +/// to the type specified by schema, with null values filled. The column +/// will be casted to the type specified by the schema. +/// +/// Returns an error: +/// - if the corresponding column's type is not compatible with the +/// schema. +/// - if there is a column in the table that does not exist in the schema. +/// - if the cast fails or casting would be required but is not available. +/// +/// \param[in] table the input Table +/// \param[in] schema the target schema to promote to +/// \param[in] options The cast options to allow promotion of types +/// \param[in] pool The memory pool to be used if null-filled arrays need to +/// be created. +ARROW_EXPORT +Result> PromoteTableToSchema( + const std::shared_ptr
& table, const std::shared_ptr& schema, + const compute::CastOptions& options, MemoryPool* pool = default_memory_pool()); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/tensor.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..ff6f3735f9193e1e27da6ca37cc39dc04a1d0e41 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/tensor.h @@ -0,0 +1,246 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +static inline bool is_tensor_supported(Type::type type_id) { + switch (type_id) { + case Type::UINT8: + case Type::INT8: + case Type::UINT16: + case Type::INT16: + case Type::UINT32: + case Type::INT32: + case Type::UINT64: + case Type::INT64: + case Type::HALF_FLOAT: + case Type::FLOAT: + case Type::DOUBLE: + return true; + default: + break; + } + return false; +} + +namespace internal { + +ARROW_EXPORT +Status ComputeRowMajorStrides(const FixedWidthType& type, + const std::vector& shape, + std::vector* strides); + +ARROW_EXPORT +Status ComputeColumnMajorStrides(const FixedWidthType& type, + const std::vector& shape, + std::vector* strides); + +ARROW_EXPORT +bool IsTensorStridesContiguous(const std::shared_ptr& type, + const std::vector& shape, + const std::vector& strides); + +ARROW_EXPORT +Status ValidateTensorParameters(const std::shared_ptr& type, + const std::shared_ptr& data, + const std::vector& shape, + const std::vector& strides, + const std::vector& dim_names); + +} // namespace internal + +class ARROW_EXPORT Tensor { + public: + /// \brief Create a Tensor with full parameters + /// + /// This factory function will return Status::Invalid when the parameters are + /// inconsistent + /// + /// \param[in] type The data type of the tensor values + /// \param[in] data The buffer of the tensor content + /// \param[in] shape The shape of the tensor + /// \param[in] strides The strides of the tensor + /// (if this is empty, the data assumed to be row-major) + /// \param[in] dim_names The names of the tensor dimensions + static inline Result> Make( + const std::shared_ptr& type, const std::shared_ptr& data, + const std::vector& shape, const std::vector& strides = {}, + const std::vector& dim_names = {}) { + ARROW_RETURN_NOT_OK( + internal::ValidateTensorParameters(type, data, shape, strides, dim_names)); + return std::make_shared(type, data, shape, strides, 
dim_names); + } + + virtual ~Tensor() = default; + + /// Constructor with no dimension names or strides, data assumed to be row-major + Tensor(const std::shared_ptr& type, const std::shared_ptr& data, + const std::vector& shape); + + /// Constructor with non-negative strides + Tensor(const std::shared_ptr& type, const std::shared_ptr& data, + const std::vector& shape, const std::vector& strides); + + /// Constructor with non-negative strides and dimension names + Tensor(const std::shared_ptr& type, const std::shared_ptr& data, + const std::vector& shape, const std::vector& strides, + const std::vector& dim_names); + + std::shared_ptr type() const { return type_; } + std::shared_ptr data() const { return data_; } + + const uint8_t* raw_data() const { return data_->data(); } + uint8_t* raw_mutable_data() { return data_->mutable_data(); } + + const std::vector& shape() const { return shape_; } + const std::vector& strides() const { return strides_; } + + int ndim() const { return static_cast(shape_.size()); } + + const std::vector& dim_names() const { return dim_names_; } + const std::string& dim_name(int i) const; + + /// Total number of value cells in the tensor + int64_t size() const; + + /// Return true if the underlying data buffer is mutable + bool is_mutable() const { return data_->is_mutable(); } + + /// Either row major or column major + bool is_contiguous() const; + + /// AKA "C order" + bool is_row_major() const; + + /// AKA "Fortran order" + bool is_column_major() const; + + Type::type type_id() const; + + bool Equals(const Tensor& other, const EqualOptions& = EqualOptions::Defaults()) const; + + /// Compute the number of non-zero values in the tensor + Result CountNonZero() const; + + /// Return the offset of the given index on the given strides + static int64_t CalculateValueOffset(const std::vector& strides, + const std::vector& index) { + const int64_t n = static_cast(index.size()); + int64_t offset = 0; + for (int64_t i = 0; i < n; ++i) { + offset += index[i] * strides[i]; + } + return offset; + } + + int64_t CalculateValueOffset(const std::vector& index) const { + return Tensor::CalculateValueOffset(strides_, index); + } + + /// Returns the value at the given index without data-type and bounds checks + template + const typename ValueType::c_type& Value(const std::vector& index) const { + using c_type = typename ValueType::c_type; + const int64_t offset = CalculateValueOffset(index); + const c_type* ptr = reinterpret_cast(raw_data() + offset); + return *ptr; + } + + Status Validate() const { + return internal::ValidateTensorParameters(type_, data_, shape_, strides_, dim_names_); + } + + protected: + Tensor() {} + + std::shared_ptr type_; + std::shared_ptr data_; + std::vector shape_; + std::vector strides_; + + /// These names are optional + std::vector dim_names_; + + template + friend class SparseTensorImpl; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(Tensor); +}; + +template +class NumericTensor : public Tensor { + public: + using TypeClass = TYPE; + using value_type = typename TypeClass::c_type; + + /// \brief Create a NumericTensor with full parameters + /// + /// This factory function will return Status::Invalid when the parameters are + /// inconsistent + /// + /// \param[in] data The buffer of the tensor content + /// \param[in] shape The shape of the tensor + /// \param[in] strides The strides of the tensor + /// (if this is empty, the data assumed to be row-major) + /// \param[in] dim_names The names of the tensor dimensions + static Result>> Make( + const 
std::shared_ptr& data, const std::vector& shape, + const std::vector& strides = {}, + const std::vector& dim_names = {}) { + ARROW_RETURN_NOT_OK(internal::ValidateTensorParameters( + TypeTraits::type_singleton(), data, shape, strides, dim_names)); + return std::make_shared>(data, shape, strides, dim_names); + } + + /// Constructor with non-negative strides and dimension names + NumericTensor(const std::shared_ptr& data, const std::vector& shape, + const std::vector& strides, + const std::vector& dim_names) + : Tensor(TypeTraits::type_singleton(), data, shape, strides, dim_names) {} + + /// Constructor with no dimension names or strides, data assumed to be row-major + NumericTensor(const std::shared_ptr& data, const std::vector& shape) + : NumericTensor(data, shape, {}, {}) {} + + /// Constructor with non-negative strides + NumericTensor(const std::shared_ptr& data, const std::vector& shape, + const std::vector& strides) + : NumericTensor(data, shape, strides, {}) {} + + const value_type& Value(const std::vector& index) const { + return Tensor::Value(index); + } +}; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_array_inline.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_array_inline.h new file mode 100644 index 0000000000000000000000000000000000000000..cb6ff49b6950d51f4e189488307f5f0037c2eef6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_array_inline.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array.h" +#include "arrow/extension_type.h" +#include "arrow/visitor_generate.h" + +namespace arrow { + +#define ARRAY_VISIT_INLINE(TYPE_CLASS) \ + case TYPE_CLASS##Type::type_id: \ + return visitor->Visit( \ + internal::checked_cast::ArrayType&>( \ + array), \ + std::forward(args)...); + +/// \brief Apply the visitors Visit() method specialized to the array type +/// +/// \tparam VISITOR Visitor type that implements Visit() for all array types. +/// \tparam ARGS Additional arguments, if any, will be passed to the Visit function after +/// the `arr` argument +/// \return Status +/// +/// A visitor is a type that implements specialized logic for each Arrow type. +/// Example usage: +/// +/// ``` +/// class ExampleVisitor { +/// arrow::Status Visit(arrow::NumericArray arr) { ... } +/// arrow::Status Visit(arrow::NumericArray arr) { ... } +/// ... +/// } +/// ExampleVisitor visitor; +/// VisitArrayInline(some_array, &visitor); +/// ``` +template +inline Status VisitArrayInline(const Array& array, VISITOR* visitor, ARGS&&... 
args) { + switch (array.type_id()) { + ARROW_GENERATE_FOR_ALL_TYPES(ARRAY_VISIT_INLINE); + default: + break; + } + return Status::NotImplemented("Type not implemented"); +} + +#undef ARRAY_VISIT_INLINE + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_scalar_inline.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_scalar_inline.h new file mode 100644 index 0000000000000000000000000000000000000000..85357f288c63b328f93afb1ee419775caac53b2c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/visit_scalar_inline.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Private header, not to be exported + +#pragma once + +#include + +#include "arrow/scalar.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/visitor_generate.h" + +namespace arrow { + +#define SCALAR_VISIT_INLINE(TYPE_CLASS) \ + case TYPE_CLASS##Type::type_id: \ + return visitor->Visit(internal::checked_cast(scalar), \ + std::forward(args)...); + +/// \brief Apply the visitors Visit() method specialized to the scalar type +/// +/// \tparam VISITOR Visitor type that implements Visit() for all scalar types. +/// \tparam ARGS Additional arguments, if any, will be passed to the Visit function after +/// the `scalar` argument +/// \return Status +/// +/// A visitor is a type that implements specialized logic for each Arrow type. +/// Example usage: +/// +/// ``` +/// class ExampleVisitor { +/// arrow::Status Visit(arrow::Int32Scalar scalar) { ... } +/// arrow::Status Visit(arrow::Int64Scalar scalar) { ... } +/// ... +/// } +/// ExampleVisitor visitor; +/// VisitScalarInline(some_scalar, &visitor); +/// ``` +template +inline Status VisitScalarInline(const Scalar& scalar, VISITOR* visitor, ARGS&&... args) { + switch (scalar.type->id()) { + ARROW_GENERATE_FOR_ALL_TYPES(SCALAR_VISIT_INLINE); + default: + break; + } + return Status::NotImplemented("Scalar visitor for type not implemented ", + scalar.type->ToString()); +} + +#undef SCALAR_VISIT_INLINE + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/io.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/io.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1897e76efc2a06fcea15364cd5cac22d3abe9f3f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/io.pxi @@ -0,0 +1,2789 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Cython wrappers for IO interfaces defined in arrow::io and messaging in +# arrow::ipc + +from libc.stdlib cimport malloc, free + +import codecs +import pickle +import re +import sys +import threading +import time +import warnings +from io import BufferedIOBase, IOBase, TextIOBase, UnsupportedOperation +from queue import Queue, Empty as QueueEmpty + +from pyarrow.util import _is_path_like, _stringify_path + + +# 64K +DEFAULT_BUFFER_SIZE = 2 ** 16 + + +cdef extern from "Python.h": + # To let us get a PyObject* and avoid Cython auto-ref-counting + PyObject* PyBytes_FromStringAndSizeNative" PyBytes_FromStringAndSize"( + char *v, Py_ssize_t len) except NULL + + # Workaround https://github.com/cython/cython/issues/4707 + bytearray PyByteArray_FromStringAndSize(char *string, Py_ssize_t len) + + +def io_thread_count(): + """ + Return the number of threads to use for I/O operations. + + Many operations, such as scanning a dataset, will implicitly make + use of this pool. The number of threads is set to a fixed value at + startup. It can be modified at runtime by calling + :func:`set_io_thread_count()`. + + See Also + -------- + set_io_thread_count : Modify the size of this pool. + cpu_count : The analogous function for the CPU thread pool. + """ + return GetIOThreadPoolCapacity() + + +def set_io_thread_count(int count): + """ + Set the number of threads to use for I/O operations. + + Many operations, such as scanning a dataset, will implicitly make + use of this pool. + + Parameters + ---------- + count : int + The max number of threads that may be used for I/O. + Must be positive. + + See Also + -------- + io_thread_count : Get the size of this pool. + set_cpu_count : The analogous function for the CPU thread pool. + """ + if count < 1: + raise ValueError("IO thread count must be strictly positive") + check_status(SetIOThreadPoolCapacity(count)) + + +cdef class NativeFile(_Weakrefable): + """ + The base class for all Arrow streams. + + Streams are either readable, writable, or both. + They optionally support seeking. + + While this class exposes methods to read or write data from Python, the + primary intent of using a Arrow stream is to pass it to other Arrow + facilities that will make use of it, such as Arrow IPC routines. + + Be aware that there are subtle differences with regular Python files, + e.g. destroying a writable Arrow stream without closing it explicitly + will not flush any pending data. + """ + + # Default chunk size for chunked reads. + # Use a large enough value for networked filesystems. 
+ _default_chunk_size = 256 * 1024 + + def __cinit__(self): + self.own_file = False + self.is_readable = False + self.is_writable = False + self.is_seekable = False + self._is_appending = False + + def __dealloc__(self): + if self.own_file: + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close() + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"closed={self.closed} " + f"own_file={self.own_file} " + f"is_seekable={self.is_seekable} " + f"is_writable={self.is_writable} " + f"is_readable={self.is_readable}>") + + @property + def mode(self): + """ + The file mode. Currently instances of NativeFile may support: + + * rb: binary read + * wb: binary write + * rb+: binary read and write + * ab: binary append + """ + # Emulate built-in file modes + if self.is_readable and self.is_writable: + return 'rb+' + elif self.is_readable: + return 'rb' + elif self.is_writable and self._is_appending: + return 'ab' + elif self.is_writable: + return 'wb' + else: + raise ValueError('File object is malformed, has no mode') + + def readable(self): + self._assert_open() + return self.is_readable + + def writable(self): + self._assert_open() + return self.is_writable + + def seekable(self): + self._assert_open() + return self.is_seekable + + def isatty(self): + self._assert_open() + return False + + def fileno(self): + """ + NOT IMPLEMENTED + """ + raise UnsupportedOperation() + + @property + def closed(self): + if self.is_readable: + return self.input_stream.get().closed() + elif self.is_writable: + return self.output_stream.get().closed() + else: + return True + + def close(self): + if not self.closed: + with nogil: + if self.is_readable: + check_status(self.input_stream.get().Close()) + else: + check_status(self.output_stream.get().Close()) + + cdef set_random_access_file(self, shared_ptr[CRandomAccessFile] handle): + self.input_stream = handle + self.random_access = handle + self.is_seekable = True + + cdef set_input_stream(self, shared_ptr[CInputStream] handle): + self.input_stream = handle + self.random_access.reset() + self.is_seekable = False + + cdef set_output_stream(self, shared_ptr[COutputStream] handle): + self.output_stream = handle + + cdef shared_ptr[CRandomAccessFile] get_random_access_file(self) except *: + self._assert_readable() + self._assert_seekable() + return self.random_access + + cdef shared_ptr[CInputStream] get_input_stream(self) except *: + self._assert_readable() + return self.input_stream + + cdef shared_ptr[COutputStream] get_output_stream(self) except *: + self._assert_writable() + return self.output_stream + + def _assert_open(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _assert_readable(self): + self._assert_open() + if not self.is_readable: + # XXX UnsupportedOperation + raise IOError("only valid on readable files") + + def _assert_writable(self): + self._assert_open() + if not self.is_writable: + raise IOError("only valid on writable files") + + def _assert_seekable(self): + self._assert_open() + if not self.is_seekable: + raise IOError("only valid on seekable files") + + def size(self): + """ + Return file size + """ + cdef int64_t size + + handle = self.get_random_access_file() + with nogil: + size = GetResultValue(handle.get().GetSize()) + + return size + + def metadata(self): + """ + Return file metadata + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_metadata + + handle = self.get_input_stream() + with nogil: + c_metadata = 
GetResultValue(handle.get().ReadMetadata()) + + metadata = {} + if c_metadata.get() != nullptr: + for i in range(c_metadata.get().size()): + metadata[frombytes(c_metadata.get().key(i))] = \ + c_metadata.get().value(i) + return metadata + + def tell(self): + """ + Return current stream position + """ + cdef int64_t position + + if self.is_readable: + rd_handle = self.get_random_access_file() + with nogil: + position = GetResultValue(rd_handle.get().Tell()) + else: + wr_handle = self.get_output_stream() + with nogil: + position = GetResultValue(wr_handle.get().Tell()) + + return position + + def seek(self, int64_t position, int whence=0): + """ + Change current file stream position + + Parameters + ---------- + position : int + Byte offset, interpreted relative to value of whence argument + whence : int, default 0 + Point of reference for seek offset + + Notes + ----- + Values of whence: + * 0 -- start of stream (the default); offset should be zero or positive + * 1 -- current stream position; offset may be negative + * 2 -- end of stream; offset is usually negative + + Returns + ------- + int + The new absolute stream position. + """ + cdef int64_t offset + handle = self.get_random_access_file() + + with nogil: + if whence == 0: + offset = position + elif whence == 1: + offset = GetResultValue(handle.get().Tell()) + offset = offset + position + elif whence == 2: + offset = GetResultValue(handle.get().GetSize()) + offset = offset + position + else: + with gil: + raise ValueError("Invalid value of whence: {0}" + .format(whence)) + check_status(handle.get().Seek(offset)) + + return self.tell() + + def flush(self): + """ + Flush the stream, if applicable. + + An error is raised if stream is not writable. + """ + self._assert_open() + # For IOBase compatibility, flush() on an input stream is a no-op + if self.is_writable: + handle = self.get_output_stream() + with nogil: + check_status(handle.get().Flush()) + + def write(self, data): + """ + Write data to the file. + + Parameters + ---------- + data : bytes-like object or exporter of buffer protocol + + Returns + ------- + int + nbytes: number of bytes written + """ + self._assert_writable() + handle = self.get_output_stream() + + cdef shared_ptr[CBuffer] buf = as_c_buffer(data) + + with nogil: + check_status(handle.get().WriteBuffer(buf)) + return buf.get().size() + + def read(self, nbytes=None): + """ + Read and return up to n bytes. + + If *nbytes* is None, then the entire remaining file contents are read. + + Parameters + ---------- + nbytes : int, default None + + Returns + ------- + data : bytes + """ + cdef: + int64_t c_nbytes + int64_t bytes_read = 0 + PyObject* obj + + if nbytes is None: + if not self.is_seekable: + # Cannot get file size => read chunkwise + bs = self._default_chunk_size + chunks = [] + while True: + chunk = self.read(bs) + if not chunk: + break + chunks.append(chunk) + return b"".join(chunks) + + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + handle = self.get_input_stream() + + # Allocate empty write space + obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes) + + cdef uint8_t* buf = cp.PyBytes_AS_STRING( obj) + with nogil: + bytes_read = GetResultValue(handle.get().Read(c_nbytes, buf)) + + if bytes_read < c_nbytes: + cp._PyBytes_Resize(&obj, bytes_read) + + return PyObject_to_object(obj) + + def get_stream(self, file_offset, nbytes): + """ + Return an input stream that reads a file segment independent of the + state of the file. 
+ + Allows reading portions of a random access file as an input stream + without interfering with each other. + + Parameters + ---------- + file_offset : int + nbytes : int + + Returns + ------- + stream : NativeFile + """ + cdef: + shared_ptr[CInputStream] data + int64_t c_file_offset + int64_t c_nbytes + + c_file_offset = file_offset + c_nbytes = nbytes + + handle = self.get_random_access_file() + + data = GetResultValue( + CRandomAccessFile.GetStream(handle, c_file_offset, c_nbytes)) + + stream = NativeFile() + stream.set_input_stream(data) + stream.is_readable = True + + return stream + + def read_at(self, nbytes, offset): + """ + Read indicated number of bytes at offset from the file + + Parameters + ---------- + nbytes : int + offset : int + + Returns + ------- + data : bytes + """ + cdef: + int64_t c_nbytes + int64_t c_offset + int64_t bytes_read = 0 + PyObject* obj + + c_nbytes = nbytes + + c_offset = offset + + handle = self.get_random_access_file() + + # Allocate empty write space + obj = PyBytes_FromStringAndSizeNative(NULL, c_nbytes) + + cdef uint8_t* buf = cp.PyBytes_AS_STRING( obj) + with nogil: + bytes_read = GetResultValue(handle.get(). + ReadAt(c_offset, c_nbytes, buf)) + + if bytes_read < c_nbytes: + cp._PyBytes_Resize(&obj, bytes_read) + + return PyObject_to_object(obj) + + def read1(self, nbytes=None): + """Read and return up to n bytes. + + Unlike read(), if *nbytes* is None then a chunk is read, not the + entire file. + + Parameters + ---------- + nbytes : int, default None + The maximum number of bytes to read. + + Returns + ------- + data : bytes + """ + if nbytes is None: + # The expectation when passing `nbytes=None` is not to read the + # entire file but to issue a single underlying read call up to + # a reasonable size (the use case being to read a bufferable + # amount of bytes, such as with io.TextIOWrapper). + nbytes = self._default_chunk_size + return self.read(nbytes) + + def readall(self): + return self.read() + + def readinto(self, b): + """ + Read into the supplied buffer + + Parameters + ---------- + b : buffer-like object + A writable buffer object (such as a bytearray). + + Returns + ------- + written : int + number of bytes written + """ + + cdef: + int64_t bytes_read + uint8_t* buf + Buffer py_buf + int64_t buf_len + + handle = self.get_input_stream() + + py_buf = py_buffer(b) + buf_len = py_buf.size + buf = py_buf.buffer.get().mutable_data() + + with nogil: + bytes_read = GetResultValue(handle.get().Read(buf_len, buf)) + + return bytes_read + + def readline(self, size=None): + """NOT IMPLEMENTED. Read and return a line of bytes from the file. + + If size is specified, read at most size bytes. + + Line terminator is always b"\\n". + + Parameters + ---------- + size : int + maximum number of bytes read + """ + raise UnsupportedOperation() + + def readlines(self, hint=None): + """NOT IMPLEMENTED. Read lines of the file + + Parameters + ---------- + hint : int + maximum number of bytes read until we stop + """ + raise UnsupportedOperation() + + def __iter__(self): + self._assert_readable() + return self + + def __next__(self): + line = self.readline() + if not line: + raise StopIteration + return line + + def read_buffer(self, nbytes=None): + """ + Read from buffer. 
+ + Parameters + ---------- + nbytes : int, optional + maximum number of bytes read + """ + cdef: + int64_t c_nbytes + int64_t bytes_read = 0 + shared_ptr[CBuffer] output + + handle = self.get_input_stream() + + if nbytes is None: + if not self.is_seekable: + # Cannot get file size => read chunkwise + return py_buffer(self.read()) + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + with nogil: + output = GetResultValue(handle.get().ReadBuffer(c_nbytes)) + + return pyarrow_wrap_buffer(output) + + def truncate(self): + """ + NOT IMPLEMENTED + """ + raise UnsupportedOperation() + + def writelines(self, lines): + """ + Write lines to the file. + + Parameters + ---------- + lines : iterable + Iterable of bytes-like objects or exporters of buffer protocol + """ + self._assert_writable() + + for line in lines: + self.write(line) + + def download(self, stream_or_path, buffer_size=None): + """ + Read this file completely to a local path or destination stream. + + This method first seeks to the beginning of the file. + + Parameters + ---------- + stream_or_path : str or file-like object + If a string, a local file path to write to; otherwise, + should be a writable stream. + buffer_size : int, optional + The buffer size to use for data transfers. + """ + cdef: + int64_t bytes_read = 0 + uint8_t* buf + + handle = self.get_input_stream() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + write_queue = Queue(50) + + if not hasattr(stream_or_path, 'read'): + stream = open(stream_or_path, 'wb') + + def cleanup(): + stream.close() + else: + stream = stream_or_path + + def cleanup(): + pass + + done = False + exc_info = None + + def bg_write(): + try: + while not done or write_queue.qsize() > 0: + try: + buf = write_queue.get(timeout=0.01) + except QueueEmpty: + continue + stream.write(buf) + except Exception as e: + exc_info = sys.exc_info() + finally: + cleanup() + + self.seek(0) + + writer_thread = threading.Thread(target=bg_write) + + # This isn't ideal -- PyBytes_FromStringAndSize copies the data from + # the passed buffer, so it's hard for us to avoid doubling the memory + buf = malloc(buffer_size) + if buf == NULL: + raise MemoryError("Failed to allocate {0} bytes" + .format(buffer_size)) + + writer_thread.start() + + cdef int64_t total_bytes = 0 + cdef int32_t c_buffer_size = buffer_size + + try: + while True: + with nogil: + bytes_read = GetResultValue( + handle.get().Read(c_buffer_size, buf)) + + total_bytes += bytes_read + + # EOF + if bytes_read == 0: + break + + pybuf = cp.PyBytes_FromStringAndSize(buf, + bytes_read) + + if writer_thread.is_alive(): + while write_queue.full(): + time.sleep(0.01) + else: + break + + write_queue.put_nowait(pybuf) + finally: + free(buf) + done = True + + writer_thread.join() + if exc_info is not None: + raise exc_info[0], exc_info[1], exc_info[2] + + def upload(self, stream, buffer_size=None): + """ + Write from a source stream to this file. + + Parameters + ---------- + stream : file-like object + Source stream to pipe to this file. + buffer_size : int, optional + The buffer size to use for data transfers. 
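+
+ Examples
+ --------
+ A minimal sketch; the file name is illustrative and any readable
+ binary file-like object can serve as the source:
+
+ >>> import io
+ >>> import pyarrow as pa
+ >>> source = io.BytesIO(b'some data to upload')
+ >>> with pa.OSFile('example_upload.dat', mode='w') as f:
+ ...     f.upload(source)
+ ...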
+ """ + write_queue = Queue(50) + self._assert_writable() + + buffer_size = buffer_size or DEFAULT_BUFFER_SIZE + + done = False + exc_info = None + + def bg_write(): + try: + while not done or write_queue.qsize() > 0: + try: + buf = write_queue.get(timeout=0.01) + except QueueEmpty: + continue + + self.write(buf) + + except Exception as e: + exc_info = sys.exc_info() + + writer_thread = threading.Thread(target=bg_write) + writer_thread.start() + + try: + while True: + buf = stream.read(buffer_size) + if not buf: + break + + if writer_thread.is_alive(): + while write_queue.full(): + time.sleep(0.01) + else: + break + + write_queue.put_nowait(buf) + finally: + done = True + + writer_thread.join() + if exc_info is not None: + raise exc_info[0], exc_info[1], exc_info[2] + +BufferedIOBase.register(NativeFile) + +# ---------------------------------------------------------------------- +# Python file-like objects + + +cdef class PythonFile(NativeFile): + """ + A stream backed by a Python file object. + + This class allows using Python file objects with arbitrary Arrow + functions, including functions written in another language than Python. + + As a downside, there is a non-zero redirection cost in translating + Arrow stream calls to Python method calls. Furthermore, Python's + Global Interpreter Lock may limit parallelism in some situations. + + Examples + -------- + >>> import io + >>> import pyarrow as pa + >>> pa.PythonFile(io.BytesIO()) + + + Create a stream for writing: + + >>> buf = io.BytesIO() + >>> f = pa.PythonFile(buf, mode = 'w') + >>> f.writable() + True + >>> f.write(b'PythonFile') + 10 + >>> buf.getvalue() + b'PythonFile' + >>> f.close() + >>> f + + + Create a stream for reading: + + >>> buf = io.BytesIO(b'PythonFile') + >>> f = pa.PythonFile(buf, mode = 'r') + >>> f.mode + 'rb' + >>> f.read() + b'PythonFile' + >>> f + + >>> f.close() + >>> f + + """ + cdef: + object handle + + def __cinit__(self, handle, mode=None): + self.handle = handle + + if mode is None: + try: + inferred_mode = handle.mode + except AttributeError: + # Not all file-like objects have a mode attribute + # (e.g. BytesIO) + try: + inferred_mode = 'w' if handle.writable() else 'r' + except AttributeError: + raise ValueError("could not infer open mode for file-like " + "object %r, please pass it explicitly" + % (handle,)) + else: + inferred_mode = mode + + if inferred_mode.startswith('w'): + kind = 'w' + elif inferred_mode.startswith('r'): + kind = 'r' + else: + raise ValueError('Invalid file mode: {0}'.format(mode)) + + # If mode was given, check it matches the given file + if mode is not None: + if isinstance(handle, IOBase): + # Python 3 IO object + if kind == 'r': + if not handle.readable(): + raise TypeError("readable file expected") + else: + if not handle.writable(): + raise TypeError("writable file expected") + # (other duck-typed file-like objects are possible) + + # If possible, check the file is a binary file + if isinstance(handle, TextIOBase): + raise TypeError("binary file expected, got text file") + + if kind == 'r': + self.set_random_access_file( + shared_ptr[CRandomAccessFile](new PyReadableFile(handle))) + self.is_readable = True + else: + self.set_output_stream( + shared_ptr[COutputStream](new PyOutputStream(handle))) + self.is_writable = True + + def truncate(self, pos=None): + """ + Parameters + ---------- + pos : int, optional + """ + self.handle.truncate(pos) + + def readline(self, size=None): + """ + Read and return a line of bytes from the file. 
+ + If size is specified, read at most size bytes. + + Parameters + ---------- + size : int + Maximum number of bytes read + """ + return self.handle.readline(size) + + def readlines(self, hint=None): + """ + Read lines of the file. + + Parameters + ---------- + hint : int + Maximum number of bytes read until we stop + """ + return self.handle.readlines(hint) + + +cdef class MemoryMappedFile(NativeFile): + """ + A stream that represents a memory-mapped file. + + Supports 'r', 'r+', 'w' modes. + + Examples + -------- + Create a new file with memory map: + + >>> import pyarrow as pa + >>> mmap = pa.create_memory_map('example_mmap.dat', 10) + >>> mmap + + >>> mmap.close() + + Open an existing file with memory map: + + >>> with pa.memory_map('example_mmap.dat') as mmap: + ... mmap + ... + + """ + cdef: + shared_ptr[CMemoryMappedFile] handle + object path + + @staticmethod + def create(path, size): + """ + Create a MemoryMappedFile + + Parameters + ---------- + path : str + Where to create the file. + size : int + Size of the memory mapped file. + """ + cdef: + shared_ptr[CMemoryMappedFile] handle + c_string c_path = encode_file_path(path) + int64_t c_size = size + + with nogil: + handle = GetResultValue(CMemoryMappedFile.Create(c_path, c_size)) + + cdef MemoryMappedFile result = MemoryMappedFile() + result.path = path + result.is_readable = True + result.is_writable = True + result.set_output_stream( handle) + result.set_random_access_file( handle) + result.handle = handle + + return result + + def _open(self, path, mode='r'): + self.path = path + + cdef: + FileMode c_mode + shared_ptr[CMemoryMappedFile] handle + c_string c_path = encode_file_path(path) + + if mode in ('r', 'rb'): + c_mode = FileMode_READ + self.is_readable = True + elif mode in ('w', 'wb'): + c_mode = FileMode_WRITE + self.is_writable = True + elif mode in ('r+', 'r+b', 'rb+'): + c_mode = FileMode_READWRITE + self.is_readable = True + self.is_writable = True + else: + raise ValueError('Invalid file mode: {0}'.format(mode)) + + with nogil: + handle = GetResultValue(CMemoryMappedFile.Open(c_path, c_mode)) + + self.set_output_stream( handle) + self.set_random_access_file( handle) + self.handle = handle + + def resize(self, new_size): + """ + Resize the map and underlying file. + + Parameters + ---------- + new_size : new size in bytes + """ + check_status(self.handle.get().Resize(new_size)) + + def fileno(self): + self._assert_open() + return self.handle.get().file_descriptor() + + +def memory_map(path, mode='r'): + """ + Open memory map at file path. Size of the memory map cannot change. + + Parameters + ---------- + path : str + mode : {'r', 'r+', 'w'}, default 'r' + Whether the file is opened for reading ('r'), writing ('w') + or both ('r+'). + + Returns + ------- + mmap : MemoryMappedFile + + Examples + -------- + Reading from a memory map without any memory allocation or copying: + + >>> import pyarrow as pa + >>> with pa.output_stream('example_mmap.txt') as stream: + ... stream.write(b'Constructing a buffer referencing the mapped memory') + ... + 51 + >>> with pa.memory_map('example_mmap.txt') as mmap: + ... mmap.read_at(6,45) + ... + b'memory' + """ + _check_is_file(path) + + cdef MemoryMappedFile mmap = MemoryMappedFile() + mmap._open(path, mode) + return mmap + + +cdef _check_is_file(path): + if os.path.isdir(path): + raise IOError("Expected file path, but {0} is a directory" + .format(path)) + + +def create_memory_map(path, size): + """ + Create a file of the given size and memory-map it. 
+ + Parameters + ---------- + path : str + The file path to create, on the local filesystem. + size : int + The file size to create. + + Returns + ------- + mmap : MemoryMappedFile + + Examples + -------- + Create a file with a memory map: + + >>> import pyarrow as pa + >>> with pa.create_memory_map('example_mmap_create.dat', 27) as mmap: + ... mmap.write(b'Create a memory-mapped file') + ... mmap.read_at(10, 9) + ... + 27 + b'memory-map' + """ + return MemoryMappedFile.create(path, size) + + +cdef class OSFile(NativeFile): + """ + A stream backed by a regular file descriptor. + + Examples + -------- + Create a new file to write to: + + >>> import pyarrow as pa + >>> with pa.OSFile('example_osfile.arrow', mode='w') as f: + ... f.writable() + ... f.write(b'OSFile') + ... f.seekable() + ... + True + 6 + False + + Open the file to read: + + >>> with pa.OSFile('example_osfile.arrow', mode='r') as f: + ... f.mode + ... f.read() + ... + 'rb' + b'OSFile' + + Open the file to append: + + >>> with pa.OSFile('example_osfile.arrow', mode='ab') as f: + ... f.mode + ... f.write(b' is super!') + ... + 'ab' + 10 + >>> with pa.OSFile('example_osfile.arrow') as f: + ... f.read() + ... + b'OSFile is super!' + + Inspect created OSFile: + + >>> pa.OSFile('example_osfile.arrow') + + """ + cdef: + object path + + def __cinit__(self, path, mode='r', MemoryPool memory_pool=None): + _check_is_file(path) + self.path = path + + cdef: + FileMode c_mode + shared_ptr[Readable] handle + c_string c_path = encode_file_path(path) + + if mode in ('r', 'rb'): + self._open_readable(c_path, maybe_unbox_memory_pool(memory_pool)) + elif mode in ('w', 'wb'): + self._open_writable(c_path) + elif mode in ('a', 'ab'): + self._open_writable(c_path, append=True) + else: + raise ValueError('Invalid file mode: {0}'.format(mode)) + + cdef _open_readable(self, c_string path, CMemoryPool* pool): + cdef shared_ptr[ReadableFile] handle + + with nogil: + handle = GetResultValue(ReadableFile.Open(path, pool)) + + self.is_readable = True + self.set_random_access_file( handle) + + cdef _open_writable(self, c_string path, c_bool append=False): + with nogil: + self.output_stream = GetResultValue( + FileOutputStream.OpenWithAppend(path, append) + ) + self.is_writable = True + self._is_appending = append + + def fileno(self): + self._assert_open() + return self.handle.file_descriptor() + + +cdef class FixedSizeBufferWriter(NativeFile): + """ + A stream writing to a Arrow buffer. + + Examples + -------- + Create a stream to write to ``pyarrow.Buffer``: + + >>> import pyarrow as pa + >>> buf = pa.allocate_buffer(5) + >>> with pa.output_stream(buf) as stream: + ... stream.write(b'abcde') + ... stream + ... 
+ 5 + + + Inspect the buffer: + + >>> buf.to_pybytes() + b'abcde' + >>> buf + + """ + + def __cinit__(self, Buffer buffer): + self.output_stream.reset(new CFixedSizeBufferWriter(buffer.buffer)) + self.is_writable = True + + def set_memcopy_threads(self, int num_threads): + """ + Parameters + ---------- + num_threads : int + """ + cdef CFixedSizeBufferWriter* writer = \ + self.output_stream.get() + writer.set_memcopy_threads(num_threads) + + def set_memcopy_blocksize(self, int64_t blocksize): + """ + Parameters + ---------- + blocksize : int64 + """ + cdef CFixedSizeBufferWriter* writer = \ + self.output_stream.get() + writer.set_memcopy_blocksize(blocksize) + + def set_memcopy_threshold(self, int64_t threshold): + """ + Parameters + ---------- + threshold : int64 + """ + cdef CFixedSizeBufferWriter* writer = \ + self.output_stream.get() + writer.set_memcopy_threshold(threshold) + + +# ---------------------------------------------------------------------- +# Arrow buffers + + +cdef class Buffer(_Weakrefable): + """ + The base class for all Arrow buffers. + + A buffer represents a contiguous memory area. Many buffers will own + their memory, though not all of them do. + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call Buffer's constructor directly, use " + "`pyarrow.py_buffer` function instead.") + + cdef void init(self, const shared_ptr[CBuffer]& buffer): + self.buffer = buffer + self.shape[0] = self.size + self.strides[0] = (1) + + def __len__(self): + return self.size + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"address={hex(self.address)} " + f"size={self.size} " + f"is_cpu={self.is_cpu} " + f"is_mutable={self.is_mutable}>") + + @property + def size(self): + """ + The buffer size in bytes. + """ + return self.buffer.get().size() + + @property + def address(self): + """ + The buffer's address, as an integer. + + The returned address may point to CPU or device memory. + Use `is_cpu()` to disambiguate. + """ + return self.buffer.get().address() + + def hex(self): + """ + Compute hexadecimal representation of the buffer. + + Returns + ------- + : bytes + """ + return self.buffer.get().ToHexString() + + @property + def is_mutable(self): + """ + Whether the buffer is mutable. + """ + return self.buffer.get().is_mutable() + + @property + def is_cpu(self): + """ + Whether the buffer is CPU-accessible. + """ + return self.buffer.get().is_cpu() + + @property + def parent(self): + cdef shared_ptr[CBuffer] parent_buf = self.buffer.get().parent() + + if parent_buf.get() == NULL: + return None + else: + return pyarrow_wrap_buffer(parent_buf) + + def __getitem__(self, key): + if isinstance(key, slice): + if (key.step or 1) != 1: + raise IndexError('only slices with step 1 supported') + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.size)) + + cdef getitem(self, int64_t i): + return self.buffer.get().data()[i] + + def slice(self, offset=0, length=None): + """ + Slice this buffer. Memory is not copied. + + You can also use the Python slice notation ``buffer[start:stop]``. + + Parameters + ---------- + offset : int, default 0 + Offset from start of buffer to slice. + length : int, default None + Length of slice (default is until end of Buffer starting from + offset). + + Returns + ------- + sliced : Buffer + A logical view over this buffer. 
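+
+ Examples
+ --------
+ A short sketch of zero-copy slicing:
+
+ >>> import pyarrow as pa
+ >>> buf = pa.py_buffer(b'slice of data')
+ >>> buf.slice(6).to_pybytes()
+ b'of data'
+ >>> buf.slice(0, 5).to_pybytes()
+ b'slice'
+ >>> buf[6:].to_pybytes()
+ b'of data'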
+ """ + cdef shared_ptr[CBuffer] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + if length is None: + result = GetResultValue(SliceBufferSafe(self.buffer, offset)) + else: + result = GetResultValue(SliceBufferSafe(self.buffer, offset, + length)) + return pyarrow_wrap_buffer(result) + + def equals(self, Buffer other): + """ + Determine if two buffers contain exactly the same data. + + Parameters + ---------- + other : Buffer + + Returns + ------- + are_equal : bool + True if buffer contents and size are equal + """ + cdef c_bool result = False + with nogil: + result = self.buffer.get().Equals(deref(other.buffer.get())) + return result + + def __eq__(self, other): + if isinstance(other, Buffer): + return self.equals(other) + else: + return self.equals(py_buffer(other)) + + def __reduce_ex__(self, protocol): + if protocol >= 5: + bufobj = pickle.PickleBuffer(self) + elif self.buffer.get().is_mutable(): + # Need to pass a bytearray to recreate a mutable buffer when + # unpickling. + bufobj = PyByteArray_FromStringAndSize( + self.buffer.get().data(), + self.buffer.get().size()) + else: + bufobj = self.to_pybytes() + return py_buffer, (bufobj,) + + def to_pybytes(self): + """ + Return this buffer as a Python bytes object. Memory is copied. + """ + return cp.PyBytes_FromStringAndSize( + self.buffer.get().data(), + self.buffer.get().size()) + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + if self.buffer.get().is_mutable(): + buffer.readonly = 0 + else: + if flags & cp.PyBUF_WRITABLE: + raise BufferError("Writable buffer requested but Arrow " + "buffer was not mutable") + buffer.readonly = 1 + buffer.buf = self.buffer.get().data() + buffer.len = self.size + if buffer.buf == NULL: + # ARROW-16048: Ensure we don't export a NULL address. + assert buffer.len == 0 + buffer.buf = cp.PyBytes_AS_STRING(b"") + buffer.format = 'b' + buffer.internal = NULL + buffer.itemsize = 1 + buffer.ndim = 1 + buffer.obj = self + buffer.shape = self.shape + buffer.strides = self.strides + buffer.suboffsets = NULL + + def __getsegcount__(self, Py_ssize_t *len_out): + if len_out != NULL: + len_out[0] = self.size + return 1 + + def __getreadbuffer__(self, Py_ssize_t idx, void **p): + if idx != 0: + raise SystemError("accessing nonexistent buffer segment") + if p != NULL: + p[0] = self.buffer.get().data() + return self.size + + def __getwritebuffer__(self, Py_ssize_t idx, void **p): + if not self.buffer.get().is_mutable(): + raise SystemError("trying to write an immutable buffer") + if idx != 0: + raise SystemError("accessing nonexistent buffer segment") + if p != NULL: + p[0] = self.buffer.get().data() + return self.size + + +cdef class ResizableBuffer(Buffer): + """ + A base class for buffers that can be resized. + """ + + cdef void init_rz(self, const shared_ptr[CResizableBuffer]& buffer): + self.init( buffer) + + def resize(self, int64_t new_size, shrink_to_fit=False): + """ + Resize buffer to indicated size. + + Parameters + ---------- + new_size : int + New size of buffer (padding may be added internally). + shrink_to_fit : bool, default False + If this is true, the buffer is shrunk when new_size is less + than the current size. + If this is false, the buffer is never shrunk. 
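+
+ Examples
+ --------
+ A brief sketch using a resizable allocation:
+
+ >>> import pyarrow as pa
+ >>> buf = pa.allocate_buffer(1024, resizable=True)
+ >>> buf.size
+ 1024
+ >>> buf.resize(2048)
+ >>> buf.size
+ 2048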
+ """ + cdef c_bool c_shrink_to_fit = shrink_to_fit + with nogil: + check_status(( self.buffer.get()) + .Resize(new_size, c_shrink_to_fit)) + + +cdef shared_ptr[CResizableBuffer] _allocate_buffer(CMemoryPool* pool) except *: + with nogil: + return to_shared(GetResultValue(AllocateResizableBuffer(0, pool))) + + +def allocate_buffer(int64_t size, MemoryPool memory_pool=None, + resizable=False): + """ + Allocate a mutable buffer. + + Parameters + ---------- + size : int + Number of bytes to allocate (plus internal padding) + memory_pool : MemoryPool, optional + The pool to allocate memory from. + If not given, the default memory pool is used. + resizable : bool, default False + If true, the returned buffer is resizable. + + Returns + ------- + buffer : Buffer or ResizableBuffer + """ + cdef: + CMemoryPool* cpool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CResizableBuffer] c_rz_buffer + shared_ptr[CBuffer] c_buffer + + if resizable: + with nogil: + c_rz_buffer = to_shared(GetResultValue( + AllocateResizableBuffer(size, cpool))) + return pyarrow_wrap_resizable_buffer(c_rz_buffer) + else: + with nogil: + c_buffer = to_shared(GetResultValue(AllocateBuffer(size, cpool))) + return pyarrow_wrap_buffer(c_buffer) + + +cdef class BufferOutputStream(NativeFile): + """ + An output stream that writes to a resizable buffer. + + The buffer is produced as a result when ``getvalue()`` is called. + + Examples + -------- + Create an output stream, write data to it and finalize it with + ``getvalue()``: + + >>> import pyarrow as pa + >>> f = pa.BufferOutputStream() + >>> f.write(b'pyarrow.Buffer') + 14 + >>> f.closed + False + >>> f.getvalue() + + >>> f.closed + True + """ + + cdef: + shared_ptr[CResizableBuffer] buffer + + def __cinit__(self, MemoryPool memory_pool=None): + self.buffer = _allocate_buffer(maybe_unbox_memory_pool(memory_pool)) + self.output_stream.reset(new CBufferOutputStream( + self.buffer)) + self.is_writable = True + + def getvalue(self): + """ + Finalize output stream and return result as pyarrow.Buffer. + + Returns + ------- + value : Buffer + """ + with nogil: + check_status(self.output_stream.get().Close()) + return pyarrow_wrap_buffer( self.buffer) + + +cdef class MockOutputStream(NativeFile): + + def __cinit__(self): + self.output_stream.reset(new CMockOutputStream()) + self.is_writable = True + + def size(self): + handle = self.output_stream.get() + return handle.GetExtentBytesWritten() + + +cdef class BufferReader(NativeFile): + """ + Zero-copy reader from objects convertible to Arrow buffer. + + Parameters + ---------- + obj : Python bytes or pyarrow.Buffer + + Examples + -------- + Create an Arrow input stream and inspect it: + + >>> import pyarrow as pa + >>> data = b'reader data' + >>> buf = memoryview(data) + >>> with pa.input_stream(buf) as stream: + ... stream.size() + ... stream.read(6) + ... stream.seek(7) + ... stream.read(15) + ... + 11 + b'reader' + 7 + b'data' + """ + cdef: + Buffer buffer + + # XXX Needed to make numpydoc happy + def __init__(self, obj): + pass + + def __cinit__(self, object obj): + self.buffer = as_buffer(obj) + self.set_random_access_file(shared_ptr[CRandomAccessFile]( + new CBufferReader(self.buffer.buffer))) + self.is_readable = True + + +cdef class CompressedInputStream(NativeFile): + """ + An input stream wrapper which decompresses data on the fly. + + Parameters + ---------- + stream : string, path, pyarrow.NativeFile, or file-like object + Input stream object to wrap with the compression. 
+ compression : str + The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd"). + + Examples + -------- + Create an output stream wich compresses the data: + + >>> import pyarrow as pa + >>> data = b"Compressed stream" + >>> raw = pa.BufferOutputStream() + >>> with pa.CompressedOutputStream(raw, "gzip") as compressed: + ... compressed.write(data) + ... + 17 + + Create an input stream with decompression referencing the + buffer with compressed data: + + >>> cdata = raw.getvalue() + >>> with pa.input_stream(cdata, compression="gzip") as compressed: + ... compressed.read() + ... + b'Compressed stream' + + which actually translates to the use of ``BufferReader``and + ``CompressedInputStream``: + + >>> raw = pa.BufferReader(cdata) + >>> with pa.CompressedInputStream(raw, "gzip") as compressed: + ... compressed.read() + ... + b'Compressed stream' + """ + + def __init__(self, object stream, str compression not None): + cdef: + NativeFile nf + Codec codec = Codec(compression) + shared_ptr[CInputStream] c_reader + shared_ptr[CCompressedInputStream] compressed_stream + nf = get_native_file(stream, False) + c_reader = nf.get_input_stream() + compressed_stream = GetResultValue( + CCompressedInputStream.Make(codec.unwrap(), c_reader) + ) + self.set_input_stream( compressed_stream) + self.is_readable = True + + +cdef class CompressedOutputStream(NativeFile): + """ + An output stream wrapper which compresses data on the fly. + + Parameters + ---------- + stream : string, path, pyarrow.NativeFile, or file-like object + Input stream object to wrap with the compression. + compression : str + The compression type ("bz2", "brotli", "gzip", "lz4" or "zstd"). + + Examples + -------- + Create an output stream wich compresses the data: + + >>> import pyarrow as pa + >>> data = b"Compressed stream" + >>> raw = pa.BufferOutputStream() + >>> with pa.CompressedOutputStream(raw, "gzip") as compressed: + ... compressed.write(data) + ... + 17 + """ + + def __init__(self, object stream, str compression not None): + cdef: + Codec codec = Codec(compression) + shared_ptr[COutputStream] c_writer + shared_ptr[CCompressedOutputStream] compressed_stream + get_writer(stream, &c_writer) + compressed_stream = GetResultValue( + CCompressedOutputStream.Make(codec.unwrap(), c_writer) + ) + self.set_output_stream( compressed_stream) + self.is_writable = True + + +ctypedef CBufferedInputStream* _CBufferedInputStreamPtr +ctypedef CBufferedOutputStream* _CBufferedOutputStreamPtr +ctypedef CRandomAccessFile* _RandomAccessFilePtr + + +cdef class BufferedInputStream(NativeFile): + """ + An input stream that performs buffered reads from + an unbuffered input stream, which can mitigate the overhead + of many small reads in some cases. + + Parameters + ---------- + stream : NativeFile + The input stream to wrap with the buffer + buffer_size : int + Size of the temporary read buffer. + memory_pool : MemoryPool + The memory pool used to allocate the buffer. + """ + + def __init__(self, NativeFile stream, int buffer_size, + MemoryPool memory_pool=None): + cdef shared_ptr[CBufferedInputStream] buffered_stream + + if buffer_size <= 0: + raise ValueError('Buffer size must be larger than zero') + buffered_stream = GetResultValue(CBufferedInputStream.Create( + buffer_size, maybe_unbox_memory_pool(memory_pool), + stream.get_input_stream())) + + self.set_input_stream( buffered_stream) + self.is_readable = True + + def detach(self): + """ + Release the raw InputStream. + Further operations on this stream are invalid. 
+ + Returns + ------- + raw : NativeFile + The underlying raw input stream + """ + cdef: + shared_ptr[CInputStream] c_raw + _CBufferedInputStreamPtr buffered + NativeFile raw + + buffered = dynamic_cast[_CBufferedInputStreamPtr]( + self.input_stream.get()) + assert buffered != nullptr + + with nogil: + c_raw = GetResultValue(buffered.Detach()) + + raw = NativeFile() + raw.is_readable = True + # Find out whether the raw stream is a RandomAccessFile + # or a mere InputStream. This helps us support seek() etc. + # selectively. + if dynamic_cast[_RandomAccessFilePtr](c_raw.get()) != nullptr: + raw.set_random_access_file( + static_pointer_cast[CRandomAccessFile, CInputStream](c_raw)) + else: + raw.set_input_stream(c_raw) + return raw + + +cdef class BufferedOutputStream(NativeFile): + """ + An output stream that performs buffered reads from + an unbuffered output stream, which can mitigate the overhead + of many small writes in some cases. + + Parameters + ---------- + stream : NativeFile + The writable output stream to wrap with the buffer + buffer_size : int + Size of the buffer that should be added. + memory_pool : MemoryPool + The memory pool used to allocate the buffer. + """ + + def __init__(self, NativeFile stream, int buffer_size, + MemoryPool memory_pool=None): + cdef shared_ptr[CBufferedOutputStream] buffered_stream + + if buffer_size <= 0: + raise ValueError('Buffer size must be larger than zero') + buffered_stream = GetResultValue(CBufferedOutputStream.Create( + buffer_size, maybe_unbox_memory_pool(memory_pool), + stream.get_output_stream())) + + self.set_output_stream( buffered_stream) + self.is_writable = True + + def detach(self): + """ + Flush any buffered writes and release the raw OutputStream. + Further operations on this stream are invalid. + + Returns + ------- + raw : NativeFile + The underlying raw output stream. + """ + cdef: + shared_ptr[COutputStream] c_raw + _CBufferedOutputStreamPtr buffered + NativeFile raw + + buffered = dynamic_cast[_CBufferedOutputStreamPtr]( + self.output_stream.get()) + assert buffered != nullptr + + with nogil: + c_raw = GetResultValue(buffered.Detach()) + + raw = NativeFile() + raw.is_writable = True + raw.set_output_stream(c_raw) + return raw + + +cdef void _cb_transform(transform_func, const shared_ptr[CBuffer]& src, + shared_ptr[CBuffer]* dest) except *: + py_dest = transform_func(pyarrow_wrap_buffer(src)) + dest[0] = pyarrow_unwrap_buffer(py_buffer(py_dest)) + + +cdef class TransformInputStream(NativeFile): + """ + Transform an input stream. + + Parameters + ---------- + stream : NativeFile + The stream to transform. + transform_func : callable + The transformation to apply. 
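+
+ Examples
+ --------
+ A minimal sketch (assuming TransformInputStream is importable from the
+ top-level ``pyarrow`` namespace); ``transform_func`` receives a Buffer
+ for each chunk (plus a final empty Buffer) and returns bytes-like data:
+
+ >>> import pyarrow as pa
+ >>> stream = pa.TransformInputStream(pa.BufferReader(b'transform me'),
+ ...                                  lambda buf: buf.to_pybytes().upper())
+ >>> stream.read()
+ b'TRANSFORM ME'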
+ """ + + def __init__(self, NativeFile stream, transform_func): + self.set_input_stream(TransformInputStream.make_native( + stream.get_input_stream(), transform_func)) + self.is_readable = True + + @staticmethod + cdef shared_ptr[CInputStream] make_native( + shared_ptr[CInputStream] stream, transform_func) except *: + cdef: + shared_ptr[CInputStream] transform_stream + CTransformInputStreamVTable vtable + + vtable.transform = _cb_transform + return MakeTransformInputStream(stream, move(vtable), + transform_func) + + +class Transcoder: + + def __init__(self, decoder, encoder): + self._decoder = decoder + self._encoder = encoder + + def __call__(self, buf): + final = len(buf) == 0 + return self._encoder.encode(self._decoder.decode(buf, final), final) + + +cdef shared_ptr[function[StreamWrapFunc]] make_streamwrap_func( + src_encoding, dest_encoding) except *: + """ + Create a function that will add a transcoding transformation to a stream. + Data from that stream will be decoded according to ``src_encoding`` and + then re-encoded according to ``dest_encoding``. + The created function can be used to wrap streams. + + Parameters + ---------- + src_encoding : str + The codec to use when reading data. + dest_encoding : str + The codec to use for emitted data. + """ + cdef: + shared_ptr[function[StreamWrapFunc]] empty_func + CTransformInputStreamVTable vtable + + vtable.transform = _cb_transform + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + return MakeStreamTransformFunc(move(vtable), + Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +def transcoding_input_stream(stream, src_encoding, dest_encoding): + """ + Add a transcoding transformation to the stream. + Incoming data will be decoded according to ``src_encoding`` and + then re-encoded according to ``dest_encoding``. + + Parameters + ---------- + stream : NativeFile + The stream to which the transformation should be applied. + src_encoding : str + The codec to use when reading data. + dest_encoding : str + The codec to use for emitted data. + """ + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + if src_codec.name == dest_codec.name: + # Avoid losing performance on no-op transcoding + # (encoding errors won't be detected) + return stream + return TransformInputStream(stream, + Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +cdef shared_ptr[CInputStream] native_transcoding_input_stream( + shared_ptr[CInputStream] stream, src_encoding, + dest_encoding) except *: + src_codec = codecs.lookup(src_encoding) + dest_codec = codecs.lookup(dest_encoding) + if src_codec.name == dest_codec.name: + # Avoid losing performance on no-op transcoding + # (encoding errors won't be detected) + return stream + return TransformInputStream.make_native( + stream, Transcoder(src_codec.incrementaldecoder(), + dest_codec.incrementalencoder())) + + +def py_buffer(object obj): + """ + Construct an Arrow buffer from a Python bytes-like or buffer-like object + + Parameters + ---------- + obj : object + the object from which the buffer should be constructed. + """ + cdef shared_ptr[CBuffer] buf + buf = GetResultValue(PyBuffer.FromPyObject(obj)) + return pyarrow_wrap_buffer(buf) + + +def foreign_buffer(address, size, base=None): + """ + Construct an Arrow buffer with the given *address* and *size*. + + The buffer will be optionally backed by the Python *base* object, if given. 
+ The *base* object will be kept alive as long as this buffer is alive, + including across language boundaries (for example if the buffer is + referenced by C++ code). + + Parameters + ---------- + address : int + The starting address of the buffer. The address can + refer to both device or host memory but it must be + accessible from device after mapping it with + `get_device_address` method. + size : int + The size of device buffer in bytes. + base : {None, object} + Object that owns the referenced memory. + """ + cdef: + intptr_t c_addr = address + int64_t c_size = size + shared_ptr[CBuffer] buf + + check_status(PyForeignBuffer.Make( c_addr, c_size, + base, &buf)) + return pyarrow_wrap_buffer(buf) + + +def as_buffer(object o): + if isinstance(o, Buffer): + return o + return py_buffer(o) + + +cdef shared_ptr[CBuffer] as_c_buffer(object o) except *: + cdef shared_ptr[CBuffer] buf + if isinstance(o, Buffer): + buf = ( o).buffer + if buf == nullptr: + raise ValueError("got null buffer") + else: + buf = GetResultValue(PyBuffer.FromPyObject(o)) + return buf + + +cdef NativeFile get_native_file(object source, c_bool use_memory_map): + try: + source_path = _stringify_path(source) + except TypeError: + if isinstance(source, Buffer): + source = BufferReader(source) + elif not isinstance(source, NativeFile) and hasattr(source, 'read'): + # Optimistically hope this is file-like + source = PythonFile(source, mode='r') + else: + if use_memory_map: + source = memory_map(source_path, mode='r') + else: + source = OSFile(source_path, mode='r') + + return source + + +cdef get_reader(object source, c_bool use_memory_map, + shared_ptr[CRandomAccessFile]* reader): + cdef NativeFile nf + + nf = get_native_file(source, use_memory_map) + reader[0] = nf.get_random_access_file() + + +cdef get_input_stream(object source, c_bool use_memory_map, + shared_ptr[CInputStream]* out): + """ + Like get_reader(), but can automatically decompress, and returns + an InputStream. 
+ """ + cdef: + NativeFile nf + Codec codec + shared_ptr[CInputStream] input_stream + + try: + codec = Codec.detect(source) + except TypeError: + codec = None + + nf = get_native_file(source, use_memory_map) + input_stream = nf.get_input_stream() + + # codec is None if compression can't be detected + if codec is not None: + input_stream = GetResultValue( + CCompressedInputStream.Make(codec.unwrap(), input_stream) + ) + + out[0] = input_stream + + +cdef get_writer(object source, shared_ptr[COutputStream]* writer): + cdef NativeFile nf + + try: + source_path = _stringify_path(source) + except TypeError: + if not isinstance(source, NativeFile) and hasattr(source, 'write'): + # Optimistically hope this is file-like + source = PythonFile(source, mode='w') + else: + source = OSFile(source_path, mode='w') + + if isinstance(source, NativeFile): + nf = source + writer[0] = nf.get_output_stream() + else: + raise TypeError('Unable to write to object of type: {0}' + .format(type(source))) + + +# --------------------------------------------------------------------- + + +def _detect_compression(path): + if isinstance(path, str): + if path.endswith('.bz2'): + return 'bz2' + elif path.endswith('.gz'): + return 'gzip' + elif path.endswith('.lz4'): + return 'lz4' + elif path.endswith('.zst'): + return 'zstd' + + +cdef CCompressionType _ensure_compression(str name) except *: + uppercase = name.upper() + if uppercase == 'BZ2': + return CCompressionType_BZ2 + elif uppercase == 'GZIP': + return CCompressionType_GZIP + elif uppercase == 'BROTLI': + return CCompressionType_BROTLI + elif uppercase == 'LZ4' or uppercase == 'LZ4_FRAME': + return CCompressionType_LZ4_FRAME + elif uppercase == 'LZ4_RAW': + return CCompressionType_LZ4 + elif uppercase == 'SNAPPY': + return CCompressionType_SNAPPY + elif uppercase == 'ZSTD': + return CCompressionType_ZSTD + else: + raise ValueError('Invalid value for compression: {!r}'.format(name)) + + +cdef class CacheOptions(_Weakrefable): + """ + Cache options for a pre-buffered fragment scan. + + Parameters + ---------- + hole_size_limit : int, default 8KiB + The maximum distance in bytes between two consecutive ranges; beyond + this value, ranges are not combined. + range_size_limit : int, default 32MiB + The maximum size in bytes of a combined range; if combining two + consecutive ranges would produce a range of a size greater than this, + they are not combined + lazy : bool, default True + lazy = false: request all byte ranges when PreBuffer or WillNeed is called. + lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader + needs them. + lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the + range that is currently being read. + prefetch_limit : int, default 0 + The maximum number of ranges to be prefetched. This is only used for + lazy cache to asynchronously read some ranges after reading the target + range. 
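+
+ Examples
+ --------
+ A brief sketch (assuming CacheOptions is exposed in the top-level
+ ``pyarrow`` namespace):
+
+ >>> import pyarrow as pa
+ >>> opts = pa.CacheOptions(hole_size_limit=4096, range_size_limit=32 * 2**20, lazy=True)
+ >>> opts.lazy
+ True
+ >>> opts.hole_size_limit
+ 4096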
+ """ + + def __init__(self, *, hole_size_limit=None, range_size_limit=None, lazy=None, prefetch_limit=None): + self.wrapped = CCacheOptions.LazyDefaults() + if hole_size_limit is not None: + self.hole_size_limit = hole_size_limit + if range_size_limit is not None: + self.range_size_limit = range_size_limit + if lazy is not None: + self.lazy = lazy + if prefetch_limit is not None: + self.prefetch_limit = prefetch_limit + + cdef void init(self, CCacheOptions options): + self.wrapped = options + + cdef inline CCacheOptions unwrap(self): + return self.wrapped + + @staticmethod + cdef wrap(CCacheOptions options): + self = CacheOptions() + self.init(options) + return self + + @property + def hole_size_limit(self): + return self.wrapped.hole_size_limit + + @hole_size_limit.setter + def hole_size_limit(self, hole_size_limit): + self.wrapped.hole_size_limit = hole_size_limit + + @property + def range_size_limit(self): + return self.wrapped.range_size_limit + + @range_size_limit.setter + def range_size_limit(self, range_size_limit): + self.wrapped.range_size_limit = range_size_limit + + @property + def lazy(self): + return self.wrapped.lazy + + @lazy.setter + def lazy(self, lazy): + self.wrapped.lazy = lazy + + @property + def prefetch_limit(self): + return self.wrapped.prefetch_limit + + @prefetch_limit.setter + def prefetch_limit(self, prefetch_limit): + self.wrapped.prefetch_limit = prefetch_limit + + def __eq__(self, CacheOptions other): + try: + return self.unwrap().Equals(other.unwrap()) + except TypeError: + return False + + @staticmethod + def from_network_metrics(time_to_first_byte_millis, transfer_bandwidth_mib_per_sec, + ideal_bandwidth_utilization_frac=0.9, max_ideal_request_size_mib=64): + """ + Create suiteable CacheOptions based on provided network metrics. + + Typically this will be used with object storage solutions like Amazon S3, + Google Cloud Storage and Azure Blob Storage. + + Parameters + ---------- + time_to_first_byte_millis : int + Seek-time or Time-To-First-Byte (TTFB) in milliseconds, also called call + setup latency of a new read request. The value is a positive integer. + transfer_bandwidth_mib_per_sec : int + Data transfer Bandwidth (BW) in MiB/sec (per connection). The value is a positive + integer. + ideal_bandwidth_utilization_frac : int, default 0.9 + Transfer bandwidth utilization fraction (per connection) to maximize the net + data load. The value is a positive float less than 1. + max_ideal_request_size_mib : int, default 64 + The maximum single data request size (in MiB) to maximize the net data load. + + Returns + ------- + CacheOptions + """ + return CacheOptions.wrap(CCacheOptions.MakeFromNetworkMetrics( + time_to_first_byte_millis, transfer_bandwidth_mib_per_sec, + ideal_bandwidth_utilization_frac, max_ideal_request_size_mib)) + + @staticmethod + @binding(True) # Required for Cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. + return CacheOptions(**kwargs) + + def __reduce__(self): + kwargs = dict( + hole_size_limit=self.hole_size_limit, + range_size_limit=self.range_size_limit, + lazy=self.lazy, + prefetch_limit=self.prefetch_limit, + ) + return CacheOptions._reconstruct, (kwargs,) + + +cdef class Codec(_Weakrefable): + """ + Compression codec. + + Parameters + ---------- + compression : str + Type of compression codec to initialize, valid values are: 'gzip', + 'bz2', 'brotli', 'lz4' (or 'lz4_frame'), 'lz4_raw', 'zstd' and + 'snappy'. 
+ compression_level : int, None + Optional parameter specifying how aggressively to compress. The + possible ranges and effect of this parameter depend on the specific + codec chosen. Higher values compress more but typically use more + resources (CPU/RAM). Some codecs support negative values. + + gzip + The compression_level maps to the memlevel parameter of + deflateInit2. Higher levels use more RAM but are faster + and should have higher compression ratios. + + bz2 + The compression level maps to the blockSize100k parameter of + the BZ2_bzCompressInit function. Higher levels use more RAM + but are faster and should have higher compression ratios. + + brotli + The compression level maps to the BROTLI_PARAM_QUALITY + parameter. Higher values are slower and should have higher + compression ratios. + + lz4/lz4_frame/lz4_raw + The compression level parameter is not supported and must + be None + + zstd + The compression level maps to the compressionLevel parameter + of ZSTD_initCStream. Negative values are supported. Higher + values are slower and should have higher compression ratios. + + snappy + The compression level parameter is not supported and must + be None + + + Raises + ------ + ValueError + If invalid compression value is passed. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.Codec.is_available('gzip') + True + >>> codec = pa.Codec('gzip') + >>> codec.name + 'gzip' + >>> codec.compression_level + 9 + """ + + def __init__(self, str compression not None, compression_level=None): + cdef CCompressionType typ = _ensure_compression(compression) + if compression_level is not None: + self.wrapped = shared_ptr[CCodec](move(GetResultValue( + CCodec.CreateWithLevel(typ, compression_level)))) + else: + self.wrapped = shared_ptr[CCodec](move(GetResultValue( + CCodec.Create(typ)))) + + cdef inline CCodec* unwrap(self) nogil: + return self.wrapped.get() + + @staticmethod + def detect(path): + """ + Detect and instantiate compression codec based on file extension. + + Parameters + ---------- + path : str, path-like + File-path to detect compression from. + + Raises + ------ + TypeError + If the passed value is not path-like. + ValueError + If the compression can't be detected from the path. + + Returns + ------- + Codec + """ + return Codec(_detect_compression(_stringify_path(path))) + + @staticmethod + def is_available(str compression not None): + """ + Returns whether the compression support has been built and enabled. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + + Returns + ------- + bool + """ + cdef CCompressionType typ = _ensure_compression(compression) + return CCodec.IsAvailable(typ) + + @staticmethod + def supports_compression_level(str compression not None): + """ + Returns true if the compression level parameter is supported + for the given codec. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return CCodec.SupportsCompressionLevel(typ) + + @staticmethod + def default_compression_level(str compression not None): + """ + Returns the compression level that Arrow will use for the codec if + None is specified. + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. 
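+
+ Examples
+ --------
+ For example, for gzip (matching the level shown in the Codec example above):
+
+ >>> import pyarrow as pa
+ >>> pa.Codec.default_compression_level('gzip')
+ 9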
+ """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.DefaultCompressionLevel(typ)) + + @staticmethod + def minimum_compression_level(str compression not None): + """ + Returns the smallest valid value for the compression level + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.MinimumCompressionLevel(typ)) + + @staticmethod + def maximum_compression_level(str compression not None): + """ + Returns the largest valid value for the compression level + + Parameters + ---------- + compression : str + Type of compression codec, + refer to Codec docstring for a list of supported ones. + """ + cdef CCompressionType typ = _ensure_compression(compression) + return GetResultValue(CCodec.MaximumCompressionLevel(typ)) + + @property + def name(self): + """Returns the name of the codec""" + return frombytes(self.unwrap().name()) + + @property + def compression_level(self): + """Returns the compression level parameter of the codec""" + if self.name == 'snappy': + return None + return self.unwrap().compression_level() + + def compress(self, object buf, asbytes=False, memory_pool=None): + """ + Compress data from buffer-like object. + + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol + asbytes : bool, default False + Return result as Python bytes object, otherwise Buffer + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any + + Returns + ------- + compressed : pyarrow.Buffer or bytes (if asbytes=True) + """ + cdef: + shared_ptr[CBuffer] owned_buf + CBuffer* c_buf + PyObject* pyobj + ResizableBuffer out_buf + int64_t max_output_size + int64_t output_length + uint8_t* output_buffer = NULL + + owned_buf = as_c_buffer(buf) + c_buf = owned_buf.get() + + max_output_size = self.wrapped.get().MaxCompressedLen( + c_buf.size(), c_buf.data() + ) + + if asbytes: + pyobj = PyBytes_FromStringAndSizeNative(NULL, max_output_size) + output_buffer = cp.PyBytes_AS_STRING( pyobj) + else: + out_buf = allocate_buffer( + max_output_size, memory_pool=memory_pool, resizable=True + ) + output_buffer = out_buf.buffer.get().mutable_data() + + with nogil: + output_length = GetResultValue( + self.unwrap().Compress( + c_buf.size(), + c_buf.data(), + max_output_size, + output_buffer + ) + ) + + if asbytes: + cp._PyBytes_Resize(&pyobj, output_length) + return PyObject_to_object(pyobj) + else: + out_buf.resize(output_length) + return out_buf + + def decompress(self, object buf, decompressed_size=None, asbytes=False, + memory_pool=None): + """ + Decompress data from buffer-like object. + + Parameters + ---------- + buf : pyarrow.Buffer, bytes, or memoryview-compatible object + decompressed_size : int, default None + Size of the decompressed result + asbytes : boolean, default False + Return result as Python bytes object, otherwise Buffer + memory_pool : MemoryPool, default None + Memory pool to use for buffer allocations, if any. 
+
+ Returns
+ -------
+ uncompressed : pyarrow.Buffer or bytes (if asbytes=True)
+ """
+ cdef:
+ shared_ptr[CBuffer] owned_buf
+ CBuffer* c_buf
+ Buffer out_buf
+ int64_t output_size
+ uint8_t* output_buffer = NULL
+
+ owned_buf = as_c_buffer(buf)
+ c_buf = owned_buf.get()
+
+ if decompressed_size is None:
+ raise ValueError(
+ "Must pass decompressed_size"
+ )
+
+ output_size = decompressed_size
+
+ if asbytes:
+ pybuf = cp.PyBytes_FromStringAndSize(NULL, output_size)
+ output_buffer = <uint8_t*> cp.PyBytes_AS_STRING(pybuf)
+ else:
+ out_buf = allocate_buffer(output_size, memory_pool=memory_pool)
+ output_buffer = out_buf.buffer.get().mutable_data()
+
+ with nogil:
+ GetResultValue(
+ self.unwrap().Decompress(
+ c_buf.size(),
+ c_buf.data(),
+ output_size,
+ output_buffer
+ )
+ )
+
+ return pybuf if asbytes else out_buf
+
+ def __repr__(self):
+ name = f"pyarrow.{self.__class__.__name__}"
+ return (f"<{name} "
+ f"name={self.name} "
+ f"compression_level={self.compression_level}>")
+
+
+def compress(object buf, codec='lz4', asbytes=False, memory_pool=None):
+ """
+ Compress data from buffer-like object.
+
+ Parameters
+ ----------
+ buf : pyarrow.Buffer, bytes, or other object supporting buffer protocol
+ codec : str, default 'lz4'
+ Compression codec.
+ Supported types: {'brotli', 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'}
+ asbytes : bool, default False
+ Return result as Python bytes object, otherwise Buffer.
+ memory_pool : MemoryPool, default None
+ Memory pool to use for buffer allocations, if any.
+
+ Returns
+ -------
+ compressed : pyarrow.Buffer or bytes (if asbytes=True)
+ """
+ cdef Codec coder = Codec(codec)
+ return coder.compress(buf, asbytes=asbytes, memory_pool=memory_pool)
+
+
+def decompress(object buf, decompressed_size=None, codec='lz4',
+ asbytes=False, memory_pool=None):
+ """
+ Decompress data from buffer-like object.
+
+ Parameters
+ ----------
+ buf : pyarrow.Buffer, bytes, or memoryview-compatible object
+ Input object to decompress data from.
+ decompressed_size : int, default None
+ Size of the decompressed result
+ codec : str, default 'lz4'
+ Compression codec.
+ Supported types: {'brotli', 'gzip', 'lz4', 'lz4_raw', 'snappy', 'zstd'}
+ asbytes : bool, default False
+ Return result as Python bytes object, otherwise Buffer.
+ memory_pool : MemoryPool, default None
+ Memory pool to use for buffer allocations, if any.
+
+ Returns
+ -------
+ uncompressed : pyarrow.Buffer or bytes (if asbytes=True)
+ """
+ cdef Codec decoder = Codec(codec)
+ return decoder.decompress(buf, asbytes=asbytes, memory_pool=memory_pool,
+ decompressed_size=decompressed_size)
+
+
+def input_stream(source, compression='detect', buffer_size=None):
+ """
+ Create an Arrow input stream.
+
+ Parameters
+ ----------
+ source : str, Path, buffer, or file-like object
+ The source to open for reading.
+ compression : str, optional, default 'detect'
+ The compression algorithm to use for on-the-fly decompression.
+ If "detect" and source is a file path, then compression will be
+ chosen based on the file extension.
+ If None, no compression will be applied.
+ Otherwise, a well-known algorithm name must be supplied (e.g. "gzip").
+ buffer_size : int, default None
+ If None or 0, no buffering will happen. Otherwise the size of the
+ temporary read buffer.
+
+ Examples
+ --------
+ Create a readable BufferReader (NativeFile) from a Buffer or a memoryview object:
+
+ >>> import pyarrow as pa
+ >>> buf = memoryview(b"some data")
+ >>> with pa.input_stream(buf) as stream:
+ ... stream.read(4)
+ ...
+ b'some'
+
+ Create a readable OSFile (NativeFile) from a string or file path:
+
+ >>> import gzip
+ >>> with gzip.open('example.gz', 'wb') as f:
+ ... f.write(b'some data')
+ ...
+ 9
+ >>> with pa.input_stream('example.gz') as stream:
+ ... stream.read()
+ ...
+ b'some data'
+
+ Create a readable PythonFile (NativeFile) from a Python file object:
+
+ >>> with open('example.txt', mode='w') as f:
+ ... f.write('some text')
+ ...
+ 9
+ >>> with pa.input_stream('example.txt') as stream:
+ ... stream.read(6)
+ ...
+ b'some t'
+ """
+ cdef NativeFile stream
+
+ try:
+ source_path = _stringify_path(source)
+ except TypeError:
+ source_path = None
+
+ if isinstance(source, NativeFile):
+ stream = source
+ elif source_path is not None:
+ stream = OSFile(source_path, 'r')
+ elif isinstance(source, (Buffer, memoryview)):
+ stream = BufferReader(as_buffer(source))
+ elif (hasattr(source, 'read') and
+ hasattr(source, 'close') and
+ hasattr(source, 'closed')):
+ stream = PythonFile(source, 'r')
+ else:
+ raise TypeError("pa.input_stream() called with instance of '{}'"
+ .format(source.__class__))
+
+ if compression == 'detect':
+ # detect for OSFile too
+ compression = _detect_compression(source_path)
+
+ if buffer_size is not None and buffer_size != 0:
+ stream = BufferedInputStream(stream, buffer_size)
+
+ if compression is not None:
+ stream = CompressedInputStream(stream, compression)
+
+ return stream
+
+
+def output_stream(source, compression='detect', buffer_size=None):
+ """
+ Create an Arrow output stream.
+
+ Parameters
+ ----------
+ source : str, Path, buffer, or file-like object
+ The source to open for writing.
+ compression : str, optional, default 'detect'
+ The compression algorithm to use for on-the-fly compression.
+ If "detect" and source is a file path, then compression will be
+ chosen based on the file extension.
+ If None, no compression will be applied.
+ Otherwise, a well-known algorithm name must be supplied (e.g. "gzip").
+ buffer_size : int, default None
+ If None or 0, no buffering will happen. Otherwise the size of the
+ temporary write buffer.
+
+ Examples
+ --------
+ Create a writable NativeFile from a pyarrow Buffer:
+
+ >>> import pyarrow as pa
+ >>> data = b"buffer data"
+ >>> empty_obj = bytearray(11)
+ >>> buf = pa.py_buffer(empty_obj)
+ >>> with pa.output_stream(buf) as stream:
+ ... stream.write(data)
+ ...
+ 11
+ >>> with pa.input_stream(buf) as stream:
+ ... stream.read(6)
+ ...
+ b'buffer'
+
+ or from a memoryview object:
+
+ >>> buf = memoryview(empty_obj)
+ >>> with pa.output_stream(buf) as stream:
+ ... stream.write(data)
+ ...
+ 11
+ >>> with pa.input_stream(buf) as stream:
+ ... stream.read()
+ ...
+ b'buffer data'
+
+ Create a writable NativeFile from a string or file path:
+
+ >>> with pa.output_stream('example_second.txt') as stream:
+ ... stream.write(b'Write some data')
+ ...
+ 15
+ >>> with pa.input_stream('example_second.txt') as stream:
+ ... stream.read()
+ ...
+ b'Write some data' + """ + cdef NativeFile stream + + try: + source_path = _stringify_path(source) + except TypeError: + source_path = None + + if isinstance(source, NativeFile): + stream = source + elif source_path is not None: + stream = OSFile(source_path, 'w') + elif isinstance(source, (Buffer, memoryview)): + stream = FixedSizeBufferWriter(as_buffer(source)) + elif (hasattr(source, 'write') and + hasattr(source, 'close') and + hasattr(source, 'closed')): + stream = PythonFile(source, 'w') + else: + raise TypeError("pa.output_stream() called with instance of '{}'" + .format(source.__class__)) + + if compression == 'detect': + compression = _detect_compression(source_path) + + if buffer_size is not None and buffer_size != 0: + stream = BufferedOutputStream(stream, buffer_size) + + if compression is not None: + stream = CompressedOutputStream(stream, compression) + + return stream diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.py new file mode 100644 index 0000000000000000000000000000000000000000..523196e1e33894871319462cdd6c72bd85830cf0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/ipc.py @@ -0,0 +1,285 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Arrow file and stream reader/writer classes, and other messaging tools + +import os + +import pyarrow as pa + +from pyarrow.lib import (IpcReadOptions, IpcWriteOptions, ReadStats, WriteStats, # noqa + Message, MessageReader, + RecordBatchReader, _ReadPandasMixin, + MetadataVersion, + read_message, read_record_batch, read_schema, + read_tensor, write_tensor, + get_record_batch_size, get_tensor_size) +import pyarrow.lib as lib + + +class RecordBatchStreamReader(lib._RecordBatchStreamReader): + """ + Reader for the Arrow streaming binary format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + If you want to use memory map use MemoryMappedFile as source. + options : pyarrow.ipc.IpcReadOptions + Options for IPC deserialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + """ + + def __init__(self, source, *, options=None, memory_pool=None): + options = _ensure_default_ipc_read_options(options) + self._open(source, options=options, memory_pool=memory_pool) + + +_ipc_writer_class_doc = """\ +Parameters +---------- +sink : str, pyarrow.NativeFile, or file-like Python object + Either a file path, or a writable file object. +schema : pyarrow.Schema + The Arrow schema for data to be written to the file. +use_legacy_format : bool, default None + Deprecated in favor of setting options. Cannot be provided with + options. 
+ + If None, False will be used unless this default is overridden by + setting the environment variable ARROW_PRE_0_15_IPC_FORMAT=1 +options : pyarrow.ipc.IpcWriteOptions + Options for IPC serialization. + + If None, default values will be used: the legacy format will not + be used unless overridden by setting the environment variable + ARROW_PRE_0_15_IPC_FORMAT=1, and the V5 metadata version will be + used unless overridden by setting the environment variable + ARROW_PRE_1_0_METADATA_VERSION=1.""" + + +class RecordBatchStreamWriter(lib._RecordBatchStreamWriter): + __doc__ = """Writer for the Arrow streaming binary format + +{}""".format(_ipc_writer_class_doc) + + def __init__(self, sink, schema, *, use_legacy_format=None, options=None): + options = _get_legacy_format_default(use_legacy_format, options) + self._open(sink, schema, options=options) + + +class RecordBatchFileReader(lib._RecordBatchFileReader): + """ + Class for reading Arrow record batch data from the Arrow binary file format + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + If you want to use memory map use MemoryMappedFile as source. + footer_offset : int, default None + If the file is embedded in some larger file, this is the byte offset to + the very end of the file data + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + """ + + def __init__(self, source, footer_offset=None, *, options=None, + memory_pool=None): + options = _ensure_default_ipc_read_options(options) + self._open(source, footer_offset=footer_offset, + options=options, memory_pool=memory_pool) + + +class RecordBatchFileWriter(lib._RecordBatchFileWriter): + + __doc__ = """Writer to create the Arrow binary file format + +{}""".format(_ipc_writer_class_doc) + + def __init__(self, sink, schema, *, use_legacy_format=None, options=None): + options = _get_legacy_format_default(use_legacy_format, options) + self._open(sink, schema, options=options) + + +def _get_legacy_format_default(use_legacy_format, options): + if use_legacy_format is not None and options is not None: + raise ValueError( + "Can provide at most one of options and use_legacy_format") + elif options: + if not isinstance(options, IpcWriteOptions): + raise TypeError("expected IpcWriteOptions, got {}" + .format(type(options))) + return options + + metadata_version = MetadataVersion.V5 + if use_legacy_format is None: + use_legacy_format = \ + bool(int(os.environ.get('ARROW_PRE_0_15_IPC_FORMAT', '0'))) + if bool(int(os.environ.get('ARROW_PRE_1_0_METADATA_VERSION', '0'))): + metadata_version = MetadataVersion.V4 + return IpcWriteOptions(use_legacy_format=use_legacy_format, + metadata_version=metadata_version) + + +def _ensure_default_ipc_read_options(options): + if options and not isinstance(options, IpcReadOptions): + raise TypeError( + "expected IpcReadOptions, got {}".format(type(options)) + ) + return options or IpcReadOptions() + + +def new_stream(sink, schema, *, use_legacy_format=None, options=None): + return RecordBatchStreamWriter(sink, schema, + use_legacy_format=use_legacy_format, + options=options) + + +new_stream.__doc__ = """\ +Create an Arrow columnar IPC stream writer instance + +{} + +Returns +------- +writer : RecordBatchStreamWriter + A writer for the given sink +""".format(_ipc_writer_class_doc) + + +def open_stream(source, *, 
options=None, memory_pool=None): + """ + Create reader for Arrow streaming format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + + Returns + ------- + reader : RecordBatchStreamReader + A reader for the given source + """ + return RecordBatchStreamReader(source, options=options, + memory_pool=memory_pool) + + +def new_file(sink, schema, *, use_legacy_format=None, options=None): + return RecordBatchFileWriter(sink, schema, + use_legacy_format=use_legacy_format, + options=options) + + +new_file.__doc__ = """\ +Create an Arrow columnar IPC file writer instance + +{} + +Returns +------- +writer : RecordBatchFileWriter + A writer for the given sink +""".format(_ipc_writer_class_doc) + + +def open_file(source, footer_offset=None, *, options=None, memory_pool=None): + """ + Create reader for Arrow file format. + + Parameters + ---------- + source : bytes/buffer-like, pyarrow.NativeFile, or file-like Python object + Either an in-memory buffer, or a readable file object. + footer_offset : int, default None + If the file is embedded in some larger file, this is the byte offset to + the very end of the file data. + options : pyarrow.ipc.IpcReadOptions + Options for IPC serialization. + If None, default values will be used. + memory_pool : MemoryPool, default None + If None, default memory pool is used. + + Returns + ------- + reader : RecordBatchFileReader + A reader for the given source + """ + return RecordBatchFileReader( + source, footer_offset=footer_offset, + options=options, memory_pool=memory_pool) + + +def serialize_pandas(df, *, nthreads=None, preserve_index=None): + """ + Serialize a pandas DataFrame into a buffer protocol compatible object. + + Parameters + ---------- + df : pandas.DataFrame + nthreads : int, default None + Number of threads to use for conversion to Arrow, default all CPUs. + preserve_index : bool, default None + The default of None will store the index as a column, except for + RangeIndex which is stored as metadata only. If True, always + preserve the pandas index data as a column. If False, no index + information is saved and the result will have a default RangeIndex. + + Returns + ------- + buf : buffer + An object compatible with the buffer protocol. + """ + batch = pa.RecordBatch.from_pandas(df, nthreads=nthreads, + preserve_index=preserve_index) + sink = pa.BufferOutputStream() + with pa.RecordBatchStreamWriter(sink, batch.schema) as writer: + writer.write_batch(batch) + return sink.getvalue() + + +def deserialize_pandas(buf, *, use_threads=True): + """Deserialize a buffer protocol compatible object into a pandas DataFrame. + + Parameters + ---------- + buf : buffer + An object compatible with the buffer protocol. + use_threads : bool, default True + Whether to parallelize the conversion using multiple threads. 
+
+ Returns
+ -------
+ df : pandas.DataFrame
+ The buffer deserialized as pandas DataFrame
+ """
+ buffer_reader = pa.BufferReader(buf)
+ with pa.RecordBatchStreamReader(buffer_reader) as reader:
+ table = reader.read_all()
+ return table.to_pandas(use_threads=use_threads)
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/json.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..a864f5d998a443e949d4ee24ae2df628cf81b8c5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/json.py
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+from pyarrow._json import ReadOptions, ParseOptions, read_json # noqa
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pxd b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..58ec34addbc0a8a1399ca9d576b37dc302fcd38b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pxd
@@ -0,0 +1,679 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from cpython cimport PyObject
+from libcpp cimport nullptr, bool as c_bool
+from libcpp.cast cimport dynamic_cast
+from libcpp.memory cimport dynamic_pointer_cast
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_python cimport *
+
+# Will be available in Cython 3, not backported
+# ref: https://github.com/cython/cython/issues/3293#issuecomment-1223058101
+cdef extern from "<optional>" namespace "std" nogil:
+ cdef cppclass nullopt_t:
+ nullopt_t()
+
+ cdef nullopt_t nullopt
+
+ cdef cppclass optional[T]:
+ ctypedef T value_type
+ optional()
+ optional(nullopt_t)
+ optional(optional&) except +
+ optional(T&) except +
+ c_bool has_value()
+ T& value()
+ T& value_or[U](U& default_value)
+ void swap(optional&)
+ void reset()
+ T& emplace(...)
+ T& operator*() + # T* operator->() # Not Supported + optional& operator=(optional&) + optional& operator=[U](U&) + c_bool operator bool() + c_bool operator!() + c_bool operator==[U](optional&, U&) + c_bool operator!=[U](optional&, U&) + c_bool operator<[U](optional&, U&) + c_bool operator>[U](optional&, U&) + c_bool operator<=[U](optional&, U&) + c_bool operator>=[U](optional&, U&) + + optional[T] make_optional[T](...) except + + +cdef extern from "Python.h": + int PySlice_Check(object) + + +cdef int check_status(const CStatus& status) except -1 nogil +cdef object convert_status(const CStatus& status) + + +cdef class _Weakrefable: + cdef object __weakref__ + + +cdef class IpcWriteOptions(_Weakrefable): + cdef: + CIpcWriteOptions c_options + + +cdef class IpcReadOptions(_Weakrefable): + cdef: + CIpcReadOptions c_options + + +cdef class Message(_Weakrefable): + cdef: + unique_ptr[CMessage] message + + +cdef class MemoryPool(_Weakrefable): + cdef: + CMemoryPool* pool + + cdef void init(self, CMemoryPool* pool) + + +cdef CMemoryPool* maybe_unbox_memory_pool(MemoryPool memory_pool) + + +cdef object box_memory_pool(CMemoryPool* pool) + + +cdef class DataType(_Weakrefable): + cdef: + shared_ptr[CDataType] sp_type + CDataType* type + bytes pep3118_format + + cdef void init(self, const shared_ptr[CDataType]& type) except * + cpdef Field field(self, i) + + +cdef class ListType(DataType): + cdef: + const CListType* list_type + + +cdef class LargeListType(DataType): + cdef: + const CLargeListType* list_type + + +cdef class MapType(DataType): + cdef: + const CMapType* map_type + + +cdef class FixedSizeListType(DataType): + cdef: + const CFixedSizeListType* list_type + + +cdef class StructType(DataType): + cdef: + const CStructType* struct_type + + cdef Field field_by_name(self, name) + + +cdef class DictionaryMemo(_Weakrefable): + cdef: + # Even though the CDictionaryMemo instance is private, we allocate + # it on the heap so as to avoid C++ ABI issues with Python wheels. 
+ shared_ptr[CDictionaryMemo] sp_memo + CDictionaryMemo* memo + + +cdef class DictionaryType(DataType): + cdef: + const CDictionaryType* dict_type + + +cdef class TimestampType(DataType): + cdef: + const CTimestampType* ts_type + + +cdef class Time32Type(DataType): + cdef: + const CTime32Type* time_type + + +cdef class Time64Type(DataType): + cdef: + const CTime64Type* time_type + + +cdef class DurationType(DataType): + cdef: + const CDurationType* duration_type + + +cdef class FixedSizeBinaryType(DataType): + cdef: + const CFixedSizeBinaryType* fixed_size_binary_type + + +cdef class Decimal128Type(FixedSizeBinaryType): + cdef: + const CDecimal128Type* decimal128_type + + +cdef class Decimal256Type(FixedSizeBinaryType): + cdef: + const CDecimal256Type* decimal256_type + + +cdef class RunEndEncodedType(DataType): + cdef: + const CRunEndEncodedType* run_end_encoded_type + + +cdef class BaseExtensionType(DataType): + cdef: + const CExtensionType* ext_type + + +cdef class ExtensionType(BaseExtensionType): + cdef: + const CPyExtensionType* cpy_ext_type + + +cdef class FixedShapeTensorType(BaseExtensionType): + cdef: + const CFixedShapeTensorType* tensor_ext_type + + +cdef class PyExtensionType(ExtensionType): + pass + + +cdef class _Metadata(_Weakrefable): + # required because KeyValueMetadata also extends collections.abc.Mapping + # and the first parent class must be an extension type + pass + + +cdef class KeyValueMetadata(_Metadata): + cdef: + shared_ptr[const CKeyValueMetadata] wrapped + const CKeyValueMetadata* metadata + + cdef void init(self, const shared_ptr[const CKeyValueMetadata]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[const CKeyValueMetadata]& sp) + cdef inline shared_ptr[const CKeyValueMetadata] unwrap(self) nogil + + +cdef class Field(_Weakrefable): + cdef: + shared_ptr[CField] sp_field + CField* field + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CField]& field) + + +cdef class Schema(_Weakrefable): + cdef: + shared_ptr[CSchema] sp_schema + CSchema* schema + + cdef void init(self, const vector[shared_ptr[CField]]& fields) + cdef void init_schema(self, const shared_ptr[CSchema]& schema) + + +cdef class Scalar(_Weakrefable): + cdef: + shared_ptr[CScalar] wrapped + + cdef void init(self, const shared_ptr[CScalar]& wrapped) + + @staticmethod + cdef wrap(const shared_ptr[CScalar]& wrapped) + + cdef inline shared_ptr[CScalar] unwrap(self) nogil + + +cdef class _PandasConvertible(_Weakrefable): + pass + + +cdef class Array(_PandasConvertible): + cdef: + shared_ptr[CArray] sp_array + CArray* ap + + cdef readonly: + DataType type + # To allow Table to propagate metadata to pandas.Series + object _name + + cdef void init(self, const shared_ptr[CArray]& sp_array) except * + cdef getitem(self, int64_t i) + cdef int64_t length(self) + + +cdef class Tensor(_Weakrefable): + cdef: + shared_ptr[CTensor] sp_tensor + CTensor* tp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CTensor]& sp_tensor) + + +cdef class SparseCSRMatrix(_Weakrefable): + cdef: + shared_ptr[CSparseCSRMatrix] sp_sparse_tensor + CSparseCSRMatrix* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) + + +cdef class SparseCSCMatrix(_Weakrefable): + cdef: + shared_ptr[CSparseCSCMatrix] sp_sparse_tensor + CSparseCSCMatrix* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) + + +cdef class SparseCOOTensor(_Weakrefable): + cdef: 
+ shared_ptr[CSparseCOOTensor] sp_sparse_tensor + CSparseCOOTensor* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) + + +cdef class SparseCSFTensor(_Weakrefable): + cdef: + shared_ptr[CSparseCSFTensor] sp_sparse_tensor + CSparseCSFTensor* stp + + cdef readonly: + DataType type + + cdef void init(self, const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) + + +cdef class NullArray(Array): + pass + + +cdef class BooleanArray(Array): + pass + + +cdef class NumericArray(Array): + pass + + +cdef class IntegerArray(NumericArray): + pass + + +cdef class FloatingPointArray(NumericArray): + pass + + +cdef class Int8Array(IntegerArray): + pass + + +cdef class UInt8Array(IntegerArray): + pass + + +cdef class Int16Array(IntegerArray): + pass + + +cdef class UInt16Array(IntegerArray): + pass + + +cdef class Int32Array(IntegerArray): + pass + + +cdef class UInt32Array(IntegerArray): + pass + + +cdef class Int64Array(IntegerArray): + pass + + +cdef class UInt64Array(IntegerArray): + pass + + +cdef class HalfFloatArray(FloatingPointArray): + pass + + +cdef class FloatArray(FloatingPointArray): + pass + + +cdef class DoubleArray(FloatingPointArray): + pass + + +cdef class FixedSizeBinaryArray(Array): + pass + + +cdef class Decimal128Array(FixedSizeBinaryArray): + pass + + +cdef class Decimal256Array(FixedSizeBinaryArray): + pass + + +cdef class StructArray(Array): + pass + + +cdef class BaseListArray(Array): + pass + + +cdef class ListArray(BaseListArray): + pass + + +cdef class LargeListArray(BaseListArray): + pass + + +cdef class MapArray(ListArray): + pass + + +cdef class FixedSizeListArray(BaseListArray): + pass + + +cdef class UnionArray(Array): + pass + + +cdef class StringArray(Array): + pass + + +cdef class BinaryArray(Array): + pass + + +cdef class DictionaryArray(Array): + cdef: + object _indices, _dictionary + + +cdef class ExtensionArray(Array): + pass + + +cdef class MonthDayNanoIntervalArray(Array): + pass + + +cdef wrap_array_output(PyObject* output) +cdef wrap_datum(const CDatum& datum) + + +cdef class ChunkedArray(_PandasConvertible): + cdef: + shared_ptr[CChunkedArray] sp_chunked_array + CChunkedArray* chunked_array + + cdef readonly: + # To allow Table to propagate metadata to pandas.Series + object _name + + cdef void init(self, const shared_ptr[CChunkedArray]& chunked_array) + cdef getitem(self, int64_t i) + + +cdef class _Tabular(_PandasConvertible): + pass + + +cdef class Table(_Tabular): + cdef: + shared_ptr[CTable] sp_table + CTable* table + + cdef void init(self, const shared_ptr[CTable]& table) + + +cdef class RecordBatch(_Tabular): + cdef: + shared_ptr[CRecordBatch] sp_batch + CRecordBatch* batch + Schema _schema + + cdef void init(self, const shared_ptr[CRecordBatch]& table) + + +cdef class Buffer(_Weakrefable): + cdef: + shared_ptr[CBuffer] buffer + Py_ssize_t shape[1] + Py_ssize_t strides[1] + + cdef void init(self, const shared_ptr[CBuffer]& buffer) + cdef getitem(self, int64_t i) + + +cdef class ResizableBuffer(Buffer): + + cdef void init_rz(self, const shared_ptr[CResizableBuffer]& buffer) + + +cdef class NativeFile(_Weakrefable): + cdef: + shared_ptr[CInputStream] input_stream + shared_ptr[CRandomAccessFile] random_access + shared_ptr[COutputStream] output_stream + bint is_readable + bint is_writable + bint is_seekable + bint _is_appending + bint own_file + + # By implementing these "virtual" functions (all functions in Cython + # extension classes are technically virtual in the C++ sense) we can expose 
+ # the arrow::io abstract file interfaces to other components throughout the + # suite of Arrow C++ libraries + cdef set_random_access_file(self, shared_ptr[CRandomAccessFile] handle) + cdef set_input_stream(self, shared_ptr[CInputStream] handle) + cdef set_output_stream(self, shared_ptr[COutputStream] handle) + + cdef shared_ptr[CRandomAccessFile] get_random_access_file(self) except * + cdef shared_ptr[CInputStream] get_input_stream(self) except * + cdef shared_ptr[COutputStream] get_output_stream(self) except * + + +cdef class BufferedInputStream(NativeFile): + pass + + +cdef class BufferedOutputStream(NativeFile): + pass + + +cdef class CompressedInputStream(NativeFile): + pass + + +cdef class CompressedOutputStream(NativeFile): + pass + + +cdef class _CRecordBatchWriter(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchWriter] writer + + +cdef class RecordBatchReader(_Weakrefable): + cdef: + SharedPtrNoGIL[CRecordBatchReader] reader + + +cdef class CacheOptions(_Weakrefable): + cdef: + CCacheOptions wrapped + + cdef void init(self, CCacheOptions options) + + cdef inline CCacheOptions unwrap(self) + + @staticmethod + cdef wrap(const CCacheOptions options) + + +cdef class Codec(_Weakrefable): + cdef: + shared_ptr[CCodec] wrapped + + cdef inline CCodec* unwrap(self) nogil + + +# This class is only used internally for now +cdef class StopToken: + cdef: + CStopToken stop_token + + cdef void init(self, CStopToken stop_token) + + +cdef get_input_stream(object source, c_bool use_memory_map, + shared_ptr[CInputStream]* reader) +cdef get_reader(object source, c_bool use_memory_map, + shared_ptr[CRandomAccessFile]* reader) +cdef get_writer(object source, shared_ptr[COutputStream]* writer) +cdef NativeFile get_native_file(object source, c_bool use_memory_map) + +cdef shared_ptr[CInputStream] native_transcoding_input_stream( + shared_ptr[CInputStream] stream, src_encoding, + dest_encoding) except * + +cdef shared_ptr[function[StreamWrapFunc]] make_streamwrap_func( + src_encoding, dest_encoding) except * + +# Default is allow_none=False +cpdef DataType ensure_type(object type, bint allow_none=*) + +cdef timeunit_to_string(TimeUnit unit) +cdef TimeUnit string_to_timeunit(unit) except * + +# Exceptions may be raised when converting dict values, so need to +# check exception state on return +cdef shared_ptr[const CKeyValueMetadata] pyarrow_unwrap_metadata( + object meta) except * +cdef object pyarrow_wrap_metadata( + const shared_ptr[const CKeyValueMetadata]& meta) + +# +# Public Cython API for 3rd party code +# +# If you add functions to this list, please also update +# `cpp/src/arrow/python/pyarrow.{h, cc}` +# + +# Wrapping C++ -> Python + +cdef public object pyarrow_wrap_buffer(const shared_ptr[CBuffer]& buf) +cdef public object pyarrow_wrap_resizable_buffer( + const shared_ptr[CResizableBuffer]& buf) + +cdef public object pyarrow_wrap_data_type(const shared_ptr[CDataType]& type) +cdef public object pyarrow_wrap_field(const shared_ptr[CField]& field) +cdef public object pyarrow_wrap_schema(const shared_ptr[CSchema]& type) + +cdef public object pyarrow_wrap_scalar(const shared_ptr[CScalar]& sp_scalar) + +cdef public object pyarrow_wrap_array(const shared_ptr[CArray]& sp_array) +cdef public object pyarrow_wrap_chunked_array( + const shared_ptr[CChunkedArray]& sp_array) + +cdef public object pyarrow_wrap_sparse_coo_tensor( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor) +cdef public object pyarrow_wrap_sparse_csc_matrix( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor) +cdef public 
object pyarrow_wrap_sparse_csf_tensor( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor) +cdef public object pyarrow_wrap_sparse_csr_matrix( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor) +cdef public object pyarrow_wrap_tensor(const shared_ptr[CTensor]& sp_tensor) + +cdef public object pyarrow_wrap_batch(const shared_ptr[CRecordBatch]& cbatch) +cdef public object pyarrow_wrap_table(const shared_ptr[CTable]& ctable) + +# Unwrapping Python -> C++ + +cdef public shared_ptr[CBuffer] pyarrow_unwrap_buffer(object buffer) + +cdef public shared_ptr[CDataType] pyarrow_unwrap_data_type(object data_type) +cdef public shared_ptr[CField] pyarrow_unwrap_field(object field) +cdef public shared_ptr[CSchema] pyarrow_unwrap_schema(object schema) + +cdef public shared_ptr[CScalar] pyarrow_unwrap_scalar(object scalar) + +cdef public shared_ptr[CArray] pyarrow_unwrap_array(object array) +cdef public shared_ptr[CChunkedArray] pyarrow_unwrap_chunked_array( + object array) + +cdef public shared_ptr[CSparseCOOTensor] pyarrow_unwrap_sparse_coo_tensor( + object sparse_tensor) +cdef public shared_ptr[CSparseCSCMatrix] pyarrow_unwrap_sparse_csc_matrix( + object sparse_tensor) +cdef public shared_ptr[CSparseCSFTensor] pyarrow_unwrap_sparse_csf_tensor( + object sparse_tensor) +cdef public shared_ptr[CSparseCSRMatrix] pyarrow_unwrap_sparse_csr_matrix( + object sparse_tensor) +cdef public shared_ptr[CTensor] pyarrow_unwrap_tensor(object tensor) + +cdef public shared_ptr[CRecordBatch] pyarrow_unwrap_batch(object batch) +cdef public shared_ptr[CTable] pyarrow_unwrap_table(object table) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pyx new file mode 100644 index 0000000000000000000000000000000000000000..29a0bed55949ce31da38b83dc881998257931d8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/lib.pyx @@ -0,0 +1,192 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile = False +# cython: nonecheck = True +# distutils: language = c++ + +import datetime +import decimal as _pydecimal +import numpy as np +import os +import sys + +from cython.operator cimport dereference as deref +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.includes.common cimport PyObject_to_object +cimport pyarrow.includes.libarrow_python as libarrow_python +cimport cpython as cp + +# Initialize NumPy C API +arrow_init_numpy() +# Initialize PyArrow C++ API +# (used from some of our C++ code, see e.g. ARROW-5260) +import_pyarrow() + + +MonthDayNano = NewMonthDayNanoTupleType() + + +def cpu_count(): + """ + Return the number of threads to use in parallel operations. 
+ + The number of threads is determined at startup by inspecting the + ``OMP_NUM_THREADS`` and ``OMP_THREAD_LIMIT`` environment variables. + If neither is present, it will default to the number of hardware threads + on the system. It can be modified at runtime by calling + :func:`set_cpu_count()`. + + See Also + -------- + set_cpu_count : Modify the size of this pool. + io_thread_count : The analogous function for the I/O thread pool. + """ + return GetCpuThreadPoolCapacity() + + +def set_cpu_count(int count): + """ + Set the number of threads to use in parallel operations. + + Parameters + ---------- + count : int + The number of concurrent threads that should be used. + + See Also + -------- + cpu_count : Get the size of this pool. + set_io_thread_count : The analogous function for the I/O thread pool. + """ + if count < 1: + raise ValueError("CPU count must be strictly positive") + check_status(SetCpuThreadPoolCapacity(count)) + + +Type_NA = _Type_NA +Type_BOOL = _Type_BOOL +Type_UINT8 = _Type_UINT8 +Type_INT8 = _Type_INT8 +Type_UINT16 = _Type_UINT16 +Type_INT16 = _Type_INT16 +Type_UINT32 = _Type_UINT32 +Type_INT32 = _Type_INT32 +Type_UINT64 = _Type_UINT64 +Type_INT64 = _Type_INT64 +Type_HALF_FLOAT = _Type_HALF_FLOAT +Type_FLOAT = _Type_FLOAT +Type_DOUBLE = _Type_DOUBLE +Type_DECIMAL128 = _Type_DECIMAL128 +Type_DECIMAL256 = _Type_DECIMAL256 +Type_DATE32 = _Type_DATE32 +Type_DATE64 = _Type_DATE64 +Type_TIMESTAMP = _Type_TIMESTAMP +Type_TIME32 = _Type_TIME32 +Type_TIME64 = _Type_TIME64 +Type_DURATION = _Type_DURATION +Type_INTERVAL_MONTH_DAY_NANO = _Type_INTERVAL_MONTH_DAY_NANO +Type_BINARY = _Type_BINARY +Type_STRING = _Type_STRING +Type_LARGE_BINARY = _Type_LARGE_BINARY +Type_LARGE_STRING = _Type_LARGE_STRING +Type_FIXED_SIZE_BINARY = _Type_FIXED_SIZE_BINARY +Type_LIST = _Type_LIST +Type_LARGE_LIST = _Type_LARGE_LIST +Type_MAP = _Type_MAP +Type_FIXED_SIZE_LIST = _Type_FIXED_SIZE_LIST +Type_STRUCT = _Type_STRUCT +Type_SPARSE_UNION = _Type_SPARSE_UNION +Type_DENSE_UNION = _Type_DENSE_UNION +Type_DICTIONARY = _Type_DICTIONARY +Type_RUN_END_ENCODED = _Type_RUN_END_ENCODED + +UnionMode_SPARSE = _UnionMode_SPARSE +UnionMode_DENSE = _UnionMode_DENSE + +__pc = None +__pac = None + + +def _pc(): + global __pc + if __pc is None: + import pyarrow.compute as pc + __pc = pc + return __pc + + +def _pac(): + global __pac + if __pac is None: + import pyarrow.acero as pac + __pac = pac + return __pac + + +def _gdb_test_session(): + GdbTestSession() + + +# Assorted compatibility helpers +include "compat.pxi" + +# Exception types and Status handling +include "error.pxi" + +# Configuration information +include "config.pxi" + +# pandas API shim +include "pandas-shim.pxi" + +# Memory pools and allocation +include "memory.pxi" + +# DataType, Field, Schema +include "types.pxi" + +# Array scalar values +include "scalar.pxi" + +# Array types +include "array.pxi" + +# Builders +include "builder.pxi" + +# Column, Table, Record Batch +include "table.pxi" + +# Tensors +include "tensor.pxi" + +# DLPack +include "_dlpack.pxi" + +# File IO +include "io.pxi" + +# IPC / Messaging +include "ipc.pxi" + +# Micro-benchmark routines +include "benchmark.pxi" + +# Public API +include "public-api.pxi" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so new file mode 100644 index 0000000000000000000000000000000000000000..e240089cdc6c16f8608843b1e0f32c1751ab91b5 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_flight.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so new file mode 100644 index 0000000000000000000000000000000000000000..c3941be84ca5929a7e5ee0861098e0164161fc07 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python_parquet_encryption.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/memory.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/memory.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1ddcb01ccb6ab2ca84786e6e60a5f4c4ffbfc5bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/memory.pxi @@ -0,0 +1,274 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: embedsignature = True + + +cdef class MemoryPool(_Weakrefable): + """ + Base class for memory allocation. + + Besides tracking its number of allocated bytes, a memory pool also + takes care of the required 64-byte alignment for Arrow data. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.*_memory_pool instead." + .format(self.__class__.__name__)) + + cdef void init(self, CMemoryPool* pool): + self.pool = pool + + def release_unused(self): + """ + Attempt to return to the OS any memory being held onto by the pool. + + This function should not be called except potentially for + benchmarking or debugging as it could be expensive and detrimental to + performance. + + This is best effort and may not have any effect on some memory pools + or in some situations (e.g. fragmentation). + """ + cdef CMemoryPool* pool = c_get_memory_pool() + with nogil: + pool.ReleaseUnused() + + def bytes_allocated(self): + """ + Return the number of bytes that are currently allocated from this + memory pool. + """ + return self.pool.bytes_allocated() + + def max_memory(self): + """ + Return the peak memory allocation in this memory pool. + This can be an approximate number in multi-threaded applications. + + None is returned if the pool implementation doesn't know how to + compute this number. + """ + ret = self.pool.max_memory() + return ret if ret >= 0 else None + + @property + def backend_name(self): + """ + The name of the backend used by this MemoryPool (e.g. "jemalloc"). 
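+
+ Examples
+ --------
+ The exact name depends on how Arrow was built, so the output below
+ is only illustrative:
+
+ >>> import pyarrow as pa
+ >>> pa.default_memory_pool().backend_name # doctest: +SKIP
+ 'jemalloc'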
+ """ + return frombytes(self.pool.backend_name()) + + def __repr__(self): + name = f"pyarrow.{self.__class__.__name__}" + return (f"<{name} " + f"backend_name={self.backend_name} " + f"bytes_allocated={self.bytes_allocated()} " + f"max_memory={self.max_memory()}>") + +cdef CMemoryPool* maybe_unbox_memory_pool(MemoryPool memory_pool): + if memory_pool is None: + return c_get_memory_pool() + else: + return memory_pool.pool + + +cdef api object box_memory_pool(CMemoryPool *c_pool): + cdef MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_pool) + return pool + + +cdef class LoggingMemoryPool(MemoryPool): + cdef: + unique_ptr[CLoggingMemoryPool] logging_pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.logging_memory_pool instead." + .format(self.__class__.__name__)) + + +cdef class ProxyMemoryPool(MemoryPool): + """ + Memory pool implementation that tracks the number of bytes and + maximum memory allocated through its direct calls, while redirecting + to another memory pool. + """ + cdef: + unique_ptr[CProxyMemoryPool] proxy_pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.proxy_memory_pool instead." + .format(self.__class__.__name__)) + + +def default_memory_pool(): + """ + Return the process-global memory pool. + + Examples + -------- + >>> default_memory_pool() + + """ + cdef: + MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_get_memory_pool()) + return pool + + +def proxy_memory_pool(MemoryPool parent): + """ + Create and return a MemoryPool instance that redirects to the + *parent*, but with separate allocation statistics. + + Parameters + ---------- + parent : MemoryPool + The real memory pool that should be used for allocations. + """ + cdef ProxyMemoryPool out = ProxyMemoryPool.__new__(ProxyMemoryPool) + out.proxy_pool.reset(new CProxyMemoryPool(parent.pool)) + out.init(out.proxy_pool.get()) + return out + + +def logging_memory_pool(MemoryPool parent): + """ + Create and return a MemoryPool instance that redirects to the + *parent*, but also dumps allocation logs on stderr. + + Parameters + ---------- + parent : MemoryPool + The real memory pool that should be used for allocations. + """ + cdef LoggingMemoryPool out = LoggingMemoryPool.__new__( + LoggingMemoryPool, parent) + out.logging_pool.reset(new CLoggingMemoryPool(parent.pool)) + out.init(out.logging_pool.get()) + return out + + +def system_memory_pool(): + """ + Return a memory pool based on the C malloc heap. + """ + cdef: + MemoryPool pool = MemoryPool.__new__(MemoryPool) + pool.init(c_system_memory_pool()) + return pool + + +def jemalloc_memory_pool(): + """ + Return a memory pool based on the jemalloc heap. + + NotImplementedError is raised if jemalloc support is not enabled. + """ + cdef: + CMemoryPool* c_pool + MemoryPool pool = MemoryPool.__new__(MemoryPool) + check_status(c_jemalloc_memory_pool(&c_pool)) + pool.init(c_pool) + return pool + + +def mimalloc_memory_pool(): + """ + Return a memory pool based on the mimalloc heap. + + NotImplementedError is raised if mimalloc support is not enabled. + """ + cdef: + CMemoryPool* c_pool + MemoryPool pool = MemoryPool.__new__(MemoryPool) + check_status(c_mimalloc_memory_pool(&c_pool)) + pool.init(c_pool) + return pool + + +def set_memory_pool(MemoryPool pool): + """ + Set the default memory pool. + + Parameters + ---------- + pool : MemoryPool + The memory pool that should be used by default. 
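+
+ Examples
+ --------
+ A minimal sketch: route future allocations through the system
+ (malloc) pool, then restore the previous default pool:
+
+ >>> import pyarrow as pa
+ >>> old_pool = pa.default_memory_pool()
+ >>> pa.set_memory_pool(pa.system_memory_pool())
+ >>> pa.set_memory_pool(old_pool)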
+ """ + c_set_default_memory_pool(pool.pool) + + +cdef MemoryPool _default_memory_pool = default_memory_pool() +cdef LoggingMemoryPool _logging_memory_pool = logging_memory_pool( + _default_memory_pool) + + +def log_memory_allocations(enable=True): + """ + Enable or disable memory allocator logging for debugging purposes + + Parameters + ---------- + enable : bool, default True + Pass False to disable logging + """ + if enable: + set_memory_pool(_logging_memory_pool) + else: + set_memory_pool(_default_memory_pool) + + +def total_allocated_bytes(): + """ + Return the currently allocated bytes from the default memory pool. + Other memory pools may not be accounted for. + """ + cdef CMemoryPool* pool = c_get_memory_pool() + return pool.bytes_allocated() + + +def jemalloc_set_decay_ms(decay_ms): + """ + Set arenas.dirty_decay_ms and arenas.muzzy_decay_ms to indicated number of + milliseconds. A value of 0 (the default) results in dirty / muzzy memory + pages being released right away to the OS, while a higher value will result + in a time-based decay. See the jemalloc docs for more information + + It's best to set this at the start of your application. + + Parameters + ---------- + decay_ms : int + Number of milliseconds to set for jemalloc decay conf parameters. Note + that this change will only affect future memory arenas + """ + check_status(c_jemalloc_set_decay_ms(decay_ms)) + + +def supported_memory_backends(): + """ + Return a list of available memory pool backends + """ + cdef vector[c_string] backends = c_supported_memory_backends() + return [backend.decode() for backend in backends] diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/orc.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/orc.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5feafb3363eb35e642e5c98db07ba1908053d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/orc.py @@ -0,0 +1,384 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from numbers import Integral +import warnings + +from pyarrow.lib import Table +import pyarrow._orc as _orc +from pyarrow.fs import _resolve_filesystem_and_path + + +class ORCFile: + """ + Reader interface for a single ORC file + + Parameters + ---------- + source : str or pyarrow.NativeFile + Readable source. For passing Python file objects or byte buffers, + see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader. 
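+
+ Examples
+ --------
+ A minimal round trip, assuming an ORC-enabled Arrow build (the file
+ name is illustrative):
+
+ >>> import pyarrow as pa
+ >>> from pyarrow import orc
+ >>> orc.write_table(pa.table({"a": [1, 2, 3]}), "example.orc")
+ >>> f = orc.ORCFile("example.orc")
+ >>> f.nrows
+ 3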
+ """ + + def __init__(self, source): + self.reader = _orc.ORCReader() + self.reader.open(source) + + @property + def metadata(self): + """The file metadata, as an arrow KeyValueMetadata""" + return self.reader.metadata() + + @property + def schema(self): + """The file schema, as an arrow schema""" + return self.reader.schema() + + @property + def nrows(self): + """The number of rows in the file""" + return self.reader.nrows() + + @property + def nstripes(self): + """The number of stripes in the file""" + return self.reader.nstripes() + + @property + def file_version(self): + """Format version of the ORC file, must be 0.11 or 0.12""" + return self.reader.file_version() + + @property + def software_version(self): + """Software instance and version that wrote this file""" + return self.reader.software_version() + + @property + def compression(self): + """Compression codec of the file""" + return self.reader.compression() + + @property + def compression_size(self): + """Number of bytes to buffer for the compression codec in the file""" + return self.reader.compression_size() + + @property + def writer(self): + """Name of the writer that wrote this file. + If the writer is unknown then its Writer ID + (a number) is returned""" + return self.reader.writer() + + @property + def writer_version(self): + """Version of the writer""" + return self.reader.writer_version() + + @property + def row_index_stride(self): + """Number of rows per an entry in the row index or 0 + if there is no row index""" + return self.reader.row_index_stride() + + @property + def nstripe_statistics(self): + """Number of stripe statistics""" + return self.reader.nstripe_statistics() + + @property + def content_length(self): + """Length of the data stripes in the file in bytes""" + return self.reader.content_length() + + @property + def stripe_statistics_length(self): + """The number of compressed bytes in the file stripe statistics""" + return self.reader.stripe_statistics_length() + + @property + def file_footer_length(self): + """The number of compressed bytes in the file footer""" + return self.reader.file_footer_length() + + @property + def file_postscript_length(self): + """The number of bytes in the file postscript""" + return self.reader.file_postscript_length() + + @property + def file_length(self): + """The number of bytes in the file""" + return self.reader.file_length() + + def _select_names(self, columns=None): + if columns is None: + return None + + schema = self.schema + names = [] + for col in columns: + if isinstance(col, Integral): + col = int(col) + if 0 <= col < len(schema): + col = schema[col].name + names.append(col) + else: + raise ValueError("Column indices must be in 0 <= ind < %d," + " got %d" % (len(schema), col)) + else: + return columns + + return names + + def read_stripe(self, n, columns=None): + """Read a single stripe from the file. + + Parameters + ---------- + n : int + The stripe index + columns : list + If not None, only these columns will be read from the stripe. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e' + + Returns + ------- + pyarrow.RecordBatch + Content of the stripe as a RecordBatch. + """ + columns = self._select_names(columns) + return self.reader.read_stripe(n, columns=columns) + + def read(self, columns=None): + """Read the whole file. + + Parameters + ---------- + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 
'a' will select
+ 'a.b', 'a.c', and 'a.d.e'. Output always follows the
+ ordering of the file and not the `columns` list.
+
+ Returns
+ -------
+ pyarrow.Table
+ Content of the file as a Table.
+ """
+ columns = self._select_names(columns)
+ return self.reader.read(columns=columns)
+
+
+_orc_writer_args_docs = """file_version : {"0.11", "0.12"}, default "0.12"
+ Determine which ORC file version to use.
+ Hive 0.11 / ORC v0 is the older version,
+ while Hive 0.12 / ORC v1 is the newer one.
+batch_size : int, default 1024
+ Number of rows the ORC writer writes at a time.
+stripe_size : int, default 64 * 1024 * 1024
+ Size of each ORC stripe in bytes.
+compression : string, default 'uncompressed'
+ The compression codec.
+ Valid values: {'UNCOMPRESSED', 'SNAPPY', 'ZLIB', 'LZ4', 'ZSTD'}
+ Note that LZO is currently not supported.
+compression_block_size : int, default 64 * 1024
+ Size of each compression block in bytes.
+compression_strategy : string, default 'speed'
+ The compression strategy, i.e. speed vs. size reduction.
+ Valid values: {'SPEED', 'COMPRESSION'}
+row_index_stride : int, default 10000
+ The row index stride i.e. the number of rows per
+ an entry in the row index.
+padding_tolerance : double, default 0.0
+ The padding tolerance.
+dictionary_key_size_threshold : double, default 0.0
+ The dictionary key size threshold. 0 to disable dictionary encoding.
+ 1 to always enable dictionary encoding.
+bloom_filter_columns : None, set-like or list-like, default None
+ Columns that use the bloom filter.
+bloom_filter_fpp : double, default 0.05
+ Upper limit of the false-positive rate of the bloom filter.
+"""
+
+
+class ORCWriter:
+ __doc__ = """
+Writer interface for a single ORC file
+
+Parameters
+----------
+where : str or pyarrow.io.NativeFile
+ Writable target. For passing Python file objects or byte buffers,
+ see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream
+ or pyarrow.io.FixedSizeBufferWriter.
+{}
+""".format(_orc_writer_args_docs)
+
+ is_open = False
+
+ def __init__(self, where, *,
+ file_version='0.12',
+ batch_size=1024,
+ stripe_size=64 * 1024 * 1024,
+ compression='uncompressed',
+ compression_block_size=65536,
+ compression_strategy='speed',
+ row_index_stride=10000,
+ padding_tolerance=0.0,
+ dictionary_key_size_threshold=0.0,
+ bloom_filter_columns=None,
+ bloom_filter_fpp=0.05,
+ ):
+ self.writer = _orc.ORCWriter()
+ self.writer.open(
+ where,
+ file_version=file_version,
+ batch_size=batch_size,
+ stripe_size=stripe_size,
+ compression=compression,
+ compression_block_size=compression_block_size,
+ compression_strategy=compression_strategy,
+ row_index_stride=row_index_stride,
+ padding_tolerance=padding_tolerance,
+ dictionary_key_size_threshold=dictionary_key_size_threshold,
+ bloom_filter_columns=bloom_filter_columns,
+ bloom_filter_fpp=bloom_filter_fpp
+ )
+ self.is_open = True
+
+ def __del__(self):
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.close()
+
+ def write(self, table):
+ """
+ Write the table into an ORC file. The schema of the table must
+ be equal to the schema used when opening the ORC file.
+ + Parameters + ---------- + table : pyarrow.Table + The table to be written into the ORC file + """ + assert self.is_open + self.writer.write(table) + + def close(self): + """ + Close the ORC file + """ + if self.is_open: + self.writer.close() + self.is_open = False + + +def read_table(source, columns=None, filesystem=None): + filesystem, path = _resolve_filesystem_and_path(source, filesystem) + if filesystem is not None: + source = filesystem.open_input_file(path) + + if columns is not None and len(columns) == 0: + result = ORCFile(source).read().select(columns) + else: + result = ORCFile(source).read(columns=columns) + + return result + + +read_table.__doc__ = """ +Read a Table from an ORC file. + +Parameters +---------- +source : str, pyarrow.NativeFile, or file-like object + If a string passed, can be a single file name. For file-like objects, + only read a single file. Use pyarrow.BufferReader to read a file + contained in a bytes or buffer-like object. +columns : list + If not None, only these columns will be read from the file. A column + name may be a prefix of a nested field, e.g. 'a' will select 'a.b', + 'a.c', and 'a.d.e'. Output always follows the ordering of the file and + not the `columns` list. If empty, no columns will be read. Note + that the table will still have the correct num_rows set despite having + no columns. +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. +""" + + +def write_table(table, where, *, + file_version='0.12', + batch_size=1024, + stripe_size=64 * 1024 * 1024, + compression='uncompressed', + compression_block_size=65536, + compression_strategy='speed', + row_index_stride=10000, + padding_tolerance=0.0, + dictionary_key_size_threshold=0.0, + bloom_filter_columns=None, + bloom_filter_fpp=0.05): + if isinstance(where, Table): + warnings.warn( + "The order of the arguments has changed. Pass as " + "'write_table(table, where)' instead. The old order will raise " + "an error in the future.", FutureWarning, stacklevel=2 + ) + table, where = where, table + with ORCWriter( + where, + file_version=file_version, + batch_size=batch_size, + stripe_size=stripe_size, + compression=compression, + compression_block_size=compression_block_size, + compression_strategy=compression_strategy, + row_index_stride=row_index_stride, + padding_tolerance=padding_tolerance, + dictionary_key_size_threshold=dictionary_key_size_threshold, + bloom_filter_columns=bloom_filter_columns, + bloom_filter_fpp=bloom_filter_fpp + ) as writer: + writer.write(table) + + +write_table.__doc__ = """ +Write a table into an ORC file. + +Parameters +---------- +table : pyarrow.lib.Table + The table to be written into the ORC file +where : str or pyarrow.io.NativeFile + Writable target. For passing Python file objects or byte buffers, + see pyarrow.io.PythonFileInterface, pyarrow.io.BufferOutputStream + or pyarrow.io.FixedSizeBufferWriter. +{} +""".format(_orc_writer_args_docs) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi new file mode 100644 index 0000000000000000000000000000000000000000..0409e133ada5d7346660115ff846818da17de5b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/pandas-shim.pxi @@ -0,0 +1,256 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# pandas lazy-loading API shim that reduces API call and import overhead + +import warnings +from threading import Lock + + +cdef class _PandasAPIShim(object): + """ + Lazy pandas importer that isolates usages of pandas APIs and avoids + importing pandas until it's actually needed + """ + cdef: + bint _tried_importing_pandas + bint _have_pandas + + cdef readonly: + object _loose_version, _version + object _pd, _types_api, _compat_module + object _data_frame, _index, _series, _categorical_type + object _datetimetz_type, _extension_array, _extension_dtype + object _array_like_types, _is_extension_array_dtype, _lock + bint has_sparse + bint _pd024 + bint _is_v1, _is_ge_v21 + + def __init__(self): + self._lock = Lock() + self._tried_importing_pandas = False + self._have_pandas = 0 + + cdef _import_pandas(self, bint raise_): + try: + import pandas as pd + import pyarrow.pandas_compat as pdcompat + except ImportError: + self._have_pandas = False + if raise_: + raise + else: + return + + from pyarrow.vendored.version import Version + + self._pd = pd + self._version = pd.__version__ + self._loose_version = Version(pd.__version__) + self._is_v1 = False + + if self._loose_version < Version('1.0.0'): + self._have_pandas = False + if raise_: + raise ImportError( + "pyarrow requires pandas 1.0.0 or above, pandas {} is " + "installed".format(self._version) + ) + else: + warnings.warn( + "pyarrow requires pandas 1.0.0 or above, pandas {} is " + "installed. 
Therefore, pandas-specific integration is not " + "used.".format(self._version), stacklevel=2) + return + + self._is_v1 = self._loose_version < Version('2.0.0') + self._is_ge_v21 = self._loose_version >= Version('2.1.0') + + self._compat_module = pdcompat + self._data_frame = pd.DataFrame + self._index = pd.Index + self._categorical_type = pd.Categorical + self._series = pd.Series + self._extension_array = pd.api.extensions.ExtensionArray + self._array_like_types = ( + self._series, self._index, self._categorical_type, + self._extension_array) + self._extension_dtype = pd.api.extensions.ExtensionDtype + self._is_extension_array_dtype = ( + pd.api.types.is_extension_array_dtype) + self._types_api = pd.api.types + self._datetimetz_type = pd.api.types.DatetimeTZDtype + self._have_pandas = True + self.has_sparse = False + + cdef inline _check_import(self, bint raise_=True): + if not self._tried_importing_pandas: + with self._lock: + if not self._tried_importing_pandas: + try: + self._import_pandas(raise_) + finally: + self._tried_importing_pandas = True + return + + if not self._have_pandas and raise_: + self._import_pandas(raise_) + + def series(self, *args, **kwargs): + self._check_import() + return self._series(*args, **kwargs) + + def data_frame(self, *args, **kwargs): + self._check_import() + return self._data_frame(*args, **kwargs) + + cdef inline bint _have_pandas_internal(self): + if not self._tried_importing_pandas: + self._check_import(raise_=False) + return self._have_pandas + + @property + def have_pandas(self): + return self._have_pandas_internal() + + @property + def compat(self): + self._check_import() + return self._compat_module + + @property + def pd(self): + self._check_import() + return self._pd + + cpdef infer_dtype(self, obj): + self._check_import() + try: + return self._types_api.infer_dtype(obj, skipna=False) + except AttributeError: + return self._pd.lib.infer_dtype(obj) + + cpdef pandas_dtype(self, dtype): + self._check_import() + try: + return self._types_api.pandas_dtype(dtype) + except AttributeError: + return None + + @property + def loose_version(self): + self._check_import() + return self._loose_version + + @property + def version(self): + self._check_import() + return self._version + + def is_v1(self): + self._check_import() + return self._is_v1 + + def is_ge_v21(self): + self._check_import() + return self._is_ge_v21 + + @property + def categorical_type(self): + self._check_import() + return self._categorical_type + + @property + def datetimetz_type(self): + self._check_import() + return self._datetimetz_type + + @property + def extension_dtype(self): + self._check_import() + return self._extension_dtype + + cpdef is_array_like(self, obj): + self._check_import() + return isinstance(obj, self._array_like_types) + + cpdef is_categorical(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._categorical_type) + else: + return False + + cpdef is_datetimetz(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._datetimetz_type) + else: + return False + + cpdef is_extension_array_dtype(self, obj): + self._check_import() + if self._is_extension_array_dtype: + return self._is_extension_array_dtype(obj) + else: + return False + + cpdef is_sparse(self, obj): + if self._have_pandas_internal(): + return isinstance(obj.dtype, self.pd.SparseDtype) + else: + return False + + cpdef is_data_frame(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._data_frame) + else: + return False + + cpdef is_series(self, 
obj): + if self._have_pandas_internal(): + return isinstance(obj, self._series) + else: + return False + + cpdef is_index(self, obj): + if self._have_pandas_internal(): + return isinstance(obj, self._index) + else: + return False + + cpdef get_values(self, obj): + """ + Get the underlying array values of a pandas Series or Index in the + format (np.ndarray or pandas ExtensionArray) as we need them. + + Assumes obj is a pandas Series or Index. + """ + self._check_import() + if isinstance(obj.dtype, (self.pd.api.types.IntervalDtype, + self.pd.api.types.PeriodDtype)): + return obj.array + return obj.values + + def get_rangeindex_attribute(self, level, name): + # public start/stop/step attributes added in pandas 0.25.0 + self._check_import() + if hasattr(level, name): + return getattr(level, name) + return getattr(level, '_' + name) + + +cdef _PandasAPIShim pandas_api = _PandasAPIShim() +_pandas_api = pandas_api diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/public-api.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/public-api.pxi new file mode 100644 index 0000000000000000000000000000000000000000..72e16f2cec3872d31242dcce9ee589cc2aec1bee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/public-api.pxi @@ -0,0 +1,426 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from libcpp.memory cimport shared_ptr +from pyarrow.includes.libarrow cimport (CArray, CDataType, CField, + CRecordBatch, CSchema, + CTable, CTensor, + CSparseCOOTensor, CSparseCSRMatrix, + CSparseCSCMatrix, CSparseCSFTensor) + +# You cannot assign something to a dereferenced pointer in Cython thus these +# methods don't use Status to indicate a successful operation. 
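+
+# Illustrative sketch (not part of the API below): a hypothetical Cython
+# caller with access to these helpers would typically unwrap a Python
+# object into its C++ counterpart, operate on it, and wrap the result
+# back, roughly like:
+#
+#   cdef shared_ptr[CArray] c_arr = pyarrow_unwrap_array(py_arr)
+#   if c_arr.get() == NULL:
+#       raise TypeError("expected a pyarrow.Array")
+#   # ... use c_arr ...
+#   result = pyarrow_wrap_array(c_arr)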
+ + +cdef api bint pyarrow_is_buffer(object buffer): + return isinstance(buffer, Buffer) + + +cdef api shared_ptr[CBuffer] pyarrow_unwrap_buffer(object buffer): + cdef Buffer buf + if pyarrow_is_buffer(buffer): + buf = (buffer) + return buf.buffer + + return shared_ptr[CBuffer]() + + +cdef api object pyarrow_wrap_buffer(const shared_ptr[CBuffer]& buf): + cdef Buffer result = Buffer.__new__(Buffer) + result.init(buf) + return result + + +cdef api object pyarrow_wrap_resizable_buffer( + const shared_ptr[CResizableBuffer]& buf): + cdef ResizableBuffer result = ResizableBuffer.__new__(ResizableBuffer) + result.init_rz(buf) + return result + + +cdef api bint pyarrow_is_data_type(object type_): + return isinstance(type_, DataType) + + +cdef api shared_ptr[CDataType] pyarrow_unwrap_data_type( + object data_type): + cdef DataType type_ + if pyarrow_is_data_type(data_type): + type_ = (data_type) + return type_.sp_type + + return shared_ptr[CDataType]() + + +# Workaround for Cython parsing bug +# https://github.com/cython/cython/issues/2143 +ctypedef const CPyExtensionType* _CPyExtensionTypePtr + + +cdef api object pyarrow_wrap_data_type( + const shared_ptr[CDataType]& type): + cdef: + const CExtensionType* ext_type + const CPyExtensionType* cpy_ext_type + DataType out + + if type.get() == NULL: + return None + + if type.get().id() == _Type_DICTIONARY: + out = DictionaryType.__new__(DictionaryType) + elif type.get().id() == _Type_LIST: + out = ListType.__new__(ListType) + elif type.get().id() == _Type_LARGE_LIST: + out = LargeListType.__new__(LargeListType) + elif type.get().id() == _Type_MAP: + out = MapType.__new__(MapType) + elif type.get().id() == _Type_FIXED_SIZE_LIST: + out = FixedSizeListType.__new__(FixedSizeListType) + elif type.get().id() == _Type_STRUCT: + out = StructType.__new__(StructType) + elif type.get().id() == _Type_SPARSE_UNION: + out = SparseUnionType.__new__(SparseUnionType) + elif type.get().id() == _Type_DENSE_UNION: + out = DenseUnionType.__new__(DenseUnionType) + elif type.get().id() == _Type_TIME32: + out = Time32Type.__new__(Time32Type) + elif type.get().id() == _Type_TIME64: + out = Time64Type.__new__(Time64Type) + elif type.get().id() == _Type_TIMESTAMP: + out = TimestampType.__new__(TimestampType) + elif type.get().id() == _Type_DURATION: + out = DurationType.__new__(DurationType) + elif type.get().id() == _Type_FIXED_SIZE_BINARY: + out = FixedSizeBinaryType.__new__(FixedSizeBinaryType) + elif type.get().id() == _Type_DECIMAL128: + out = Decimal128Type.__new__(Decimal128Type) + elif type.get().id() == _Type_DECIMAL256: + out = Decimal256Type.__new__(Decimal256Type) + elif type.get().id() == _Type_RUN_END_ENCODED: + out = RunEndEncodedType.__new__(RunEndEncodedType) + elif type.get().id() == _Type_EXTENSION: + ext_type = type.get() + cpy_ext_type = dynamic_cast[_CPyExtensionTypePtr](ext_type) + if cpy_ext_type != nullptr: + return cpy_ext_type.GetInstance() + elif ext_type.extension_name() == b"arrow.fixed_shape_tensor": + out = FixedShapeTensorType.__new__(FixedShapeTensorType) + else: + out = BaseExtensionType.__new__(BaseExtensionType) + else: + out = DataType.__new__(DataType) + + out.init(type) + return out + + +cdef object pyarrow_wrap_metadata( + const shared_ptr[const CKeyValueMetadata]& meta): + if meta.get() == nullptr: + return None + else: + return KeyValueMetadata.wrap(meta) + + +cdef api bint pyarrow_is_metadata(object metadata): + return isinstance(metadata, KeyValueMetadata) + + +cdef shared_ptr[const CKeyValueMetadata] pyarrow_unwrap_metadata(object 
meta): + cdef shared_ptr[const CKeyValueMetadata] c_meta + if pyarrow_is_metadata(meta): + c_meta = (meta).unwrap() + return c_meta + + +cdef api bint pyarrow_is_field(object field): + return isinstance(field, Field) + + +cdef api shared_ptr[CField] pyarrow_unwrap_field(object field): + cdef Field field_ + if pyarrow_is_field(field): + field_ = (field) + return field_.sp_field + + return shared_ptr[CField]() + + +cdef api object pyarrow_wrap_field(const shared_ptr[CField]& field): + if field.get() == NULL: + return None + cdef Field out = Field.__new__(Field) + out.init(field) + return out + + +cdef api bint pyarrow_is_schema(object schema): + return isinstance(schema, Schema) + + +cdef api shared_ptr[CSchema] pyarrow_unwrap_schema(object schema): + cdef Schema sch + if pyarrow_is_schema(schema): + sch = (schema) + return sch.sp_schema + + return shared_ptr[CSchema]() + + +cdef api object pyarrow_wrap_schema(const shared_ptr[CSchema]& schema): + cdef Schema out = Schema.__new__(Schema) + out.init_schema(schema) + return out + + +cdef api bint pyarrow_is_array(object array): + return isinstance(array, Array) + + +cdef api shared_ptr[CArray] pyarrow_unwrap_array(object array): + cdef Array arr + if pyarrow_is_array(array): + arr = (array) + return arr.sp_array + + return shared_ptr[CArray]() + + +cdef api object pyarrow_wrap_array(const shared_ptr[CArray]& sp_array): + if sp_array.get() == NULL: + raise ValueError('Array was NULL') + + klass = get_array_class_from_type(sp_array.get().type()) + + cdef Array arr = klass.__new__(klass) + arr.init(sp_array) + return arr + + +cdef api bint pyarrow_is_chunked_array(object array): + return isinstance(array, ChunkedArray) + + +cdef api shared_ptr[CChunkedArray] pyarrow_unwrap_chunked_array(object array): + cdef ChunkedArray arr + if pyarrow_is_chunked_array(array): + arr = (array) + return arr.sp_chunked_array + + return shared_ptr[CChunkedArray]() + + +cdef api object pyarrow_wrap_chunked_array( + const shared_ptr[CChunkedArray]& sp_array): + if sp_array.get() == NULL: + raise ValueError('ChunkedArray was NULL') + + cdef CDataType* data_type = sp_array.get().type().get() + + if data_type == NULL: + raise ValueError('ChunkedArray data type was NULL') + + cdef ChunkedArray arr = ChunkedArray.__new__(ChunkedArray) + arr.init(sp_array) + return arr + + +cdef api bint pyarrow_is_scalar(object value): + return isinstance(value, Scalar) + + +cdef api shared_ptr[CScalar] pyarrow_unwrap_scalar(object scalar): + if pyarrow_is_scalar(scalar): + return ( scalar).unwrap() + return shared_ptr[CScalar]() + + +cdef api object pyarrow_wrap_scalar(const shared_ptr[CScalar]& sp_scalar): + if sp_scalar.get() == NULL: + raise ValueError('Scalar was NULL') + + cdef CDataType* data_type = sp_scalar.get().type.get() + + if data_type == NULL: + raise ValueError('Scalar data type was NULL') + + if data_type.id() == _Type_NA: + return _NULL + + if data_type.id() not in _scalar_classes: + raise ValueError('Scalar type not supported') + + klass = get_scalar_class_from_type(sp_scalar.get().type) + + cdef Scalar scalar = klass.__new__(klass) + scalar.init(sp_scalar) + return scalar + + +cdef api bint pyarrow_is_tensor(object tensor): + return isinstance(tensor, Tensor) + + +cdef api shared_ptr[CTensor] pyarrow_unwrap_tensor(object tensor): + cdef Tensor ten + if pyarrow_is_tensor(tensor): + ten = (tensor) + return ten.sp_tensor + + return shared_ptr[CTensor]() + + +cdef api object pyarrow_wrap_tensor( + const shared_ptr[CTensor]& sp_tensor): + if sp_tensor.get() == NULL: + raise 
ValueError('Tensor was NULL') + + cdef Tensor tensor = Tensor.__new__(Tensor) + tensor.init(sp_tensor) + return tensor + + +cdef api bint pyarrow_is_sparse_coo_tensor(object sparse_tensor): + return isinstance(sparse_tensor, SparseCOOTensor) + +cdef api shared_ptr[CSparseCOOTensor] pyarrow_unwrap_sparse_coo_tensor( + object sparse_tensor): + cdef SparseCOOTensor sten + if pyarrow_is_sparse_coo_tensor(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCOOTensor]() + +cdef api object pyarrow_wrap_sparse_coo_tensor( + const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCOOTensor was NULL') + + cdef SparseCOOTensor sparse_tensor = SparseCOOTensor.__new__( + SparseCOOTensor) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csr_matrix(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSRMatrix) + +cdef api shared_ptr[CSparseCSRMatrix] pyarrow_unwrap_sparse_csr_matrix( + object sparse_tensor): + cdef SparseCSRMatrix sten + if pyarrow_is_sparse_csr_matrix(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCSRMatrix]() + +cdef api object pyarrow_wrap_sparse_csr_matrix( + const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSRMatrix was NULL') + + cdef SparseCSRMatrix sparse_tensor = SparseCSRMatrix.__new__( + SparseCSRMatrix) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csc_matrix(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSCMatrix) + +cdef api shared_ptr[CSparseCSCMatrix] pyarrow_unwrap_sparse_csc_matrix( + object sparse_tensor): + cdef SparseCSCMatrix sten + if pyarrow_is_sparse_csc_matrix(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCSCMatrix]() + +cdef api object pyarrow_wrap_sparse_csc_matrix( + const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSCMatrix was NULL') + + cdef SparseCSCMatrix sparse_tensor = SparseCSCMatrix.__new__( + SparseCSCMatrix) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_sparse_csf_tensor(object sparse_tensor): + return isinstance(sparse_tensor, SparseCSFTensor) + +cdef api shared_ptr[CSparseCSFTensor] pyarrow_unwrap_sparse_csf_tensor( + object sparse_tensor): + cdef SparseCSFTensor sten + if pyarrow_is_sparse_csf_tensor(sparse_tensor): + sten = (sparse_tensor) + return sten.sp_sparse_tensor + + return shared_ptr[CSparseCSFTensor]() + +cdef api object pyarrow_wrap_sparse_csf_tensor( + const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor): + if sp_sparse_tensor.get() == NULL: + raise ValueError('SparseCSFTensor was NULL') + + cdef SparseCSFTensor sparse_tensor = SparseCSFTensor.__new__( + SparseCSFTensor) + sparse_tensor.init(sp_sparse_tensor) + return sparse_tensor + + +cdef api bint pyarrow_is_table(object table): + return isinstance(table, Table) + + +cdef api shared_ptr[CTable] pyarrow_unwrap_table(object table): + cdef Table tab + if pyarrow_is_table(table): + tab =
(table) + return tab.sp_table + + return shared_ptr[CTable]() + + +cdef api object pyarrow_wrap_table(const shared_ptr[CTable]& ctable): + cdef Table table = Table.__new__(Table) + table.init(ctable) + return table + + +cdef api bint pyarrow_is_batch(object batch): + return isinstance(batch, RecordBatch) + + +cdef api shared_ptr[CRecordBatch] pyarrow_unwrap_batch(object batch): + cdef RecordBatch bat + if pyarrow_is_batch(batch): + bat = (batch) + return bat.sp_batch + + return shared_ptr[CRecordBatch]() + + +cdef api object pyarrow_wrap_batch( + const shared_ptr[CRecordBatch]& cbatch): + cdef RecordBatch batch = RecordBatch.__new__(RecordBatch) + batch.init(cbatch) + return batch diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/scalar.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/scalar.pxi new file mode 100644 index 0000000000000000000000000000000000000000..9a66dc81226d474d4d1776c4a12dd4ee04b97b53 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/scalar.pxi @@ -0,0 +1,1158 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections +from cython cimport binding + + +cdef class Scalar(_Weakrefable): + """ + The base class for scalars. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "pa.scalar() instead.".format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CScalar]& wrapped): + self.wrapped = wrapped + + @staticmethod + cdef wrap(const shared_ptr[CScalar]& wrapped): + cdef: + Scalar self + Type type_id = wrapped.get().type.get().id() + shared_ptr[CDataType] sp_data_type = wrapped.get().type + + if type_id == _Type_NA: + return _NULL + + if type_id not in _scalar_classes: + raise NotImplementedError( + "Wrapping scalar of type " + frombytes(sp_data_type.get().ToString())) + + typ = get_scalar_class_from_type(sp_data_type) + self = typ.__new__(typ) + self.init(wrapped) + + return self + + cdef inline shared_ptr[CScalar] unwrap(self) nogil: + return self.wrapped + + @property + def type(self): + """ + Data type of the Scalar object. + """ + return pyarrow_wrap_data_type(self.wrapped.get().type) + + @property + def is_valid(self): + """ + Holds a valid (non-null) value. + """ + return self.wrapped.get().is_valid + + def cast(self, object target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast scalar value to another data type. + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, default None + Type to cast scalar to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. 
+ options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Returns + ------- + scalar : A Scalar of the given target data type. + """ + return _pc().cast(self, target_type, safe=safe, + options=options, memory_pool=memory_pool) + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.wrapped.get().ValidateFull()) + else: + with nogil: + check_status(self.wrapped.get().Validate()) + + def __repr__(self): + return ''.format( + self.__class__.__name__, self.as_py() + ) + + def __str__(self): + return str(self.as_py()) + + def equals(self, Scalar other not None): + """ + Parameters + ---------- + other : pyarrow.Scalar + + Returns + ------- + bool + """ + return self.wrapped.get().Equals(other.unwrap().get()[0]) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __hash__(self): + cdef CScalarHash hasher + return hasher(self.wrapped) + + def __reduce__(self): + return scalar, (self.as_py(), self.type) + + def as_py(self): + raise NotImplementedError() + + +_NULL = NA = None + + +cdef class NullScalar(Scalar): + """ + Concrete class for null scalars. + """ + + def __cinit__(self): + global NA + if NA is not None: + raise RuntimeError('Cannot create multiple NullScalar instances') + self.init(shared_ptr[CScalar](new CNullScalar())) + + def __init__(self): + pass + + def as_py(self): + """ + Return this value as a Python None. + """ + return None + + +_NULL = NA = NullScalar() + + +cdef class BooleanScalar(Scalar): + """ + Concrete class for boolean scalars. + """ + + def as_py(self): + """ + Return this value as a Python bool. + """ + cdef CBooleanScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt8Scalar(Scalar): + """ + Concrete class for uint8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int8Scalar(Scalar): + """ + Concrete class for int8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt16Scalar(Scalar): + """ + Concrete class for uint16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int16Scalar(Scalar): + """ + Concrete class for int16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt32Scalar(Scalar): + """ + Concrete class for uint32 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int32Scalar(Scalar): + """ + Concrete class for int32 scalars. 
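+
+    Examples
+    --------
+    A minimal illustration (the value below is hypothetical):
+
+    >>> import pyarrow as pa
+    >>> pa.scalar(4, type=pa.int32()).as_py()
+    4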
+ """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt64Scalar(Scalar): + """ + Concrete class for uint64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int64Scalar(Scalar): + """ + Concrete class for int64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class HalfFloatScalar(Scalar): + """ + Concrete class for float scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CHalfFloatScalar* sp = self.wrapped.get() + return PyHalf_FromHalf(sp.value) if sp.is_valid else None + + +cdef class FloatScalar(Scalar): + """ + Concrete class for float scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CFloatScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class DoubleScalar(Scalar): + """ + Concrete class for double scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CDoubleScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Decimal128Scalar(Scalar): + """ + Concrete class for decimal128 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. + """ + cdef: + CDecimal128Scalar* sp = self.wrapped.get() + CDecimal128Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Decimal256Scalar(Scalar): + """ + Concrete class for decimal256 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. + """ + cdef: + CDecimal256Scalar* sp = self.wrapped.get() + CDecimal256Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Date32Scalar(Scalar): + """ + Concrete class for date32 scalars. + """ + + @property + def value(self): + cdef CDate32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. + """ + cdef CDate32Scalar* sp = self.wrapped.get() + + if sp.is_valid: + # shift to seconds since epoch + return ( + datetime.date(1970, 1, 1) + datetime.timedelta(days=sp.value) + ) + else: + return None + + +cdef class Date64Scalar(Scalar): + """ + Concrete class for date64 scalars. + """ + + @property + def value(self): + cdef CDate64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. 
+ """ + cdef CDate64Scalar* sp = self.wrapped.get() + + if sp.is_valid: + return ( + datetime.date(1970, 1, 1) + + datetime.timedelta(days=sp.value / 86400000) + ) + else: + return None + + +def _datetime_from_int(int64_t value, TimeUnit unit, tzinfo=None): + if unit == TimeUnit_SECOND: + delta = datetime.timedelta(seconds=value) + elif unit == TimeUnit_MILLI: + delta = datetime.timedelta(milliseconds=value) + elif unit == TimeUnit_MICRO: + delta = datetime.timedelta(microseconds=value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timestamp(value, tz=tzinfo, unit='ns') + # otherwise safely truncate to microsecond resolution datetime + if value % 1000 != 0: + raise ValueError( + "Nanosecond resolution temporal type {} is not safely " + "convertible to microseconds to convert to datetime.datetime. " + "Install pandas to return as Timestamp with nanosecond " + "support or access the .value attribute.".format(value) + ) + delta = datetime.timedelta(microseconds=value // 1000) + + dt = datetime.datetime(1970, 1, 1) + delta + # adjust timezone if set to the datatype + if tzinfo is not None: + dt = dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo) + + return dt + + +cdef class Time32Scalar(Scalar): + """ + Concrete class for time32 scalars. + """ + + @property + def value(self): + cdef CTime32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime32Scalar* sp = self.wrapped.get() + CTime32Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class Time64Scalar(Scalar): + """ + Concrete class for time64 scalars. + """ + + @property + def value(self): + cdef CTime64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime64Scalar* sp = self.wrapped.get() + CTime64Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class TimestampScalar(Scalar): + """ + Concrete class for timestamp scalars. + """ + + @property + def value(self): + cdef CTimestampScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timestamp instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.datetime instance. + """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not sp.is_valid: + return None + + if not dtype.timezone().empty(): + tzinfo = string_to_tzinfo(frombytes(dtype.timezone())) + else: + tzinfo = None + + return _datetime_from_int(sp.value, unit=dtype.unit(), tzinfo=tzinfo) + + def __repr__(self): + """ + Return the representation of TimestampScalar using `strftime` to avoid + original repr datetime values being out of range. + """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not dtype.timezone().empty(): + type_format = str(_pc().strftime(self, format="%Y-%m-%dT%H:%M:%S%z")) + else: + type_format = str(_pc().strftime(self)) + return ''.format( + self.__class__.__name__, type_format + ) + + +cdef class DurationScalar(Scalar): + """ + Concrete class for duration scalars. 
+ """ + + @property + def value(self): + cdef CDurationScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timedelta instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.timedelta instance. + """ + cdef: + CDurationScalar* sp = self.wrapped.get() + CDurationType* dtype = sp.type.get() + TimeUnit unit = dtype.unit() + + if not sp.is_valid: + return None + + if unit == TimeUnit_SECOND: + return datetime.timedelta(seconds=sp.value) + elif unit == TimeUnit_MILLI: + return datetime.timedelta(milliseconds=sp.value) + elif unit == TimeUnit_MICRO: + return datetime.timedelta(microseconds=sp.value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timedelta(sp.value, unit='ns') + # otherwise safely truncate to microsecond resolution timedelta + if sp.value % 1000 != 0: + raise ValueError( + "Nanosecond duration {} is not safely convertible to " + "microseconds to convert to datetime.timedelta. Install " + "pandas to return as Timedelta with nanosecond support or " + "access the .value attribute.".format(sp.value) + ) + return datetime.timedelta(microseconds=sp.value // 1000) + + +cdef class MonthDayNanoIntervalScalar(Scalar): + """ + Concrete class for month, day, nanosecond interval scalars. + """ + + @property + def value(self): + """ + Same as self.as_py() + """ + return self.as_py() + + def as_py(self): + """ + Return this value as a pyarrow.MonthDayNano. + """ + cdef: + PyObject* val + CMonthDayNanoIntervalScalar* scalar + scalar = self.wrapped.get() + val = GetResultValue(MonthDayNanoIntervalScalarToPyObject( + deref(scalar))) + return PyObject_to_object(val) + + +cdef class BinaryScalar(Scalar): + """ + Concrete class for binary-like scalars. + """ + + def as_buffer(self): + """ + Return a view over this value as a Buffer object. + """ + cdef CBaseBinaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_buffer(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python bytes. + """ + buffer = self.as_buffer() + return None if buffer is None else buffer.to_pybytes() + + +cdef class LargeBinaryScalar(BinaryScalar): + pass + + +cdef class FixedSizeBinaryScalar(BinaryScalar): + pass + + +cdef class StringScalar(BinaryScalar): + """ + Concrete class for string-like (utf8) scalars. + """ + + def as_py(self): + """ + Return this value as a Python string. + """ + buffer = self.as_buffer() + return None if buffer is None else str(buffer, 'utf8') + + +cdef class LargeStringScalar(StringScalar): + pass + + +cdef class ListScalar(Scalar): + """ + Concrete class for list-like scalars. + """ + + @property + def values(self): + cdef CBaseListScalar* sp = self.wrapped.get() + if sp.is_valid: + return pyarrow_wrap_array(sp.value) + else: + return None + + def __len__(self): + """ + Return the number of values. + """ + return len(self.values) + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + return self.values[_normalize_index(i, len(self))] + + def __iter__(self): + """ + Iterate over this element's values. + """ + return iter(self.values) + + def as_py(self): + """ + Return this value as a Python list. 
+ """ + arr = self.values + return None if arr is None else arr.to_pylist() + + +cdef class FixedSizeListScalar(ListScalar): + pass + + +cdef class LargeListScalar(ListScalar): + pass + + +cdef class StructScalar(Scalar, collections.abc.Mapping): + """ + Concrete class for struct scalars. + """ + + def __len__(self): + cdef CStructScalar* sp = self.wrapped.get() + return sp.value.size() + + def __iter__(self): + cdef: + CStructScalar* sp = self.wrapped.get() + CStructType* dtype = sp.type.get() + vector[shared_ptr[CField]] fields = dtype.fields() + + for i in range(dtype.num_fields()): + yield frombytes(fields[i].get().name()) + + def items(self): + return ((key, self[i]) for i, key in enumerate(self)) + + def __contains__(self, key): + return key in list(self) + + def __getitem__(self, key): + """ + Return the child value for the given field. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + + Returns + ------- + result : Scalar + """ + cdef: + CFieldRef ref + CStructScalar* sp = self.wrapped.get() + + if isinstance(key, (bytes, str)): + ref = CFieldRef( tobytes(key)) + elif isinstance(key, int): + ref = CFieldRef( key) + else: + raise TypeError('Expected integer or string index') + + try: + return Scalar.wrap(GetResultValue(sp.field(ref))) + except ArrowInvalid as exc: + if isinstance(key, int): + raise IndexError(key) from exc + else: + raise KeyError(key) from exc + + def as_py(self): + """ + Return this value as a Python dict. + """ + if self.is_valid: + try: + return {k: self[k].as_py() for k in self.keys()} + except KeyError: + raise ValueError( + "Converting to Python dictionary is not supported when " + "duplicate field names are present") + else: + return None + + def _as_py_tuple(self): + # a version that returns a tuple instead of dict to support repr/str + # with the presence of duplicate field names + if self.is_valid: + return [(key, self[i].as_py()) for i, key in enumerate(self)] + else: + return None + + def __repr__(self): + return ''.format( + self.__class__.__name__, self._as_py_tuple() + ) + + def __str__(self): + return str(self._as_py_tuple()) + + +cdef class MapScalar(ListScalar): + """ + Concrete class for map scalars. + """ + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + arr = self.values + if arr is None: + raise IndexError(i) + dct = arr[_normalize_index(i, len(arr))] + return (dct[self.type.key_field.name], dct[self.type.item_field.name]) + + def __iter__(self): + """ + Iterate over this element's values. + """ + arr = self.values + if arr is None: + return + for k, v in zip(arr.field(self.type.key_field.name), arr.field(self.type.item_field.name)): + yield (k.as_py(), v.as_py()) + + def as_py(self): + """ + Return this value as a Python list. + """ + cdef CStructScalar* sp = self.wrapped.get() + return list(self) if sp.is_valid else None + + +cdef class DictionaryScalar(Scalar): + """ + Concrete class for dictionary-encoded scalars. 
+ """ + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(type, is_valid, index, dictionary): + cdef: + CDictionaryScalarIndexAndDictionary value + shared_ptr[CDictionaryScalar] wrapped + DataType type_ + Scalar index_ + Array dictionary_ + + type_ = ensure_type(type, allow_none=False) + if not isinstance(type_, DictionaryType): + raise TypeError('Must pass a DictionaryType instance') + + if isinstance(index, Scalar): + if not index.type.equals(type.index_type): + raise TypeError("The Scalar value passed as index must have " + "identical type to the dictionary type's " + "index_type") + index_ = index + else: + index_ = scalar(index, type=type_.index_type) + + if isinstance(dictionary, Array): + if not dictionary.type.equals(type.value_type): + raise TypeError("The Array passed as dictionary must have " + "identical type to the dictionary type's " + "value_type") + dictionary_ = dictionary + else: + dictionary_ = array(dictionary, type=type_.value_type) + + value.index = pyarrow_unwrap_scalar(index_) + value.dictionary = pyarrow_unwrap_array(dictionary_) + + wrapped = make_shared[CDictionaryScalar]( + value, pyarrow_unwrap_data_type(type_), (is_valid) + ) + return Scalar.wrap( wrapped) + + def __reduce__(self): + return DictionaryScalar._reconstruct, ( + self.type, self.is_valid, self.index, self.dictionary + ) + + @property + def index(self): + """ + Return this value's underlying index as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value.index) + + @property + def value(self): + """ + Return the encoded value as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(GetResultValue(sp.GetEncodedValue())) + + @property + def dictionary(self): + cdef CDictionaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_array(sp.value.dictionary) + + def as_py(self): + """ + Return this encoded value as a Python object. + """ + return self.value.as_py() if self.is_valid else None + + +cdef class RunEndEncodedScalar(Scalar): + """ + Concrete class for RunEndEncoded scalars. + """ + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CRunEndEncodedScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) + + def as_py(self): + """ + Return underlying value as a Python object. + """ + return self.value.as_py() + + +cdef class UnionScalar(Scalar): + """ + Concrete class for Union scalars. + """ + + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CSparseUnionScalar* sp + cdef CDenseUnionScalar* dp + if self.type.id == _Type_SPARSE_UNION: + sp = self.wrapped.get() + return Scalar.wrap(sp.value[sp.child_id]) if sp.is_valid else None + else: + dp = self.wrapped.get() + return Scalar.wrap(dp.value) if dp.is_valid else None + + def as_py(self): + """ + Return underlying value as a Python object. + """ + value = self.value + return None if value is None else value.as_py() + + @property + def type_code(self): + """ + Return the union type code for this scalar. + """ + cdef CUnionScalar* sp = self.wrapped.get() + return sp.type_code + + +cdef class ExtensionScalar(Scalar): + """ + Concrete class for Extension scalars. + """ + + @property + def value(self): + """ + Return storage value as a scalar. + """ + cdef CExtensionScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this scalar as a Python object. 
+ """ + return None if self.value is None else self.value.as_py() + + @staticmethod + def from_storage(BaseExtensionType typ, value): + """ + Construct ExtensionScalar from type and storage value. + + Parameters + ---------- + typ : DataType + The extension type for the result scalar. + value : object + The storage value for the result scalar. + + Returns + ------- + ext_scalar : ExtensionScalar + """ + cdef: + shared_ptr[CExtensionScalar] sp_scalar + shared_ptr[CScalar] sp_storage + CExtensionScalar* ext_scalar + + if value is None: + storage = None + elif isinstance(value, Scalar): + if value.type != typ.storage_type: + raise TypeError("Incompatible storage type {0} " + "for extension type {1}" + .format(value.type, typ)) + storage = value + else: + storage = scalar(value, typ.storage_type) + + cdef c_bool is_valid = storage is not None and storage.is_valid + if is_valid: + sp_storage = pyarrow_unwrap_scalar(storage) + else: + sp_storage = MakeNullScalar(( typ.storage_type).sp_type) + sp_scalar = make_shared[CExtensionScalar](sp_storage, typ.sp_type, + is_valid) + with nogil: + check_status(sp_scalar.get().Validate()) + return pyarrow_wrap_scalar( sp_scalar) + + +cdef dict _scalar_classes = { + _Type_BOOL: BooleanScalar, + _Type_UINT8: UInt8Scalar, + _Type_UINT16: UInt16Scalar, + _Type_UINT32: UInt32Scalar, + _Type_UINT64: UInt64Scalar, + _Type_INT8: Int8Scalar, + _Type_INT16: Int16Scalar, + _Type_INT32: Int32Scalar, + _Type_INT64: Int64Scalar, + _Type_HALF_FLOAT: HalfFloatScalar, + _Type_FLOAT: FloatScalar, + _Type_DOUBLE: DoubleScalar, + _Type_DECIMAL128: Decimal128Scalar, + _Type_DECIMAL256: Decimal256Scalar, + _Type_DATE32: Date32Scalar, + _Type_DATE64: Date64Scalar, + _Type_TIME32: Time32Scalar, + _Type_TIME64: Time64Scalar, + _Type_TIMESTAMP: TimestampScalar, + _Type_DURATION: DurationScalar, + _Type_BINARY: BinaryScalar, + _Type_LARGE_BINARY: LargeBinaryScalar, + _Type_FIXED_SIZE_BINARY: FixedSizeBinaryScalar, + _Type_STRING: StringScalar, + _Type_LARGE_STRING: LargeStringScalar, + _Type_LIST: ListScalar, + _Type_LARGE_LIST: LargeListScalar, + _Type_FIXED_SIZE_LIST: FixedSizeListScalar, + _Type_STRUCT: StructScalar, + _Type_MAP: MapScalar, + _Type_DICTIONARY: DictionaryScalar, + _Type_RUN_END_ENCODED: RunEndEncodedScalar, + _Type_SPARSE_UNION: UnionScalar, + _Type_DENSE_UNION: UnionScalar, + _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalScalar, + _Type_EXTENSION: ExtensionScalar, +} + + +cdef object get_scalar_class_from_type( + const shared_ptr[CDataType]& sp_data_type): + cdef CDataType* data_type = sp_data_type.get() + if data_type == NULL: + raise ValueError('Scalar data type was NULL') + + if data_type.id() == _Type_EXTENSION: + py_ext_data_type = pyarrow_wrap_data_type(sp_data_type) + return py_ext_data_type.__arrow_ext_scalar_class__() + else: + return _scalar_classes[data_type.id()] + + +def scalar(value, type=None, *, from_pandas=None, MemoryPool memory_pool=None): + """ + Create a pyarrow.Scalar instance from a Python object. + + Parameters + ---------- + value : Any + Python object coercible to arrow's type system. + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred from + the value. + from_pandas : bool, default None + Use pandas's semantics for inferring nulls from values in + ndarray-like data. Defaults to False if not passed explicitly by user, + or True if a pandas object is passed in. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. 
+ + Returns + ------- + scalar : pyarrow.Scalar + + Examples + -------- + >>> import pyarrow as pa + + >>> pa.scalar(42) + + + >>> pa.scalar("string") + + + >>> pa.scalar([1, 2]) + + + >>> pa.scalar([1, 2], type=pa.list_(pa.int16())) + + """ + cdef: + DataType ty + PyConversionOptions options + shared_ptr[CScalar] scalar + shared_ptr[CArray] array + shared_ptr[CChunkedArray] chunked + bint is_pandas_object = False + CMemoryPool* pool + + type = ensure_type(type, allow_none=True) + pool = maybe_unbox_memory_pool(memory_pool) + + if _is_array_like(value): + value = get_values(value, &is_pandas_object) + + options.size = 1 + + if type is not None: + ty = ensure_type(type) + options.type = ty.sp_type + + if from_pandas is None: + options.from_pandas = is_pandas_object + else: + options.from_pandas = from_pandas + + value = [value] + with nogil: + chunked = GetResultValue(ConvertPySequence(value, None, options, pool)) + + # get the first chunk + assert chunked.get().num_chunks() == 1 + array = chunked.get().chunk(0) + + # retrieve the scalar from the first position + scalar = GetResultValue(array.get().GetScalar(0)) + return Scalar.wrap(scalar) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/table.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/table.pxi new file mode 100644 index 0000000000000000000000000000000000000000..3c450d61a765905c5173531a025587b65cf8c119 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/table.pxi @@ -0,0 +1,5572 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from cpython.pycapsule cimport PyCapsule_CheckExact, PyCapsule_GetPointer, PyCapsule_New + +import warnings +from cython import sizeof + +cdef class ChunkedArray(_PandasConvertible): + """ + An array-like composed from a (possibly empty) collection of pyarrow.Arrays + + Warnings + -------- + Do not call this class's constructor directly. + + Examples + -------- + To construct a ChunkedArray object use :func:`pyarrow.chunked_array`: + + >>> import pyarrow as pa + >>> pa.chunked_array([], type=pa.int8()) + + [ + ... 
+ ] + + >>> pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> isinstance(pa.chunked_array([[2, 2, 4], [4, 5, 100]]), pa.ChunkedArray) + True + """ + + def __cinit__(self): + self.chunked_array = NULL + + def __init__(self): + raise TypeError("Do not call ChunkedArray's constructor directly, use " + "`chunked_array` function instead.") + + cdef void init(self, const shared_ptr[CChunkedArray]& chunked_array): + self.sp_chunked_array = chunked_array + self.chunked_array = chunked_array.get() + + def __reduce__(self): + return chunked_array, (self.chunks, self.type) + + @property + def data(self): + import warnings + warnings.warn("Calling .data on ChunkedArray is provided for " + "compatibility after Column was removed, simply drop " + "this attribute", FutureWarning) + return self + + @property + def type(self): + """ + Return data type of a ChunkedArray. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.type + DataType(int64) + """ + return pyarrow_wrap_data_type(self.sp_chunked_array.get().type()) + + def length(self): + """ + Return length of a ChunkedArray. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.length() + 6 + """ + return self.chunked_array.length() + + def __len__(self): + return self.length() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def to_string(self, *, int indent=0, int window=5, int container_window=2, + c_bool skip_new_lines=False): + """ + Render a "pretty-printed" string representation of the ChunkedArray + + Parameters + ---------- + indent : int + How much to indent right the content of the array, + by default ``0``. + window : int + How many items to preview within each chunk at the begin and end + of the chunk when the chunk is bigger than the window. + The other elements will be ellipsed. + container_window : int + How many chunks to preview at the begin and end + of the array when the array is bigger than the window. + The other elements will be ellipsed. + This setting also applies to list columns. + skip_new_lines : bool + If the array should be rendered as a single line of text + or if each element should be on its own line. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_string(skip_new_lines=True) + '[[2,2,4],[4,5,100]]' + """ + cdef: + c_string result + PrettyPrintOptions options + + with nogil: + options = PrettyPrintOptions(indent, window) + options.skip_new_lines = skip_new_lines + options.container_window = container_window + check_status( + PrettyPrint( + deref(self.chunked_array), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def format(self, **kwargs): + """ + DEPRECATED, use pyarrow.ChunkedArray.to_string + + Parameters + ---------- + **kwargs : dict + + Returns + ------- + str + """ + import warnings + warnings.warn('ChunkedArray.format is deprecated, ' + 'use ChunkedArray.to_string') + return self.to_string(**kwargs) + + def __str__(self): + return self.to_string() + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). 
+ + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.sp_chunked_array.get().ValidateFull()) + else: + with nogil: + check_status(self.sp_chunked_array.get().Validate()) + + @property + def null_count(self): + """ + Number of null entries + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.null_count + 1 + """ + return self.chunked_array.null_count() + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the chunked array. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.nbytes + 49 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.chunked_array)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the chunked array. + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.get_total_buffer_size() + 49 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.chunked_array)) + return total_buffer_size + + def __sizeof__(self): + return super(ChunkedArray, self).__sizeof__() + self.nbytes + + def __iter__(self): + for chunk in self.iterchunks(): + for item in chunk: + yield item + + def __getitem__(self, key): + """ + Slice or return value at given index + + Parameters + ---------- + key : integer or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + value : Scalar (index) or ChunkedArray (slice) + """ + + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.getitem(_normalize_index(key, self.chunked_array.length())) + + cdef getitem(self, int64_t i): + return Scalar.wrap(GetResultValue(self.chunked_array.GetScalar(i))) + + def is_null(self, *, nan_is_null=False): + """ + Return boolean array indicating the null values. + + Parameters + ---------- + nan_is_null : bool (optional, default False) + Whether floating-point NaN values should also be considered null. + + Returns + ------- + array : boolean Array or ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.is_null() + + [ + [ + false, + false, + false, + false, + true, + false + ] + ] + """ + options = _pc().NullOptions(nan_is_null=nan_is_null) + return _pc().call_function('is_null', [self], options) + + def is_nan(self): + """ + Return boolean array indicating the NaN values. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> arr = pa.chunked_array([[2, np.nan, 4], [4, None, 100]]) + >>> arr.is_nan() + + [ + [ + false, + true, + false, + false, + null, + false + ] + ] + """ + return _pc().is_nan(self) + + def is_valid(self): + """ + Return boolean array indicating the non-null values. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.is_valid() + + [ + [ + true, + true, + true + ], + [ + true, + false, + true + ] + ] + """ + return _pc().is_valid(self) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def fill_null(self, fill_value): + """ + Replace each null element in values with fill_value. + + See :func:`pyarrow.compute.fill_null` for full usage. + + Parameters + ---------- + fill_value : any + The replacement value for null entries. + + Returns + ------- + result : Array or ChunkedArray + A new array with nulls replaced by the given value. + + Examples + -------- + >>> import pyarrow as pa + >>> fill_value = pa.scalar(5, type=pa.int8()) + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.fill_null(fill_value) + + [ + [ + 2, + 2, + 4, + 4, + 5, + 100 + ] + ] + """ + return _pc().fill_null(self, fill_value) + + def equals(self, ChunkedArray other): + """ + Return whether the contents of two chunked arrays are equal. + + Parameters + ---------- + other : pyarrow.ChunkedArray + Chunked array to compare against. + + Returns + ------- + are_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array(( + ... ["Flamingo", "Parrot", "Dog"], + ... ["Horse", "Brittle stars", "Centipede"] + ... )) + >>> n_legs.equals(n_legs) + True + >>> n_legs.equals(animals) + False + """ + if other is None: + return False + + cdef: + CChunkedArray* this_arr = self.chunked_array + CChunkedArray* other_arr = other.chunked_array + c_bool result + + with nogil: + result = this_arr.Equals(deref(other_arr)) + + return result + + def _to_pandas(self, options, types_mapper=None, **kwargs): + return _array_like_to_pandas(self, options, types_mapper=types_mapper) + + def to_numpy(self, zero_copy_only=False): + """ + Return a NumPy copy of this array (experimental). + + Parameters + ---------- + zero_copy_only : bool, default False + Introduced for signature consistence with pyarrow.Array.to_numpy. + This must be False here since NumPy arrays' buffer must be contiguous. 
+ + Returns + ------- + array : numpy.ndarray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.to_numpy() + array([ 2, 2, 4, 4, 5, 100]) + """ + if zero_copy_only: + raise ValueError( + "zero_copy_only must be False for pyarrow.ChunkedArray.to_numpy" + ) + cdef: + PyObject* out + PandasOptions c_options + object values + + c_options.to_numpy = True + + with nogil: + check_status( + ConvertChunkedArrayToPandas( + c_options, + self.sp_chunked_array, + self, + &out + ) + ) + + # wrap_array_output uses pandas to convert to Categorical, here + # always convert to numpy array + values = PyObject_to_object(out) + + if isinstance(values, dict): + values = np.take(values['dictionary'], values['indices']) + + return values + + def __array__(self, dtype=None): + values = self.to_numpy() + if dtype is None: + return values + return values.astype(dtype) + + def cast(self, object target_type=None, safe=None, options=None): + """ + Cast array values to another data type + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, None + Type to cast array to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + cast : Array or ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs.type + DataType(int64) + + Change the data type of an array: + + >>> n_legs_seconds = n_legs.cast(pa.duration('s')) + >>> n_legs_seconds.type + DurationType(duration[s]) + """ + return _pc().cast(self, target_type, safe=safe, options=options) + + def dictionary_encode(self, null_encoding='mask'): + """ + Compute dictionary-encoded representation of array. + + See :func:`pyarrow.compute.dictionary_encode` for full usage. + + Parameters + ---------- + null_encoding : str, default "mask" + How to handle null entries. + + Returns + ------- + encoded : ChunkedArray + A dictionary-encoded version of this array. + + Examples + -------- + >>> import pyarrow as pa + >>> animals = pa.chunked_array(( + ... ["Flamingo", "Parrot", "Dog"], + ... ["Horse", "Brittle stars", "Centipede"] + ... )) + >>> animals.dictionary_encode() + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 3, + 4, + 5 + ] + ] + """ + options = _pc().DictionaryEncodeOptions(null_encoding) + return _pc().call_function('dictionary_encode', [self], options) + + def flatten(self, MemoryPool memory_pool=None): + """ + Flatten this ChunkedArray. If it has a struct type, the column is + flattened into one array per struct field. 
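+
+        This is shown in the example below, where a struct-typed chunked
+        array produced by :meth:`value_counts` is split into one chunked
+        array per child field.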
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : list of ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> c_arr = pa.chunked_array(n_legs.value_counts()) + >>> c_arr + + [ + -- is_valid: all not null + -- child 0 type: int64 + [ + 2, + 4, + 5, + 100 + ] + -- child 1 type: int64 + [ + 2, + 2, + 1, + 1 + ] + ] + >>> c_arr.flatten() + [ + [ + [ + 2, + 4, + 5, + 100 + ] + ], + [ + [ + 2, + 2, + 1, + 1 + ] + ]] + >>> c_arr.type + StructType(struct) + >>> n_legs.type + DataType(int64) + """ + cdef: + vector[shared_ptr[CChunkedArray]] flattened + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + flattened = GetResultValue(self.chunked_array.Flatten(pool)) + + return [pyarrow_wrap_chunked_array(col) for col in flattened] + + def combine_chunks(self, MemoryPool memory_pool=None): + """ + Flatten this ChunkedArray into a single non-chunked array. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.combine_chunks() + + [ + 2, + 2, + 4, + 4, + 5, + 100 + ] + """ + if self.num_chunks == 0: + return array([], type=self.type) + else: + return concat_arrays(self.chunks) + + def unique(self): + """ + Compute distinct elements in array + + Returns + ------- + pyarrow.Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.unique() + + [ + 2, + 4, + 5, + 100 + ] + """ + return _pc().call_function('unique', [self]) + + def value_counts(self): + """ + Compute counts of unique elements in array. + + Returns + ------- + An array of structs + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.value_counts() + + -- is_valid: all not null + -- child 0 type: int64 + [ + 2, + 4, + 5, + 100 + ] + -- child 1 type: int64 + [ + 2, + 2, + 1, + 1 + ] + """ + return _pc().call_function('value_counts', [self]) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this ChunkedArray + + Parameters + ---------- + offset : int, default 0 + Offset from start of array to slice + length : int, default None + Length of slice (default is until end of batch starting from + offset) + + Returns + ------- + sliced : ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.slice(2,2) + + [ + [ + 4 + ], + [ + 4 + ] + ] + """ + cdef shared_ptr[CChunkedArray] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.chunked_array.Slice(offset) + else: + result = self.chunked_array.Slice(offset, length) + + return pyarrow_wrap_chunked_array(result) + + def filter(self, mask, object null_selection_behavior="drop"): + """ + Select values from the chunked array. 
+ + See :func:`pyarrow.compute.filter` for full usage. + + Parameters + ---------- + mask : Array or array-like + The boolean mask to filter the chunked array with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled. + + Returns + ------- + filtered : Array or ChunkedArray + An array of the same type, with only the elements selected by + the boolean mask. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> mask = pa.array([True, False, None, True, False, True]) + >>> n_legs.filter(mask) + + [ + [ + 2 + ], + [ + 4, + 100 + ] + ] + >>> n_legs.filter(mask, null_selection_behavior="emit_null") + + [ + [ + 2, + null + ], + [ + 4, + 100 + ] + ] + """ + return _pc().filter(self, mask, null_selection_behavior) + + def index(self, value, start=None, end=None, *, memory_pool=None): + """ + Find the first index of a value. + + See :func:`pyarrow.compute.index` for full usage. + + Parameters + ---------- + value : Scalar or object + The value to look for in the array. + start : int, optional + The start index where to look for `value`. + end : int, optional + The end index where to look for `value`. + memory_pool : MemoryPool, optional + A memory pool for potential memory allocations. + + Returns + ------- + index : Int64Scalar + The index of the value in the array (-1 if not found). + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.index(4) + + >>> n_legs.index(4, start=3) + + """ + return _pc().index(self, value, start, end, memory_pool=memory_pool) + + def take(self, object indices): + """ + Select values from the chunked array. + + See :func:`pyarrow.compute.take` for full usage. + + Parameters + ---------- + indices : Array or array-like + The indices in the array whose values will be returned. + + Returns + ------- + taken : Array or ChunkedArray + An array with the same datatype, containing the taken values. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.take([1,4,5]) + + [ + [ + 2, + 5, + 100 + ] + ] + """ + return _pc().take(self, indices) + + def drop_null(self): + """ + Remove missing values from a chunked array. + See :func:`pyarrow.compute.drop_null` for full description. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.drop_null() + + [ + [ + 2, + 2 + ], + [ + 4, + 5, + 100 + ] + ] + """ + return _pc().drop_null(self) + + def sort(self, order="ascending", **kwargs): + """ + Sort the ChunkedArray + + Parameters + ---------- + order : str, default "ascending" + Which order to sort values in. + Accepted values are "ascending", "descending". + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + result : ChunkedArray + """ + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=[("", order)], **kwargs) + ) + return self.take(indices) + + def unify_dictionaries(self, MemoryPool memory_pool=None): + """ + Unify dictionaries across all chunks. 
+ + This method returns an equivalent chunked array, but where all + chunks share the same dictionary values. Dictionary indices are + transposed accordingly. + + If there are no dictionaries in the chunked array, it is returned + unchanged. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + result : ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> arr_1 = pa.array(["Flamingo", "Parrot", "Dog"]).dictionary_encode() + >>> arr_2 = pa.array(["Horse", "Brittle stars", "Centipede"]).dictionary_encode() + >>> c_arr = pa.chunked_array([arr_1, arr_2]) + >>> c_arr + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... + -- dictionary: + [ + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ] + ] + >>> c_arr.unify_dictionaries() + + [ + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 0, + 1, + 2 + ], + ... + -- dictionary: + [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede" + ] + -- indices: + [ + 3, + 4, + 5 + ] + ] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CChunkedArray] c_result + + with nogil: + c_result = GetResultValue(CDictionaryUnifier.UnifyChunkedArray( + self.sp_chunked_array, pool)) + + return pyarrow_wrap_chunked_array(c_result) + + @property + def num_chunks(self): + """ + Number of underlying chunks. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs.num_chunks + 2 + """ + return self.chunked_array.num_chunks() + + def chunk(self, i): + """ + Select a chunk by its index. + + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Array + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs.chunk(1) + + [ + 4, + 5, + 100 + ] + """ + if i >= self.num_chunks or i < 0: + raise IndexError('Chunk index out of range.') + + return pyarrow_wrap_array(self.chunked_array.chunk(i)) + + @property + def chunks(self): + """ + Convert to a list of single-chunked arrays. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, None], [4, 5, 100]]) + >>> n_legs + + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ] + ] + >>> n_legs.chunks + [ + [ + 2, + 2, + null + ], + [ + 4, + 5, + 100 + ]] + """ + return list(self.iterchunks()) + + def iterchunks(self): + """ + Convert to an iterator of ChunkArrays. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> for i in n_legs.iterchunks(): + ... print(i.null_count) + ... + 0 + 1 + + """ + for i in range(self.num_chunks): + yield self.chunk(i) + + def to_pylist(self): + """ + Convert to a list of native Python objects. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, None, 100]]) + >>> n_legs.to_pylist() + [2, 2, 4, 4, None, 100] + """ + result = [] + for i in range(self.num_chunks): + result += self.chunk(i).to_pylist() + return result + + +def chunked_array(arrays, type=None): + """ + Construct chunked array from list of array-like objects + + Parameters + ---------- + arrays : Array, list of Array, or array-like + Must all be the same data type. 
Can be empty only if type also passed. + type : DataType or string coercible to DataType + + Returns + ------- + ChunkedArray + + Examples + -------- + >>> import pyarrow as pa + >>> pa.chunked_array([], type=pa.int8()) + + [ + ... + ] + + >>> pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + + [ + [ + 2, + 2, + 4 + ], + [ + 4, + 5, + 100 + ] + ] + """ + cdef: + Array arr + vector[shared_ptr[CArray]] c_arrays + shared_ptr[CChunkedArray] c_result + shared_ptr[CDataType] c_type + + type = ensure_type(type, allow_none=True) + + if isinstance(arrays, Array): + arrays = [arrays] + + for x in arrays: + arr = x if isinstance(x, Array) else array(x, type=type) + + if type is None: + # it allows more flexible chunked array construction from to coerce + # subsequent arrays to the firstly inferred array type + # it also spares the inference overhead after the first chunk + type = arr.type + + c_arrays.push_back(arr.sp_array) + + c_type = pyarrow_unwrap_data_type(type) + with nogil: + c_result = GetResultValue(CChunkedArray.Make(c_arrays, c_type)) + return pyarrow_wrap_chunked_array(c_result) + + +cdef _schema_from_arrays(arrays, names, metadata, shared_ptr[CSchema]* schema): + cdef: + Py_ssize_t K = len(arrays) + c_string c_name + shared_ptr[CDataType] c_type + shared_ptr[const CKeyValueMetadata] c_meta + vector[shared_ptr[CField]] c_fields + + if metadata is not None: + c_meta = KeyValueMetadata(metadata).unwrap() + + if K == 0: + if names is None or len(names) == 0: + schema.reset(new CSchema(c_fields, c_meta)) + return arrays + else: + raise ValueError('Length of names ({}) does not match ' + 'length of arrays ({})'.format(len(names), K)) + + c_fields.resize(K) + + if names is None: + raise ValueError('Must pass names or schema when constructing ' + 'Table or RecordBatch.') + + if len(names) != K: + raise ValueError('Length of names ({}) does not match ' + 'length of arrays ({})'.format(len(names), K)) + + converted_arrays = [] + for i in range(K): + val = arrays[i] + if not isinstance(val, (Array, ChunkedArray)): + val = array(val) + + c_type = ( val.type).sp_type + + if names[i] is None: + c_name = b'None' + else: + c_name = tobytes(names[i]) + c_fields[i].reset(new CField(c_name, c_type, True)) + converted_arrays.append(val) + + schema.reset(new CSchema(c_fields, c_meta)) + return converted_arrays + + +cdef _sanitize_arrays(arrays, names, schema, metadata, + shared_ptr[CSchema]* c_schema): + cdef Schema cy_schema + if schema is None: + converted_arrays = _schema_from_arrays(arrays, names, metadata, + c_schema) + else: + if names is not None: + raise ValueError('Cannot pass both schema and names') + if metadata is not None: + raise ValueError('Cannot pass both schema and metadata') + cy_schema = schema + + if len(schema) != len(arrays): + raise ValueError('Schema and number of arrays unequal') + + c_schema[0] = cy_schema.sp_schema + converted_arrays = [] + for i, item in enumerate(arrays): + item = asarray(item, type=schema[i].type) + converted_arrays.append(item) + return converted_arrays + +cdef class _Tabular(_PandasConvertible): + """Internal: An interface for common operations on tabular objects.""" + + def __init__(self): + raise TypeError(f"Do not call {self.__class__.__name__}'s constructor directly, use " + f"one of the `{self.__class__.__name__}.from_*` functions instead.") + + def __array__(self, dtype=None): + column_arrays = [ + np.asarray(self.column(i), dtype=dtype) for i in range(self.num_columns) + ] + if column_arrays: + arr = np.stack(column_arrays, axis=1) + else: + arr = 
np.empty((self.num_rows, 0), dtype=dtype) + return arr + + def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): + """ + Return the dataframe interchange object implementing the interchange protocol. + + Parameters + ---------- + nan_as_null : bool, default False + Whether to tell the DataFrame to overwrite null values in the data + with ``NaN`` (or ``NaT``). + allow_copy : bool, default True + Whether to allow memory copying when exporting. If set to False + it would cause non-zero-copy exports to fail. + + Returns + ------- + DataFrame interchange object + The object which consuming library can use to ingress the dataframe. + + Notes + ----- + Details on the interchange protocol: + https://data-apis.org/dataframe-protocol/latest/index.html + `nan_as_null` currently has no effect; once support for nullable extension + dtypes is added, this value should be propagated to columns. + """ + + from pyarrow.interchange.dataframe import _PyArrowDataFrame + + return _PyArrowDataFrame(self, nan_as_null, allow_copy) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __getitem__(self, key): + """ + Slice or return column at given index or column name + + Parameters + ---------- + key : integer, str, or slice + Slices with step not equal to 1 (or None) will produce a copy + rather than a zero-copy view + + Returns + ------- + Array (from RecordBatch) or ChunkedArray (from Table) for column input. + RecordBatch or Table for slice input. + """ + if isinstance(key, slice): + return _normalize_slice(self, key) + + return self.column(key) + + def __len__(self): + return self.num_rows + + def __repr__(self): + if not self._is_initialized(): + raise ValueError("This object's internal pointer is NULL, do not " + "use any methods or attributes on this object") + return self.to_string(preview_cols=10) + + def _column(self, int i): + raise NotImplementedError + + def _ensure_integer_index(self, i): + """ + Ensure integer index (convert string column name to integer if needed). + """ + if isinstance(i, (bytes, str)): + field_indices = self.schema.get_all_field_indices(i) + + if len(field_indices) == 0: + raise KeyError("Field \"{}\" does not exist in schema" + .format(i)) + elif len(field_indices) > 1: + raise KeyError("Field \"{}\" exists {} times in schema" + .format(i, len(field_indices))) + else: + return field_indices[0] + elif isinstance(i, int): + return i + else: + raise TypeError("Index must either be string or integer") + + def _is_initialized(self): + raise NotImplementedError + + def column(self, i): + """ + Select single column from Table or RecordBatch. + + Parameters + ---------- + i : int or string + The index or name of the column to retrieve. + + Returns + ------- + column : Array (for RecordBatch) or ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Select a column by numeric index: + + >>> table.column(0) + + [ + [ + 2, + 4, + 5, + 100 + ] + ] + + Select a column by its name: + + >>> table.column("animals") + + [ + [ + "Flamingo", + "Horse", + "Brittle stars", + "Centipede" + ] + ] + """ + return self._column(self._ensure_integer_index(i)) + + @property + def column_names(self): + """ + Names of the Table or RecordBatch columns. 
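+
+        The names are taken from the schema fields, in positional order.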
+ + Returns + ------- + list of str + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> table = pa.Table.from_arrays([[2, 4, 5, 100], + ... ["Flamingo", "Horse", "Brittle stars", "Centipede"]], + ... names=['n_legs', 'animals']) + >>> table.column_names + ['n_legs', 'animals'] + """ + return [self.field(i).name for i in range(self.num_columns)] + + @property + def columns(self): + """ + List of all columns in numerical order. + + Returns + ------- + columns : list of Array (for RecordBatch) or list of ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.columns + [ + [ + [ + null, + 4, + 5, + null + ] + ], + [ + [ + "Flamingo", + "Horse", + null, + "Centipede" + ] + ]] + """ + return [self._column(i) for i in range(self.num_columns)] + + def drop_null(self): + """ + Remove rows that contain missing values from a Table or RecordBatch. + + See :func:`pyarrow.compute.drop_null` for full usage. + + Returns + ------- + Table or RecordBatch + A tabular object with the same schema, with rows containing + no missing values. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [None, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.drop_null() + pyarrow.Table + year: double + n_legs: int64 + animals: string + ---- + year: [[2022,2021]] + n_legs: [[4,100]] + animals: [["Horse","Centipede"]] + """ + return _pc().drop_null(self) + + def field(self, i): + """ + Select a schema field by its column name or numeric index. + + Parameters + ---------- + i : int or string + The index or name of the field to retrieve. + + Returns + ------- + Field + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.field(0) + pyarrow.Field + >>> table.field(1) + pyarrow.Field + """ + return self.schema.field(i) + + @classmethod + def from_pydict(cls, mapping, schema=None, metadata=None): + """ + Construct a Table or RecordBatch from Arrow arrays or columns. + + Parameters + ---------- + mapping : dict or Mapping + A mapping of strings to Arrays or Python lists. + schema : Schema, default None + If not passed, will be inferred from the Mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). 
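+
+        The keys of ``mapping`` become the column names (in insertion
+        order) unless ``schema`` is passed, in which case the schema
+        determines the columns and their order.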
+ + Returns + ------- + Table or RecordBatch + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> pydict = {'n_legs': n_legs, 'animals': animals} + + Construct a Table from a dictionary of arrays: + + >>> pa.Table.from_pydict(pydict) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_pydict(pydict).schema + n_legs: int64 + animals: string + + Construct a Table from a dictionary of arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pydict(pydict, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a dictionary of arrays with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.Table.from_pydict(pydict, schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + return _from_pydict(cls=cls, + mapping=mapping, + schema=schema, + metadata=metadata) + + @classmethod + def from_pylist(cls, mapping, schema=None, metadata=None): + """ + Construct a Table or RecordBatch from list of rows / dictionaries. + + Parameters + ---------- + mapping : list of dicts of rows + A mapping of strings to row values. + schema : Schema, default None + If not passed, will be inferred from the first row of the + mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table or RecordBatch + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, + ... {'n_legs': 4, 'animals': 'Dog'}] + + Construct a Table from a list of rows: + + >>> pa.Table.from_pylist(pylist) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4]] + animals: [["Flamingo","Dog"]] + + Construct a Table from a list of rows with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pylist(pylist, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a list of rows with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.Table.from_pylist(pylist, schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + return _from_pylist(cls=cls, + mapping=mapping, + schema=schema, + metadata=metadata) + + def itercolumns(self): + """ + Iterator over all columns in their numerical order. + + Yields + ------ + Array (for RecordBatch) or ChunkedArray (for Table) + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> for i in table.itercolumns(): + ... print(i.null_count) + ... 
+ 2 + 1 + """ + for i in range(self.num_columns): + yield self._column(i) + + @property + def num_columns(self): + raise NotImplementedError + + @property + def num_rows(self): + raise NotImplementedError + + @property + def schema(self): + raise NotImplementedError + + def sort_by(self, sorting, **kwargs): + """ + Sort the Table or RecordBatch by one or multiple columns. + + Parameters + ---------- + sorting : str or list[tuple(name, order)] + Name of the column to use to sort (ascending), or + a list of multiple sorting conditions where + each entry is a tuple with column name + and sorting order ("ascending" or "descending") + **kwargs : dict, optional + Additional sorting options. + As allowed by :class:`SortOptions` + + Returns + ------- + Table or RecordBatch + A new tabular object sorted according to the sort keys. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.sort_by('animal') + pyarrow.Table + year: int64 + n_legs: int64 + animal: string + ---- + year: [[2019,2021,2021,2020,2022,2022]] + n_legs: [[5,100,4,2,4,2]] + animal: [["Brittle stars","Centipede","Dog","Flamingo","Horse","Parrot"]] + """ + if isinstance(sorting, str): + sorting = [(sorting, "ascending")] + + indices = _pc().sort_indices( + self, + options=_pc().SortOptions(sort_keys=sorting, **kwargs) + ) + return self.take(indices) + + def take(self, object indices): + """ + Select rows from a Table or RecordBatch. + + See :func:`pyarrow.compute.take` for full usage. + + Parameters + ---------- + indices : Array or array-like + The indices in the tabular object whose rows will be returned. + + Returns + ------- + Table or RecordBatch + A tabular object with the same schema, containing the taken rows. + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.take([1,3]) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2022,2021]] + n_legs: [[4,100]] + animals: [["Horse","Centipede"]] + """ + return _pc().take(self, indices) + + def to_pydict(self): + """ + Convert the Table or RecordBatch to a dict or OrderedDict. + + Returns + ------- + dict + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> table = pa.Table.from_arrays([n_legs, animals], names=["n_legs", "animals"]) + >>> table.to_pydict() + {'n_legs': [2, 2, 4, 4, 5, 100], 'animals': ['Flamingo', 'Parrot', ..., 'Centipede']} + """ + entries = [] + for i in range(self.num_columns): + name = self.field(i).name + column = self[i].to_pylist() + entries.append((name, column)) + return ordered_dict(entries) + + def to_pylist(self): + """ + Convert the Table or RecordBatch to a list of rows / dictionaries. + + Returns + ------- + list + + Examples + -------- + Table (works similarly for RecordBatch) + + >>> import pyarrow as pa + >>> data = [[2, 4, 5, 100], + ... 
["Flamingo", "Horse", "Brittle stars", "Centipede"]] + >>> table = pa.table(data, names=["n_legs", "animals"]) + >>> table.to_pylist() + [{'n_legs': 2, 'animals': 'Flamingo'}, {'n_legs': 4, 'animals': 'Horse'}, ... + """ + pydict = self.to_pydict() + names = self.schema.names + pylist = [{column: pydict[column][row] for column in names} + for row in range(self.num_rows)] + return pylist + + def to_string(self, *, show_metadata=False, preview_cols=0): + """ + Return human-readable string representation of Table or RecordBatch. + + Parameters + ---------- + show_metadata : bool, default False + Display Field-level and Schema-level KeyValueMetadata. + preview_cols : int, default 0 + Display values of the columns for the first N columns. + + Returns + ------- + str + """ + # Use less verbose schema output. + schema_as_string = self.schema.to_string( + show_field_metadata=show_metadata, + show_schema_metadata=show_metadata + ) + title = 'pyarrow.{}\n{}'.format(type(self).__name__, schema_as_string) + pieces = [title] + if preview_cols: + pieces.append('----') + for i in range(min(self.num_columns, preview_cols)): + pieces.append('{}: {}'.format( + self.field(i).name, + self.column(i).to_string(indent=0, skip_new_lines=True) + )) + if preview_cols < self.num_columns: + pieces.append('...') + return '\n'.join(pieces) + + +cdef class RecordBatch(_Tabular): + """ + Batch of rows of columns of equal length + + Warnings + -------- + Do not call this class's constructor directly, use one of the + ``RecordBatch.from_*`` functions instead. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Constructing a RecordBatch from arrays: + + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Constructing a RecordBatch from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.RecordBatch.from_pandas(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_pandas(df).to_pandas() + year month day n_legs animals + 0 2020 3 1 2 Flamingo + 1 2022 5 5 4 Horse + 2 2021 7 9 5 Brittle stars + 3 2022 9 13 100 Centipede + + Constructing a RecordBatch from pylist: + + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, + ... 
{'n_legs': 4, 'animals': 'Dog'}] + >>> pa.RecordBatch.from_pylist(pylist).to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Dog + + You can also construct a RecordBatch using :func:`pyarrow.record_batch`: + + >>> pa.record_batch([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + >>> pa.record_batch(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + """ + + def __cinit__(self): + self.batch = NULL + self._schema = None + + cdef void init(self, const shared_ptr[CRecordBatch]& batch): + self.sp_batch = batch + self.batch = batch.get() + + def _is_initialized(self): + return self.batch != NULL + + def __reduce__(self): + return _reconstruct_record_batch, (self.columns, self.schema) + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.batch.ValidateFull()) + else: + with nogil: + check_status(self.batch.Validate()) + + def replace_schema_metadata(self, metadata=None): + """ + Create shallow copy of record batch by replacing schema + key-value metadata with the indicated new metadata (which may be None, + which deletes any existing metadata + + Parameters + ---------- + metadata : dict, default None + + Returns + ------- + shallow_copy : RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + + Constructing a RecordBatch with schema and metadata: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> batch = pa.RecordBatch.from_arrays([n_legs], schema=my_schema) + >>> batch.schema + n_legs: int64 + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Shallow copy of a RecordBatch with deleted schema metadata: + + >>> batch.replace_schema_metadata().schema + n_legs: int64 + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CRecordBatch] c_batch + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + with nogil: + c_batch = self.batch.ReplaceSchemaMetadata(c_meta) + + return pyarrow_wrap_batch(c_batch) + + @property + def num_columns(self): + """ + Number of columns + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.num_columns + 2 + """ + return self.batch.num_columns() + + @property + def num_rows(self): + """ + Number of rows + + Due to the definition of a RecordBatch, all columns have the same + number of rows. 
+ + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.num_rows + 6 + """ + return self.batch.num_rows() + + @property + def schema(self): + """ + Schema of the RecordBatch and its columns + + Returns + ------- + pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.schema + n_legs: int64 + animals: string + """ + if self._schema is None: + self._schema = pyarrow_wrap_schema(self.batch.schema()) + + return self._schema + + def _column(self, int i): + """ + Select single column from record batch by its numeric index. + + Parameters + ---------- + i : int + The index of the column to retrieve. + + Returns + ------- + column : pyarrow.Array + """ + cdef int index = _normalize_index(i, self.num_columns) + cdef Array result = pyarrow_wrap_array(self.batch.column(index)) + result._name = self.schema[index].name + return result + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the record batch. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.nbytes + 116 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.batch)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the record batch + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.get_total_buffer_size() + 120 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.batch)) + return total_buffer_size + + def __sizeof__(self): + return super(RecordBatch, self).__sizeof__() + self.nbytes + + def serialize(self, memory_pool=None): + """ + Write RecordBatch to Buffer as encapsulated IPC message, which does not + include a Schema. + + To reconstruct a RecordBatch from the encapsulated IPC message Buffer + returned by this function, a Schema must be passed separately. See + Examples. 
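+
+        If the schema needs to travel with the data, write the batch with
+        :func:`pyarrow.ipc.new_stream` or :func:`pyarrow.ipc.new_file`
+        instead.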
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> buf = batch.serialize() + >>> buf + + + Reconstruct RecordBatch from IPC message Buffer and original Schema + + >>> pa.ipc.read_record_batch(buf, batch.schema) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + """ + cdef shared_ptr[CBuffer] buffer + cdef CIpcWriteOptions options = CIpcWriteOptions.Defaults() + options.memory_pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + buffer = GetResultValue( + SerializeRecordBatch(deref(self.batch), options)) + return pyarrow_wrap_buffer(buffer) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this RecordBatch + + Parameters + ---------- + offset : int, default 0 + Offset from start of record batch to slice + length : int, default None + Length of slice (default is until end of batch starting from + offset) + + Returns + ------- + sliced : RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + >>> batch.slice(offset=3).to_pandas() + n_legs animals + 0 4 Horse + 1 5 Brittle stars + 2 100 Centipede + >>> batch.slice(length=2).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + >>> batch.slice(offset=3, length=1).to_pandas() + n_legs animals + 0 4 Horse + """ + cdef shared_ptr[CRecordBatch] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.batch.Slice(offset) + else: + result = self.batch.Slice(offset, length) + + return pyarrow_wrap_batch(result) + + def filter(self, mask, object null_selection_behavior="drop"): + """ + Select rows from the record batch. + + See :func:`pyarrow.compute.filter` for full usage. + + Parameters + ---------- + mask : Array or array-like + The boolean mask to filter the record batch with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled. + + Returns + ------- + filtered : RecordBatch + A record batch of the same schema, with only the rows selected + by the boolean mask. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... 
names=["n_legs", "animals"]) + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Define a mask and select rows: + + >>> mask=[True, True, False, True, False, None] + >>> batch.filter(mask).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Horse + >>> batch.filter(mask, null_selection_behavior='emit_null').to_pandas() + n_legs animals + 0 2.0 Flamingo + 1 2.0 Parrot + 2 4.0 Horse + 3 NaN None + """ + return _pc().filter(self, mask, null_selection_behavior) + + def equals(self, object other, bint check_metadata=False): + """ + Check if contents of two record batches are equal. + + Parameters + ---------- + other : pyarrow.RecordBatch + RecordBatch to compare against. + check_metadata : bool, default False + Whether schema metadata equality should be checked as well. + + Returns + ------- + are_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"]) + >>> batch_0 = pa.record_batch([]) + >>> batch_1 = pa.RecordBatch.from_arrays([n_legs, animals], + ... names=["n_legs", "animals"], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> batch.equals(batch) + True + >>> batch.equals(batch_0) + False + >>> batch.equals(batch_1) + True + >>> batch.equals(batch_1, check_metadata=True) + False + """ + cdef: + CRecordBatch* this_batch = self.batch + shared_ptr[CRecordBatch] other_batch = pyarrow_unwrap_batch(other) + c_bool result + + if not other_batch: + return False + + with nogil: + result = this_batch.Equals(deref(other_batch), check_metadata) + + return result + + def select(self, object columns): + """ + Select columns of the RecordBatch. + + Returns a new RecordBatch with the specified columns, and metadata + preserved. + + Parameters + ---------- + columns : list-like + The column names or integer indices to select. + + Returns + ------- + RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> batch = pa.record_batch([n_legs, animals], + ... names=["n_legs", "animals"]) + + Select columns my indices: + + >>> batch.select([1]) + pyarrow.RecordBatch + animals: string + ---- + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + + Select columns by names: + + >>> batch.select(["n_legs"]) + pyarrow.RecordBatch + n_legs: int64 + ---- + n_legs: [2,2,4,4,5,100] + """ + cdef: + shared_ptr[CRecordBatch] c_batch + vector[int] c_indices + + for idx in columns: + idx = self._ensure_integer_index(idx) + idx = _normalize_index(idx, self.num_columns) + c_indices.push_back( idx) + + with nogil: + c_batch = GetResultValue(self.batch.SelectColumns(move(c_indices))) + + return pyarrow_wrap_batch(c_batch) + + def _to_pandas(self, options, **kwargs): + return Table.from_batches([self])._to_pandas(options, **kwargs) + + @classmethod + def from_pandas(cls, df, Schema schema=None, preserve_index=None, + nthreads=None, columns=None): + """ + Convert pandas.DataFrame to an Arrow RecordBatch + + Parameters + ---------- + df : pandas.DataFrame + schema : pyarrow.Schema, optional + The expected schema of the RecordBatch. This can be used to + indicate the type of columns if we cannot infer it automatically. 
+ If passed, the output will have exactly this schema. Columns + specified in the schema that are not found in the DataFrame columns + or its index will raise an error. Additional columns or index + levels in the DataFrame which are not specified in the schema will + be ignored. + preserve_index : bool, optional + Whether to store the index as an additional column in the resulting + ``RecordBatch``. The default of None will store the index as a + column, except for RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + nthreads : int, default None + If greater than 1, convert columns to Arrow in parallel using + indicated number of threads. By default, this follows + :func:`pyarrow.cpu_count` (may use up to system CPU count threads). + columns : list, optional + List of column to be converted. If None, use all columns. + + Returns + ------- + pyarrow.RecordBatch + + + Examples + -------- + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + + Convert pandas DataFrame to RecordBatch: + + >>> import pyarrow as pa + >>> pa.RecordBatch.from_pandas(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + Convert pandas DataFrame to RecordBatch using schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.RecordBatch.from_pandas(df, schema=my_schema) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + Convert pandas DataFrame to RecordBatch specifying columns: + + >>> pa.RecordBatch.from_pandas(df, columns=["n_legs"]) + pyarrow.RecordBatch + n_legs: int64 + ---- + n_legs: [2,4,5,100] + """ + from pyarrow.pandas_compat import dataframe_to_arrays + arrays, schema, n_rows = dataframe_to_arrays( + df, schema, preserve_index, nthreads=nthreads, columns=columns + ) + + # If df is empty but row index is not, create empty RecordBatch with rows >0 + cdef vector[shared_ptr[CArray]] c_arrays + if n_rows: + return pyarrow_wrap_batch(CRecordBatch.Make(( schema).sp_schema, + n_rows, c_arrays)) + else: + return cls.from_arrays(arrays, schema=schema) + + @staticmethod + def from_arrays(list arrays, names=None, schema=None, metadata=None): + """ + Construct a RecordBatch from multiple pyarrow.Arrays + + Parameters + ---------- + arrays : list of pyarrow.Array + One for each field in RecordBatch + names : list of str, optional + Names for the batch fields. If not passed, schema must be passed + schema : Schema, default None + Schema for the created batch. If not passed, names must be passed + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). 
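+
+        Passing both ``schema`` and ``names`` (or both ``schema`` and
+        ``metadata``) raises ``ValueError``.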
+ + Returns + ------- + pyarrow.RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a RecordBatch from pyarrow Arrays using names: + + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.RecordBatch.from_arrays([n_legs, animals], names=names).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Construct a RecordBatch from pyarrow Arrays using schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.RecordBatch.from_arrays([n_legs, animals], schema=my_schema).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + >>> pa.RecordBatch.from_arrays([n_legs, animals], schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + cdef: + Array arr + shared_ptr[CSchema] c_schema + vector[shared_ptr[CArray]] c_arrays + int64_t num_rows + + if len(arrays) > 0: + num_rows = len(arrays[0]) + else: + num_rows = 0 + + if isinstance(names, Schema): + import warnings + warnings.warn("Schema passed to names= option, please " + "pass schema= explicitly. " + "Will raise exception in future", FutureWarning) + schema = names + names = None + + converted_arrays = _sanitize_arrays(arrays, names, schema, metadata, + &c_schema) + + c_arrays.reserve(len(arrays)) + for arr in converted_arrays: + if len(arr) != num_rows: + raise ValueError('Arrays were not all the same length: ' + '{0} vs {1}'.format(len(arr), num_rows)) + c_arrays.push_back(arr.sp_array) + + result = pyarrow_wrap_batch(CRecordBatch.Make(c_schema, num_rows, + c_arrays)) + result.validate() + return result + + @staticmethod + def from_struct_array(StructArray struct_array): + """ + Construct a RecordBatch from a StructArray. + + Each field in the StructArray will become a column in the resulting + ``RecordBatch``. + + Parameters + ---------- + struct_array : StructArray + Array to construct the record batch from. + + Returns + ------- + pyarrow.RecordBatch + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> pa.RecordBatch.from_struct_array(struct).to_pandas() + animals n_legs year + 0 Parrot 2 NaN + 1 None 4 2022.0 + """ + cdef: + shared_ptr[CRecordBatch] c_record_batch + with nogil: + c_record_batch = GetResultValue( + CRecordBatch.FromStructArray(struct_array.sp_array)) + return pyarrow_wrap_batch(c_record_batch) + + def to_struct_array(self): + """ + Convert to a struct array. + """ + cdef: + shared_ptr[CRecordBatch] c_record_batch + shared_ptr[CArray] c_array + + c_record_batch = pyarrow_unwrap_batch(self) + with nogil: + c_array = GetResultValue( + deref(c_record_batch).ToStructArray()) + return pyarrow_wrap_array(c_array) + + def _export_to_c(self, out_ptr, out_schema_ptr=0): + """ + Export to a C ArrowArray struct, given its pointer. + + If a C ArrowSchema struct pointer is also given, the record batch + schema is exported to it at the same time. 
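+
+        The pointers are passed as plain Python integers holding the
+        addresses of caller-allocated structs, e.g. obtained through
+        ``cffi`` or ``ctypes``.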
+ + Parameters + ---------- + out_ptr: int + The raw pointer to a C ArrowArray struct. + out_schema_ptr: int (optional) + The raw pointer to a C ArrowSchema struct. + + Be careful: if you don't pass the ArrowArray struct to a consumer, + array memory will leak. This is a low-level function intended for + expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(out_ptr) + void* c_schema_ptr = _as_c_pointer(out_schema_ptr, + allow_null=True) + with nogil: + check_status(ExportRecordBatch(deref(self.sp_batch), + c_ptr, + c_schema_ptr)) + + @staticmethod + def _import_from_c(in_ptr, schema): + """ + Import RecordBatch from a C ArrowArray struct, given its pointer + and the imported schema. + + Parameters + ---------- + in_ptr: int + The raw pointer to a C ArrowArray struct. + type: Schema or int + Either a Schema object, or the raw pointer to a C ArrowSchema + struct. + + This is a low-level function intended for expert users. + """ + cdef: + void* c_ptr = _as_c_pointer(in_ptr) + void* c_schema_ptr + shared_ptr[CRecordBatch] c_batch + + c_schema = pyarrow_unwrap_schema(schema) + if c_schema == nullptr: + # Not a Schema object, perhaps a raw ArrowSchema pointer + c_schema_ptr = _as_c_pointer(schema, allow_null=True) + with nogil: + c_batch = GetResultValue(ImportRecordBatch( + c_ptr, c_schema_ptr)) + else: + with nogil: + c_batch = GetResultValue(ImportRecordBatch( + c_ptr, c_schema)) + return pyarrow_wrap_batch(c_batch) + + def __arrow_c_array__(self, requested_schema=None): + """ + Get a pair of PyCapsules containing a C ArrowArray representation of the object. + + Parameters + ---------- + requested_schema : PyCapsule | None + A PyCapsule containing a C ArrowSchema representation of a requested + schema. PyArrow will attempt to cast the batch to this schema. + If None, the schema will be returned as-is, with a schema matching the + one returned by :meth:`__arrow_c_schema__()`. + + Returns + ------- + Tuple[PyCapsule, PyCapsule] + A pair of PyCapsules containing a C ArrowSchema and ArrowArray, + respectively. + """ + cdef: + ArrowArray* c_array + ArrowSchema* c_schema + + if requested_schema is not None: + target_schema = Schema._import_from_c_capsule(requested_schema) + + if target_schema != self.schema: + try: + # We don't expose .cast() on RecordBatch, only on Table. + casted_batch = Table.from_batches([self]).cast( + target_schema, safe=True).to_batches()[0] + inner_batch = pyarrow_unwrap_batch(casted_batch) + except ArrowInvalid as e: + raise ValueError( + f"Could not cast {self.schema} to requested schema {target_schema}: {e}" + ) + else: + inner_batch = self.sp_batch + else: + inner_batch = self.sp_batch + + schema_capsule = alloc_c_schema(&c_schema) + array_capsule = alloc_c_array(&c_array) + + with nogil: + check_status(ExportRecordBatch(deref(inner_batch), c_array, c_schema)) + + return schema_capsule, array_capsule + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export the batch as an Arrow C stream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + Currently, this is not supported and will raise a + NotImplementedError if the schema doesn't match the current schema. 
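+
+        The returned capsule follows the Arrow PyCapsule stream
+        interface, so any consumer implementing that protocol can import
+        the batch from it.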
+ + Returns + ------- + PyCapsule + """ + return Table.from_batches([self]).__arrow_c_stream__(requested_schema) + + @staticmethod + def _import_from_c_capsule(schema_capsule, array_capsule): + """ + Import RecordBatch from a pair of PyCapsules containing a C ArrowArray + and ArrowSchema, respectively. + + Parameters + ---------- + schema_capsule : PyCapsule + A PyCapsule containing a C ArrowSchema representation of the schema. + array_capsule : PyCapsule + A PyCapsule containing a C ArrowArray representation of the array. + + Returns + ------- + pyarrow.RecordBatch + """ + cdef: + ArrowSchema* c_schema + ArrowArray* c_array + shared_ptr[CRecordBatch] c_batch + + c_schema = PyCapsule_GetPointer(schema_capsule, 'arrow_schema') + c_array = PyCapsule_GetPointer(array_capsule, 'arrow_array') + + with nogil: + c_batch = GetResultValue(ImportRecordBatch(c_array, c_schema)) + + return pyarrow_wrap_batch(c_batch) + + +def _reconstruct_record_batch(columns, schema): + """ + Internal: reconstruct RecordBatch from pickled components. + """ + return RecordBatch.from_arrays(columns, schema=schema) + + +def table_to_blocks(options, Table table, categories, extension_columns): + cdef: + PyObject* result_obj + shared_ptr[CTable] c_table + CMemoryPool* pool + PandasOptions c_options = _convert_pandas_options(options) + + if categories is not None: + c_options.categorical_columns = {tobytes(cat) for cat in categories} + if extension_columns is not None: + c_options.extension_columns = {tobytes(col) + for col in extension_columns} + + if pandas_api.is_v1(): + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + c_options.coerce_temporal_nanoseconds = True + + if c_options.self_destruct: + # Move the shared_ptr, table is now unsafe to use further + c_table = move(table.sp_table) + table.table = NULL + else: + c_table = table.sp_table + + with nogil: + check_status( + libarrow_python.ConvertTableToPandas(c_options, move(c_table), + &result_obj) + ) + + return PyObject_to_object(result_obj) + + +cdef class Table(_Tabular): + """ + A collection of top-level named, equal length Arrow arrays. + + Warnings + -------- + Do not call this class's constructor directly, use one of the ``from_*`` + methods instead. + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from arrays: + + >>> pa.Table.from_arrays([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a RecordBatch: + + >>> batch = pa.record_batch([n_legs, animals], names=names) + >>> pa.Table.from_batches([batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.Table.from_pandas(df) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a dictionary of arrays: + + >>> pydict = {'n_legs': n_legs, 'animals': animals} + >>> pa.Table.from_pydict(pydict) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_pydict(pydict).schema + n_legs: int64 + animals: string + + Construct a Table from a dictionary of arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_pydict(pydict, metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from a list of rows: + + >>> pylist = [{'n_legs': 2, 'animals': 'Flamingo'}, {'year': 2021, 'animals': 'Centipede'}] + >>> pa.Table.from_pylist(pylist) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,null]] + animals: [["Flamingo","Centipede"]] + + Construct a Table from a list of rows with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('year', pa.int64()), + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"year": "Year of entry"}) + >>> pa.Table.from_pylist(pylist, schema=my_schema).schema + year: int64 + n_legs: int64 + animals: string + -- schema metadata -- + year: 'Year of entry' + + Construct a Table with :func:`pyarrow.table`: + + >>> pa.table([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + + def __cinit__(self): + self.table = NULL + + cdef void init(self, const shared_ptr[CTable]& table): + self.sp_table = table + self.table = table.get() + + def _is_initialized(self): + return self.table != NULL + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. + + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.table.ValidateFull()) + else: + with nogil: + check_status(self.table.Validate()) + + def __reduce__(self): + # Reduce the columns as ChunkedArrays to avoid serializing schema + # data twice + columns = [col for col in self.columns] + return _reconstruct_table, (columns, self.schema) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this Table. + + Parameters + ---------- + offset : int, default 0 + Offset from start of table to slice. + length : int, default None + Length of slice (default is until end of table starting from + offset). + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.slice(length=3) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019]] + n_legs: [[2,4,5]] + animals: [["Flamingo","Horse","Brittle stars"]] + >>> table.slice(offset=2) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2019,2021]] + n_legs: [[5,100]] + animals: [["Brittle stars","Centipede"]] + >>> table.slice(offset=2, length=1) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2019]] + n_legs: [[5]] + animals: [["Brittle stars"]] + """ + cdef shared_ptr[CTable] result + + if offset < 0: + raise IndexError('Offset must be non-negative') + + offset = min(len(self), offset) + if length is None: + result = self.table.Slice(offset) + else: + result = self.table.Slice(offset, length) + + return pyarrow_wrap_table(result) + + def filter(self, mask, object null_selection_behavior="drop"): + """ + Select rows from the table. + + The Table can be filtered based on a mask, which will be passed to + :func:`pyarrow.compute.filter` to perform the filtering, or it can + be filtered through a boolean :class:`.Expression` + + Parameters + ---------- + mask : Array or array-like or .Expression + The boolean mask or the :class:`.Expression` to filter the table with. + null_selection_behavior : str, default "drop" + How nulls in the mask should be handled, does nothing if + an :class:`.Expression` is used. + + Returns + ------- + filtered : Table + A table of the same schema, with only the rows selected + by applied filtering + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Define an expression and select rows: + + >>> import pyarrow.compute as pc + >>> expr = pc.field("year") <= 2020 + >>> table.filter(expr) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2019]] + n_legs: [[2,5]] + animals: [["Flamingo","Brittle stars"]] + + Define a mask and select rows: + + >>> mask=[True, True, False, None] + >>> table.filter(mask) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022]] + n_legs: [[2,4]] + animals: [["Flamingo","Horse"]] + >>> table.filter(mask, null_selection_behavior='emit_null') + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,null]] + n_legs: [[2,4,null]] + animals: [["Flamingo","Horse",null]] + """ + if isinstance(mask, _pc().Expression): + return _pac()._filter_table(self, mask) + else: + return _pc().filter(self, mask, null_selection_behavior) + + def select(self, object columns): + """ + Select columns of the Table. + + Returns a new Table with the specified columns, and metadata + preserved. + + Parameters + ---------- + columns : list-like + The column names or integer indices to select. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.select([0,1]) + pyarrow.Table + year: int64 + n_legs: int64 + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + >>> table.select(["year"]) + pyarrow.Table + year: int64 + ---- + year: [[2020,2022,2019,2021]] + """ + cdef: + shared_ptr[CTable] c_table + vector[int] c_indices + + for idx in columns: + idx = self._ensure_integer_index(idx) + idx = _normalize_index(idx, self.num_columns) + c_indices.push_back( idx) + + with nogil: + c_table = GetResultValue(self.table.SelectColumns(move(c_indices))) + + return pyarrow_wrap_table(c_table) + + def replace_schema_metadata(self, metadata=None): + """ + Create shallow copy of table by replacing schema + key-value metadata with the indicated new metadata (which may be None), + which deletes any existing metadata. + + Parameters + ---------- + metadata : dict, default None + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Constructing a Table with pyarrow schema and metadata: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> table= pa.table(df, my_schema) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: ... + + Create a shallow copy of a Table with deleted schema metadata: + + >>> table.replace_schema_metadata().schema + n_legs: int64 + animals: string + + Create a shallow copy of a Table with new schema metadata: + + >>> metadata={"animals": "Which animal"} + >>> table.replace_schema_metadata(metadata = metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + animals: 'Which animal' + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CTable] c_table + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + with nogil: + c_table = self.table.ReplaceSchemaMetadata(c_meta) + + return pyarrow_wrap_table(c_table) + + def flatten(self, MemoryPool memory_pool=None): + """ + Flatten this Table. + + Each column with a struct type is flattened + into one column per struct field. Other columns are left unchanged. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> month = pa.array([4, 6]) + >>> table = pa.Table.from_arrays([struct,month], + ... 
names = ["a", "month"]) + >>> table + pyarrow.Table + a: struct + child 0, animals: string + child 1, n_legs: int64 + child 2, year: int64 + month: int64 + ---- + a: [ + -- is_valid: all not null + -- child 0 type: string + ["Parrot",null] + -- child 1 type: int64 + [2,4] + -- child 2 type: int64 + [null,2022]] + month: [[4,6]] + + Flatten the columns with struct field: + + >>> table.flatten() + pyarrow.Table + a.animals: string + a.n_legs: int64 + a.year: int64 + month: int64 + ---- + a.animals: [["Parrot",null]] + a.n_legs: [[2,4]] + a.year: [[null,2022]] + month: [[4,6]] + """ + cdef: + shared_ptr[CTable] flattened + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + flattened = GetResultValue(self.table.Flatten(pool)) + + return pyarrow_wrap_table(flattened) + + def combine_chunks(self, MemoryPool memory_pool=None): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the ChunkedArray of each column are + concatenated into zero or one chunk. + + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array([["Flamingo", "Parrot", "Dog"], ["Horse", "Brittle stars", "Centipede"]]) + >>> names = ["n_legs", "animals"] + >>> table = pa.table([n_legs, animals], names=names) + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4],[4,5,100]] + animals: [["Flamingo","Parrot","Dog"],["Horse","Brittle stars","Centipede"]] + >>> table.combine_chunks() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4,4,5,100]] + animals: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] combined + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + combined = GetResultValue(self.table.CombineChunks(pool)) + + return pyarrow_wrap_table(combined) + + def unify_dictionaries(self, MemoryPool memory_pool=None): + """ + Unify dictionaries across all chunks. + + This method returns an equivalent table, but where all chunks of + each column share the same dictionary values. Dictionary indices + are transposed accordingly. + + Columns without dictionaries are returned unchanged. 
+ + Parameters + ---------- + memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> arr_1 = pa.array(["Flamingo", "Parrot", "Dog"]).dictionary_encode() + >>> arr_2 = pa.array(["Horse", "Brittle stars", "Centipede"]).dictionary_encode() + >>> c_arr = pa.chunked_array([arr_1, arr_2]) + >>> table = pa.table([c_arr], names=["animals"]) + >>> table + pyarrow.Table + animals: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Parrot","Dog"] -- indices: + [0,1,2], -- dictionary: + ["Horse","Brittle stars","Centipede"] -- indices: + [0,1,2]] + + Unify dictionaries across both chunks: + + >>> table.unify_dictionaries() + pyarrow.Table + animals: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] -- indices: + [0,1,2], -- dictionary: + ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] -- indices: + [3,4,5]] + """ + cdef: + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + shared_ptr[CTable] c_result + + with nogil: + c_result = GetResultValue(CDictionaryUnifier.UnifyTable( + deref(self.table), pool)) + + return pyarrow_wrap_table(c_result) + + def equals(self, Table other, bint check_metadata=False): + """ + Check if contents of two tables are equal. + + Parameters + ---------- + other : pyarrow.Table + Table to compare against. + check_metadata : bool, default False + Whether schema metadata equality should be checked as well. + + Returns + ------- + bool + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names=["n_legs", "animals"] + >>> table = pa.Table.from_arrays([n_legs, animals], names=names) + >>> table_0 = pa.Table.from_arrays([]) + >>> table_1 = pa.Table.from_arrays([n_legs, animals], + ... names=names, + ... metadata={"n_legs": "Number of legs per animal"}) + >>> table.equals(table) + True + >>> table.equals(table_0) + False + >>> table.equals(table_1) + True + >>> table.equals(table_1, check_metadata=True) + False + """ + if other is None: + return False + + cdef: + CTable* this_table = self.table + CTable* other_table = other.table + c_bool result + + with nogil: + result = this_table.Equals(deref(other_table), check_metadata) + + return result + + def cast(self, Schema target_schema, safe=None, options=None): + """ + Cast table values to another schema. + + Parameters + ---------- + target_schema : Schema + Schema to cast to, the names and order of fields must match. + safe : bool, default True + Check for overflows or other unsafe conversions. + options : CastOptions, default None + Additional checks pass by CastOptions + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, ... + + Define new schema and cast table values: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.duration('s')), + ... pa.field('animals', pa.string())] + ... 
) + >>> table.cast(target_schema=my_schema) + pyarrow.Table + n_legs: duration[s] + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + ChunkedArray column, casted + Field field + list newcols = [] + + if self.schema.names != target_schema.names: + raise ValueError("Target schema's field names are not matching " + "the table's field names: {!r}, {!r}" + .format(self.schema.names, target_schema.names)) + + for column, field in zip(self.itercolumns(), target_schema): + if not field.nullable and column.null_count > 0: + raise ValueError("Casting field {!r} with null values to non-nullable" + .format(field.name)) + casted = column.cast(field.type, safe=safe, options=options) + newcols.append(casted) + + return Table.from_arrays(newcols, schema=target_schema) + + @classmethod + def from_pandas(cls, df, Schema schema=None, preserve_index=None, + nthreads=None, columns=None, bint safe=True): + """ + Convert pandas.DataFrame to an Arrow Table. + + The column types in the resulting Arrow Table are inferred from the + dtypes of the pandas.Series in the DataFrame. In the case of non-object + Series, the NumPy dtype is translated to its Arrow equivalent. In the + case of `object`, we need to guess the datatype by looking at the + Python objects in this Series. + + Be aware that Series of the `object` dtype don't carry enough + information to always lead to a meaningful Arrow type. In the case that + we cannot infer a type, e.g. because the DataFrame is of length 0 or + the Series only contains None/nan objects, the type is set to + null. This behavior can be avoided by constructing an explicit schema + and passing it to this function. + + Parameters + ---------- + df : pandas.DataFrame + schema : pyarrow.Schema, optional + The expected schema of the Arrow Table. This can be used to + indicate the type of columns if we cannot infer it automatically. + If passed, the output will have exactly this schema. Columns + specified in the schema that are not found in the DataFrame columns + or its index will raise an error. Additional columns or index + levels in the DataFrame which are not specified in the schema will + be ignored. + preserve_index : bool, optional + Whether to store the index as an additional column in the resulting + ``Table``. The default of None will store the index as a column, + except for RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + nthreads : int, default None + If greater than 1, convert columns to Arrow in parallel using + indicated number of threads. By default, this follows + :func:`pyarrow.cpu_count` (may use up to system CPU count threads). + columns : list, optional + List of column to be converted. If None, use all columns. + safe : bool, default True + Check for overflows or other unsafe conversions. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.Table.from_pandas(df) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + from pyarrow.pandas_compat import dataframe_to_arrays + arrays, schema, n_rows = dataframe_to_arrays( + df, + schema=schema, + preserve_index=preserve_index, + nthreads=nthreads, + columns=columns, + safe=safe + ) + + # If df is empty but row index is not, create empty Table with rows >0 + cdef vector[shared_ptr[CChunkedArray]] c_arrays + if n_rows: + return pyarrow_wrap_table( + CTable.MakeWithRows(( schema).sp_schema, c_arrays, n_rows)) + else: + return cls.from_arrays(arrays, schema=schema) + + @staticmethod + def from_arrays(arrays, names=None, schema=None, metadata=None): + """ + Construct a Table from Arrow arrays. + + Parameters + ---------- + arrays : list of pyarrow.Array or pyarrow.ChunkedArray + Equal-length arrays that should form the table. + names : list of str, optional + Names for the table columns. If not passed, schema must be passed. + schema : Schema, default None + Schema for the created table. If not passed, names must be passed. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from arrays: + + >>> pa.Table.from_arrays([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.Table.from_arrays([n_legs, animals], + ... names=names, + ... metadata=my_metadata) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_arrays([n_legs, animals], + ... names=names, + ... metadata=my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from arrays with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"animals": "Name of the animal species"}) + >>> pa.Table.from_arrays([n_legs, animals], + ... schema=my_schema) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + >>> pa.Table.from_arrays([n_legs, animals], + ... 
schema=my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + animals: 'Name of the animal species' + """ + cdef: + vector[shared_ptr[CChunkedArray]] columns + shared_ptr[CSchema] c_schema + int i, K = len(arrays) + + converted_arrays = _sanitize_arrays(arrays, names, schema, metadata, + &c_schema) + + columns.reserve(K) + for item in converted_arrays: + if isinstance(item, Array): + columns.push_back( + make_shared[CChunkedArray]( + ( item).sp_array + ) + ) + elif isinstance(item, ChunkedArray): + columns.push_back(( item).sp_chunked_array) + else: + raise TypeError(type(item)) + + result = pyarrow_wrap_table(CTable.Make(c_schema, columns)) + result.validate() + return result + + @staticmethod + def from_struct_array(struct_array): + """ + Construct a Table from a StructArray. + + Each field in the StructArray will become a column in the resulting + ``Table``. + + Parameters + ---------- + struct_array : StructArray or ChunkedArray + Array to construct the table from. + + Returns + ------- + pyarrow.Table + + Examples + -------- + >>> import pyarrow as pa + >>> struct = pa.array([{'n_legs': 2, 'animals': 'Parrot'}, + ... {'year': 2022, 'n_legs': 4}]) + >>> pa.Table.from_struct_array(struct).to_pandas() + animals n_legs year + 0 Parrot 2 NaN + 1 None 4 2022.0 + """ + if isinstance(struct_array, Array): + return Table.from_batches([RecordBatch.from_struct_array(struct_array)]) + else: + return Table.from_batches([ + RecordBatch.from_struct_array(chunk) + for chunk in struct_array.chunks + ]) + + def to_struct_array(self, max_chunksize=None): + """ + Convert to a chunked array of struct type. + + Parameters + ---------- + max_chunksize : int, default None + Maximum size for ChunkedArray chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. + + Returns + ------- + ChunkedArray + """ + return chunked_array([ + batch.to_struct_array() + for batch in self.to_batches(max_chunksize=max_chunksize) + ]) + + @staticmethod + def from_batches(batches, Schema schema=None): + """ + Construct a Table from a sequence or iterator of Arrow RecordBatches. + + Parameters + ---------- + batches : sequence or iterator of RecordBatch + Sequence of RecordBatch to be converted, all schemas must be equal. + schema : Schema, default None + If not passed, will be inferred from the first RecordBatch. 
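+
+ Passing ``schema`` also makes it possible to build an empty table when
+ the iterable yields no batches, as in this sketch (illustrative only)::
+
+     import pyarrow as pa
+
+     schema = pa.schema([("n_legs", pa.int64()), ("animals", pa.string())])
+     empty = pa.Table.from_batches([], schema=schema)
+     # Zero rows, but the column layout is fully defined.
+     assert empty.num_rows == 0 and empty.schema == schema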
+ + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + >>> batch = pa.record_batch([n_legs, animals], names=names) + >>> batch.to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + + Construct a Table from a RecordBatch: + + >>> pa.Table.from_batches([batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from a sequence of RecordBatches: + + >>> pa.Table.from_batches([batch, batch]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100],[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"],["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + vector[shared_ptr[CRecordBatch]] c_batches + shared_ptr[CTable] c_table + shared_ptr[CSchema] c_schema + RecordBatch batch + + for batch in batches: + c_batches.push_back(batch.sp_batch) + + if schema is None: + if c_batches.size() == 0: + raise ValueError('Must pass schema, or at least ' + 'one RecordBatch') + c_schema = c_batches[0].get().schema() + else: + c_schema = schema.sp_schema + + with nogil: + c_table = GetResultValue( + CTable.FromRecordBatches(c_schema, move(c_batches))) + + return pyarrow_wrap_table(c_table) + + def to_batches(self, max_chunksize=None): + """ + Convert Table to a list of RecordBatch objects. + + Note that this method is zero-copy, it merely exposes the same data + under a different API. + + Parameters + ---------- + max_chunksize : int, default None + Maximum size for RecordBatch chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. + + Returns + ------- + list[RecordBatch] + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Convert a Table to a RecordBatch: + + >>> table.to_batches()[0].to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + + Convert a Table to a list of RecordBatches: + + >>> table.to_batches(max_chunksize=2)[0].to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + >>> table.to_batches(max_chunksize=2)[1].to_pandas() + n_legs animals + 0 5 Brittle stars + 1 100 Centipede + """ + cdef: + unique_ptr[TableBatchReader] reader + int64_t c_max_chunksize + list result = [] + shared_ptr[CRecordBatch] batch + + reader.reset(new TableBatchReader(deref(self.table))) + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + reader.get().set_chunksize(c_max_chunksize) + + while True: + with nogil: + check_status(reader.get().ReadNext(&batch)) + + if batch.get() == NULL: + break + + result.append(pyarrow_wrap_batch(batch)) + + return result + + def to_reader(self, max_chunksize=None): + """ + Convert the Table to a RecordBatchReader. + + Note that this method is zero-copy, it merely exposes the same data + under a different API. + + Parameters + ---------- + max_chunksize : int, default None + Maximum size for RecordBatch chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. 
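+
+ The reader can be consumed incrementally, which is useful when
+ streaming a large table out in bounded batches. A sketch
+ (illustrative only)::
+
+     import pyarrow as pa
+
+     table = pa.table({"n_legs": [2, 4, 5, 100]})
+     reader = table.to_reader(max_chunksize=2)
+     # Iterating the reader yields RecordBatch objects lazily.
+     sizes = [len(batch) for batch in reader]
+     assert sizes == [2, 2]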
+ + Returns + ------- + RecordBatchReader + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Convert a Table to a RecordBatchReader: + + >>> table.to_reader() + + + >>> reader = table.to_reader() + >>> reader.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, ... + >>> reader.read_all() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CRecordBatchReader] c_reader + RecordBatchReader reader + shared_ptr[TableBatchReader] t_reader + t_reader = make_shared[TableBatchReader](self.sp_table) + + if max_chunksize is not None: + t_reader.get().set_chunksize(max_chunksize) + + c_reader = dynamic_pointer_cast[CRecordBatchReader, TableBatchReader]( + t_reader) + reader = RecordBatchReader.__new__(RecordBatchReader) + reader.reader = c_reader + return reader + + def _to_pandas(self, options, categories=None, ignore_metadata=False, + types_mapper=None): + from pyarrow.pandas_compat import table_to_dataframe + df = table_to_dataframe( + options, self, categories, + ignore_metadata=ignore_metadata, + types_mapper=types_mapper) + return df + + @property + def schema(self): + """ + Schema of the table and its columns. + + Returns + ------- + Schema + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.schema + n_legs: int64 + animals: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' ... + """ + return pyarrow_wrap_schema(self.table.schema()) + + def _column(self, int i): + """ + Select a column by its numeric index. + + Parameters + ---------- + i : int + The index of the column to retrieve. + + Returns + ------- + ChunkedArray + """ + cdef int index = _normalize_index(i, self.num_columns) + cdef ChunkedArray result = pyarrow_wrap_chunked_array( + self.table.column(index)) + result._name = self.schema[index].name + return result + + @property + def num_columns(self): + """ + Number of columns in this table. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.num_columns + 2 + """ + return self.table.num_columns() + + @property + def num_rows(self): + """ + Number of rows in this table. + + Due to the definition of a table, all columns have the same number of + rows. + + Returns + ------- + int + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.num_rows + 4 + """ + return self.table.num_rows() + + @property + def shape(self): + """ + Dimensions of the table: (#rows, #columns). + + Returns + ------- + (int, int) + Number of rows and number of columns. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.shape + (4, 2) + """ + return (self.num_rows, self.num_columns) + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the table. + + In other words, the sum of bytes from all buffer ranges referenced. + + Unlike `get_total_buffer_size` this method will account for array + offsets. + + If buffers are shared between arrays then the shared + portion will only be counted multiple times. + + The dictionary of dictionary arrays will always be counted in their + entirety even if the array only references a portion of the dictionary. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.nbytes + 72 + """ + cdef: + CResult[int64_t] c_res_buffer + + with nogil: + c_res_buffer = ReferencedBufferSize(deref(self.table)) + size = GetResultValue(c_res_buffer) + return size + + def get_total_buffer_size(self): + """ + The sum of bytes in each buffer referenced by the table. + + An array may only reference a portion of a buffer. + This method will overestimate in this case and return the + byte size of the entire buffer. + + If a buffer is referenced multiple times then it will + only be counted once. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [None, 4, 5, None], + ... 'animals': ["Flamingo", "Horse", None, "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.get_total_buffer_size() + 76 + """ + cdef: + int64_t total_buffer_size + + total_buffer_size = TotalBufferSize(deref(self.table)) + return total_buffer_size + + def __sizeof__(self): + return super(Table, self).__sizeof__() + self.nbytes + + def add_column(self, int i, field_, column): + """ + Add column to Table at position. + + A new table is returned with the column added, the original table + object is left unchanged. + + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array, list of Array, or values coercible to arrays + Column data. + + Returns + ------- + Table + New table with the passed column added. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Add column: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.add_column(0,"year", [year]) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2021,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Original table is left unchanged: + + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] c_table + Field c_field + ChunkedArray c_arr + + if isinstance(column, ChunkedArray): + c_arr = column + else: + c_arr = chunked_array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_table = GetResultValue(self.table.AddColumn( + i, c_field.sp_field, c_arr.sp_chunked_array)) + + return pyarrow_wrap_table(c_table) + + def append_column(self, field_, column): + """ + Append column at end of columns. + + Parameters + ---------- + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array, list of Array, or values coercible to arrays + Column data. + + Returns + ------- + Table + New table with the passed column added. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Append column at the end: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.append_column('year', [year]) + pyarrow.Table + n_legs: int64 + animals: string + year: int64 + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + year: [[2021,2022,2019,2021]] + """ + return self.add_column(self.num_columns, field_, column) + + def remove_column(self, int i): + """ + Create new Table with the indicated column removed. + + Parameters + ---------- + i : int + Index of column to remove. + + Returns + ------- + Table + New table without the column. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.remove_column(1) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,4,5,100]] + """ + cdef shared_ptr[CTable] c_table + + with nogil: + c_table = GetResultValue(self.table.RemoveColumn(i)) + + return pyarrow_wrap_table(c_table) + + def set_column(self, int i, field_, column): + """ + Replace column in Table at position. + + Parameters + ---------- + i : int + Index to place the column at. + field_ : str or Field + If a string is passed then the type is deduced from the column + data. + column : Array, list of Array, or values coercible to arrays + Column data. + + Returns + ------- + Table + New table with the passed column set. + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Replace a column: + + >>> year = [2021, 2022, 2019, 2021] + >>> table.set_column(1,'year', [year]) + pyarrow.Table + n_legs: int64 + year: int64 + ---- + n_legs: [[2,4,5,100]] + year: [[2021,2022,2019,2021]] + """ + cdef: + shared_ptr[CTable] c_table + Field c_field + ChunkedArray c_arr + + if isinstance(column, ChunkedArray): + c_arr = column + else: + c_arr = chunked_array(column) + + if isinstance(field_, Field): + c_field = field_ + else: + c_field = field(field_, c_arr.type) + + with nogil: + c_table = GetResultValue(self.table.SetColumn( + i, c_field.sp_field, c_arr.sp_chunked_array)) + + return pyarrow_wrap_table(c_table) + + def rename_columns(self, names): + """ + Create new table with columns renamed to provided names. + + Parameters + ---------- + names : list of str + List of new column names. + + Returns + ------- + Table + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> new_names = ["n", "name"] + >>> table.rename_columns(new_names) + pyarrow.Table + n: int64 + name: string + ---- + n: [[2,4,5,100]] + name: [["Flamingo","Horse","Brittle stars","Centipede"]] + """ + cdef: + shared_ptr[CTable] c_table + vector[c_string] c_names + + for name in names: + c_names.push_back(tobytes(name)) + + with nogil: + c_table = GetResultValue(self.table.RenameColumns(move(c_names))) + + return pyarrow_wrap_table(c_table) + + def drop_columns(self, columns): + """ + Drop one or more columns and return a new table. + + Parameters + ---------- + columns : str or list[str] + Field name(s) referencing existing column(s). + + Raises + ------ + KeyError + If any of the passed column names do not exist. + + Returns + ------- + Table + New table without the column(s). + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + + Drop one column: + + >>> table.drop_columns("animals") + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,4,5,100]] + + Drop one or more columns: + + >>> table.drop_columns(["n_legs", "animals"]) + pyarrow.Table + ... + ---- + """ + if isinstance(columns, str): + columns = [columns] + + indices = [] + for col in columns: + idx = self.schema.get_field_index(col) + if idx == -1: + raise KeyError("Column {!r} not found".format(col)) + indices.append(idx) + + indices.sort() + indices.reverse() + + table = self + for idx in indices: + table = table.remove_column(idx) + + return table + + def drop(self, columns): + """ + Drop one or more columns and return a new table. + + Alias of Table.drop_columns, but kept for backwards compatibility. + + Parameters + ---------- + columns : str or list[str] + Field name(s) referencing existing column(s). + + Returns + ------- + Table + New table without the column(s). + """ + return self.drop_columns(columns) + + def group_by(self, keys, use_threads=True): + """ + Declare a grouping over the columns of the table. + + Resulting grouping can then be used to perform aggregations + with a subsequent ``aggregate()`` method. + + Parameters + ---------- + keys : str or list[str] + Name of the columns that should be used as the grouping key. 
+ use_threads : bool, default True + Whether to use multithreading or not. When set to True (the + default), no stable ordering of the output is guaranteed. + + Returns + ------- + TableGroupBy + + See Also + -------- + TableGroupBy.aggregate + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> table.group_by('year').aggregate([('n_legs', 'sum')]) + pyarrow.Table + year: int64 + n_legs_sum: int64 + ---- + year: [[2020,2022,2021,2019]] + n_legs_sum: [[2,6,104,5]] + """ + return TableGroupBy(self, keys, use_threads=use_threads) + + def join(self, right_table, keys, right_keys=None, join_type="left outer", + left_suffix=None, right_suffix=None, coalesce_keys=True, + use_threads=True): + """ + Perform a join between this table and another one. + + Result of the join will be a new Table, where further + operations can be applied. + + Parameters + ---------- + right_table : Table + The table to join to the current one, acting as the right table + in the join operation. + keys : str or list[str] + The columns from current table that should be used as keys + of the join operation left side. + right_keys : str or list[str], default None + The columns from the right_table that should be used as keys + on the join operation right side. + When ``None`` use the same key names as the left table. + join_type : str, default "left outer" + The kind of join that should be performed, one of + ("left semi", "right semi", "left anti", "right anti", + "inner", "left outer", "right outer", "full outer") + left_suffix : str, default None + Which suffix to add to left column names. This prevents confusion + when the columns in left and right tables have colliding names. + right_suffix : str, default None + Which suffix to add to the right column names. This prevents confusion + when the columns in left and right tables have colliding names. + coalesce_keys : bool, default True + If the duplicated keys should be omitted from one of the sides + in the join result. + use_threads : bool, default True + Whether to use multithreading or not. + + Returns + ------- + Table + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df1 = pd.DataFrame({'id': [1, 2, 3], + ... 'year': [2020, 2022, 2019]}) + >>> df2 = pd.DataFrame({'id': [3, 4], + ... 'n_legs': [5, 100], + ... 
'animal': ["Brittle stars", "Centipede"]}) + >>> t1 = pa.Table.from_pandas(df1) + >>> t2 = pa.Table.from_pandas(df2) + + Left outer join: + + >>> t1.join(t2, 'id').combine_chunks().sort_by('year') + pyarrow.Table + id: int64 + year: int64 + n_legs: int64 + animal: string + ---- + id: [[3,1,2]] + year: [[2019,2020,2022]] + n_legs: [[5,null,null]] + animal: [["Brittle stars",null,null]] + + Full outer join: + + >>> t1.join(t2, 'id', join_type="full outer").combine_chunks().sort_by('year') + pyarrow.Table + id: int64 + year: int64 + n_legs: int64 + animal: string + ---- + id: [[3,1,2,4]] + year: [[2019,2020,2022,null]] + n_legs: [[5,null,null,100]] + animal: [["Brittle stars",null,null,"Centipede"]] + + Right outer join: + + >>> t1.join(t2, 'id', join_type="right outer").combine_chunks().sort_by('year') + pyarrow.Table + year: int64 + id: int64 + n_legs: int64 + animal: string + ---- + year: [[2019,null]] + id: [[3,4]] + n_legs: [[5,100]] + animal: [["Brittle stars","Centipede"]] + + Right anti join + + >>> t1.join(t2, 'id', join_type="right anti") + pyarrow.Table + id: int64 + n_legs: int64 + animal: string + ---- + id: [[4]] + n_legs: [[100]] + animal: [["Centipede"]] + """ + if right_keys is None: + right_keys = keys + return _pac()._perform_join( + join_type, self, keys, right_table, right_keys, + left_suffix=left_suffix, right_suffix=right_suffix, + use_threads=use_threads, coalesce_keys=coalesce_keys, + output_type=Table + ) + + def __arrow_c_stream__(self, requested_schema=None): + """ + Export the table as an Arrow C stream PyCapsule. + + Parameters + ---------- + requested_schema : PyCapsule, default None + The schema to which the stream should be casted, passed as a + PyCapsule containing a C ArrowSchema representation of the + requested schema. + Currently, this is not supported and will raise a + NotImplementedError if the schema doesn't match the current schema. + + Returns + ------- + PyCapsule + """ + return self.to_reader().__arrow_c_stream__(requested_schema) + + +def _reconstruct_table(arrays, schema): + """ + Internal: reconstruct pa.Table from pickled components. + """ + return Table.from_arrays(arrays, schema=schema) + + +def record_batch(data, names=None, schema=None, metadata=None): + """ + Create a pyarrow.RecordBatch from another Python data structure or sequence + of arrays. + + Parameters + ---------- + data : pandas.DataFrame, list, Arrow-compatible table + A DataFrame, list of arrays or chunked arrays, or a tabular object + implementing the Arrow PyCapsule Protocol (has an + ``__arrow_c_array__`` method). + names : list, default None + Column names if list of arrays passed as data. Mutually exclusive with + 'schema' argument. + schema : Schema, default None + The expected schema of the RecordBatch. If not passed, will be inferred + from the data. Mutually exclusive with 'names' argument. + metadata : dict or Mapping, default None + Optional metadata for the schema (if schema not passed). 
+ + Returns + ------- + RecordBatch + + See Also + -------- + RecordBatch.from_arrays, RecordBatch.from_pandas, table + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 2, 4, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Parrot", "Dog", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Creating a RecordBatch from a list of arrays with names: + + >>> pa.record_batch([n_legs, animals], names=names) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.record_batch([n_legs, animals], names=["n_legs", "animals"]).to_pandas() + n_legs animals + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + + Creating a RecordBatch from a list of arrays with names and metadata: + + >>> my_metadata={"n_legs": "How many legs does an animal have?"} + >>> pa.record_batch([n_legs, animals], + ... names=names, + ... metadata = my_metadata) + pyarrow.RecordBatch + n_legs: int64 + animals: string + ---- + n_legs: [2,2,4,4,5,100] + animals: ["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"] + >>> pa.record_batch([n_legs, animals], + ... names=names, + ... metadata = my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'How many legs does an animal have?' + + Creating a RecordBatch from a pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022], + ... 'month': [3, 5, 7, 9], + ... 'day': [1, 5, 9, 13], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.record_batch(df) + pyarrow.RecordBatch + year: int64 + month: int64 + day: int64 + n_legs: int64 + animals: string + ---- + year: [2020,2022,2021,2022] + month: [3,5,7,9] + day: [1,5,9,13] + n_legs: [2,4,5,100] + animals: ["Flamingo","Horse","Brittle stars","Centipede"] + + >>> pa.record_batch(df).to_pandas() + year month day n_legs animals + 0 2020 3 1 2 Flamingo + 1 2022 5 5 4 Horse + 2 2021 7 9 5 Brittle stars + 3 2022 9 13 100 Centipede + + Creating a RecordBatch from a pandas DataFrame with schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.record_batch(df, my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: ... + >>> pa.record_batch(df, my_schema).to_pandas() + n_legs animals + 0 2 Flamingo + 1 4 Horse + 2 5 Brittle stars + 3 100 Centipede + """ + # accept schema as first argument for backwards compatibility / usability + if isinstance(names, Schema) and schema is None: + schema = names + names = None + + if isinstance(data, (list, tuple)): + return RecordBatch.from_arrays(data, names=names, schema=schema, + metadata=metadata) + elif hasattr(data, "__arrow_c_array__"): + if schema is not None: + requested_schema = schema.__arrow_c_schema__() + else: + requested_schema = None + schema_capsule, array_capsule = data.__arrow_c_array__(requested_schema) + batch = RecordBatch._import_from_c_capsule(schema_capsule, array_capsule) + if schema is not None and batch.schema != schema: + # __arrow_c_array__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. 
+ batch = Table.from_batches([batch]).cast(schema).to_batches()[0] + return batch + elif _pandas_api.is_data_frame(data): + return RecordBatch.from_pandas(data, schema=schema) + else: + raise TypeError("Expected pandas DataFrame or list of arrays") + + +def table(data, names=None, schema=None, metadata=None, nthreads=None): + """ + Create a pyarrow.Table from a Python data structure or sequence of arrays. + + Parameters + ---------- + data : pandas.DataFrame, dict, list + A DataFrame, mapping of strings to Arrays or Python lists, or list of + arrays or chunked arrays. + names : list, default None + Column names if list of arrays passed as data. Mutually exclusive with + 'schema' argument. + schema : Schema, default None + The expected schema of the Arrow Table. If not passed, will be inferred + from the data. Mutually exclusive with 'names' argument. + If passed, the output will have exactly this schema (raising an error + when columns are not found in the data and ignoring additional data not + specified in the schema, when data is a dict or DataFrame). + metadata : dict or Mapping, default None + Optional metadata for the schema (if schema not passed). + nthreads : int, default None + For pandas.DataFrame inputs: if greater than 1, convert columns to + Arrow in parallel using indicated number of threads. By default, + this follows :func:`pyarrow.cpu_count` (may use up to system CPU count + threads). + + Returns + ------- + Table + + See Also + -------- + Table.from_arrays, Table.from_pandas, Table.from_pydict + + Examples + -------- + >>> import pyarrow as pa + >>> n_legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> names = ["n_legs", "animals"] + + Construct a Table from arrays: + + >>> pa.table([n_legs, animals], names=names) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from arrays with metadata: + + >>> my_metadata={"n_legs": "Number of legs per animal"} + >>> pa.table([n_legs, animals], names=names, metadata = my_metadata).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Construct a Table from pandas DataFrame: + + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2019, 2021], + ... 'n_legs': [2, 4, 5, 100], + ... 'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> pa.table(df) + pyarrow.Table + year: int64 + n_legs: int64 + animals: string + ---- + year: [[2020,2022,2019,2021]] + n_legs: [[2,4,5,100]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + + Construct a Table from pandas DataFrame with pyarrow schema: + + >>> my_schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> pa.table(df, my_schema).schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + pandas: '{"index_columns": [], "column_indexes": [{"name": null, ... 
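+
+ Construct a Table from an object implementing the Arrow C stream
+ protocol, such as a RecordBatchReader (shown as an illustrative sketch
+ rather than a verified doctest)::
+
+     batch = pa.record_batch([pa.array([2, 4])], names=["n_legs"])
+     reader = pa.RecordBatchReader.from_batches(batch.schema, [batch])
+     # pa.table() detects __arrow_c_stream__ and reads the whole stream.
+     tbl = pa.table(reader)
+     assert tbl.column("n_legs").to_pylist() == [2, 4]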
+ + Construct a Table from chunked arrays: + + >>> n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + >>> animals = pa.chunked_array([["Flamingo", "Parrot", "Dog"], ["Horse", "Brittle stars", "Centipede"]]) + >>> table = pa.table([n_legs, animals], names=names) + >>> table + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,2,4],[4,5,100]] + animals: [["Flamingo","Parrot","Dog"],["Horse","Brittle stars","Centipede"]] + """ + # accept schema as first argument for backwards compatibility / usability + if isinstance(names, Schema) and schema is None: + schema = names + names = None + + if isinstance(data, (list, tuple)): + return Table.from_arrays(data, names=names, schema=schema, + metadata=metadata) + elif isinstance(data, dict): + if names is not None: + raise ValueError( + "The 'names' argument is not valid when passing a dictionary") + return Table.from_pydict(data, schema=schema, metadata=metadata) + elif _pandas_api.is_data_frame(data): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "passing a pandas DataFrame") + return Table.from_pandas(data, schema=schema, nthreads=nthreads) + elif hasattr(data, "__arrow_c_stream__"): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "using Arrow PyCapsule Interface") + if schema is not None: + requested = schema.__arrow_c_schema__() + else: + requested = None + capsule = data.__arrow_c_stream__(requested) + reader = RecordBatchReader._import_from_c_capsule(capsule) + table = reader.read_all() + if schema is not None and table.schema != schema: + # __arrow_c_array__ coerces schema with best effort, so we might + # need to cast it if the producer wasn't able to cast to exact schema. + table = table.cast(schema) + return table + elif hasattr(data, "__arrow_c_array__"): + if names is not None or metadata is not None: + raise ValueError( + "The 'names' and 'metadata' arguments are not valid when " + "using Arrow PyCapsule Interface") + batch = record_batch(data, schema) + return Table.from_batches([batch]) + else: + raise TypeError( + "Expected pandas DataFrame, python dictionary or list of arrays") + + +def concat_tables(tables, MemoryPool memory_pool=None, str promote_options="none", **kwargs): + """ + Concatenate pyarrow.Table objects. + + If promote_options="none", a zero-copy concatenation will be performed. The schemas + of all the Tables must be the same (except the metadata), otherwise an + exception will be raised. The result Table will share the metadata with the + first table. + + If promote_options="default", any null type arrays will be casted to the type of other + arrays in the column of the same name. If a table is missing a particular + field, null values of the appropriate type will be generated to take the + place of the missing field. The new schema will share the metadata with the + first table. Each field in the new schema will share the metadata with the + first table which has the field defined. Note that type promotions may + involve additional allocations on the given ``memory_pool``. + + If promote_options="permissive", the behavior of default plus types will be promoted + to the common denominator that fits all the fields. + + Parameters + ---------- + tables : iterable of pyarrow.Table objects + Pyarrow tables to concatenate into a single Table. 
+ memory_pool : MemoryPool, default None + For memory allocations, if required, otherwise use default pool. + promote_options : str, default none + Accepts strings "none", "default" and "permissive". + **kwargs : dict, optional + + Examples + -------- + >>> import pyarrow as pa + >>> t1 = pa.table([ + ... pa.array([2, 4, 5, 100]), + ... pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + ... ], names=['n_legs', 'animals']) + >>> t2 = pa.table([ + ... pa.array([2, 4]), + ... pa.array(["Parrot", "Dog"]) + ... ], names=['n_legs', 'animals']) + >>> pa.concat_tables([t1,t2]) + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[2,4,5,100],[2,4]] + animals: [["Flamingo","Horse","Brittle stars","Centipede"],["Parrot","Dog"]] + + """ + cdef: + vector[shared_ptr[CTable]] c_tables + shared_ptr[CTable] c_result_table + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + Table table + CConcatenateTablesOptions options = ( + CConcatenateTablesOptions.Defaults()) + + if "promote" in kwargs: + warnings.warn( + "promote has been superseded by promote_options='default'.", + FutureWarning, stacklevel=2) + if kwargs['promote'] is True: + promote_options = "default" + + for table in tables: + c_tables.push_back(table.sp_table) + + if promote_options == "permissive": + options.field_merge_options = CField.CMergeOptions.Permissive() + elif promote_options in {"default", "none"}: + options.field_merge_options = CField.CMergeOptions.Defaults() + else: + raise ValueError(f"Invalid promote options: {promote_options}") + + with nogil: + options.unify_schemas = promote_options != "none" + c_result_table = GetResultValue( + ConcatenateTables(c_tables, options, pool)) + + return pyarrow_wrap_table(c_result_table) + + +def _from_pydict(cls, mapping, schema, metadata): + """ + Construct a Table/RecordBatch from Arrow arrays or columns. + + Parameters + ---------- + cls : Class Table/RecordBatch + mapping : dict or Mapping + A mapping of strings to Arrays or Python lists. + schema : Schema, default None + If not passed, will be inferred from the Mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). + + Returns + ------- + Table/RecordBatch + """ + + arrays = [] + if schema is None: + names = [] + for k, v in mapping.items(): + names.append(k) + arrays.append(asarray(v)) + return cls.from_arrays(arrays, names, metadata=metadata) + elif isinstance(schema, Schema): + for field in schema: + try: + v = mapping[field.name] + except KeyError: + try: + v = mapping[tobytes(field.name)] + except KeyError: + present = mapping.keys() + missing = [n for n in schema.names if n not in present] + raise KeyError( + "The passed mapping doesn't contain the " + "following field(s) of the schema: {}". + format(', '.join(missing)) + ) + arrays.append(asarray(v, type=field.type)) + # Will raise if metadata is not None + return cls.from_arrays(arrays, schema=schema, metadata=metadata) + else: + raise TypeError('Schema must be an instance of pyarrow.Schema') + + +def _from_pylist(cls, mapping, schema, metadata): + """ + Construct a Table/RecordBatch from list of rows / dictionaries. + + Parameters + ---------- + cls : Class Table/RecordBatch + mapping : list of dicts of rows + A mapping of strings to row values. + schema : Schema, default None + If not passed, will be inferred from the first row of the + mapping values. + metadata : dict or Mapping, default None + Optional metadata for the schema (if inferred). 
+ + Returns + ------- + Table/RecordBatch + """ + + arrays = [] + if schema is None: + names = [] + if mapping: + names = list(mapping[0].keys()) + for n in names: + v = [row[n] if n in row else None for row in mapping] + arrays.append(v) + return cls.from_arrays(arrays, names, metadata=metadata) + else: + if isinstance(schema, Schema): + for n in schema.names: + v = [row[n] if n in row else None for row in mapping] + arrays.append(v) + # Will raise if metadata is not None + return cls.from_arrays(arrays, schema=schema, metadata=metadata) + else: + raise TypeError('Schema must be an instance of pyarrow.Schema') + + +class TableGroupBy: + """ + A grouping of columns in a table on which to perform aggregations. + + Parameters + ---------- + table : pyarrow.Table + Input table to execute the aggregation on. + keys : str or list[str] + Name of the grouped columns. + use_threads : bool, default True + Whether to use multithreading or not. When set to True (the default), + no stable ordering of the output is guaranteed. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.table([ + ... pa.array(["a", "a", "b", "b", "c"]), + ... pa.array([1, 2, 3, 4, 5]), + ... ], names=["keys", "values"]) + + Grouping of columns: + + >>> pa.TableGroupBy(t,"keys") + + + Perform aggregations: + + >>> pa.TableGroupBy(t,"keys").aggregate([("values", "sum")]) + pyarrow.Table + keys: string + values_sum: int64 + ---- + keys: [["a","b","c"]] + values_sum: [[3,7,5]] + """ + + def __init__(self, table, keys, use_threads=True): + if isinstance(keys, str): + keys = [keys] + + self._table = table + self.keys = keys + self._use_threads = use_threads + + def aggregate(self, aggregations): + """ + Perform an aggregation over the grouped columns of the table. + + Parameters + ---------- + aggregations : list[tuple(str, str)] or \ +list[tuple(str, str, FunctionOptions)] + List of tuples, where each tuple is one aggregation specification + and consists of: aggregation column name followed + by function name and optionally aggregation function option. + Pass empty list to get a single row for each group. + The column name can be a string, an empty list or a list of + column names, for unary, nullary and n-ary aggregation functions + respectively. + + For the list of function names and respective aggregation + function options see :ref:`py-grouped-aggrs`. + + Returns + ------- + Table + Results of the aggregation functions. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.table([ + ... pa.array(["a", "a", "b", "b", "c"]), + ... pa.array([1, 2, 3, 4, 5]), + ... ], names=["keys", "values"]) + + Sum the column "values" over the grouped column "keys": + + >>> t.group_by("keys").aggregate([("values", "sum")]) + pyarrow.Table + keys: string + values_sum: int64 + ---- + keys: [["a","b","c"]] + values_sum: [[3,7,5]] + + Count the rows over the grouped column "keys": + + >>> t.group_by("keys").aggregate([([], "count_all")]) + pyarrow.Table + keys: string + count_all: int64 + ---- + keys: [["a","b","c"]] + count_all: [[2,2,1]] + + Do multiple aggregations: + + >>> t.group_by("keys").aggregate([ + ... ("values", "sum"), + ... ("keys", "count") + ... ]) + pyarrow.Table + keys: string + values_sum: int64 + keys_count: int64 + ---- + keys: [["a","b","c"]] + values_sum: [[3,7,5]] + keys_count: [[2,2,1]] + + Count the number of non-null values for column "values" + over the grouped column "keys": + + >>> import pyarrow.compute as pc + >>> t.group_by(["keys"]).aggregate([ + ... 
("values", "count", pc.CountOptions(mode="only_valid")) + ... ]) + pyarrow.Table + keys: string + values_count: int64 + ---- + keys: [["a","b","c"]] + values_count: [[2,2,1]] + + Get a single row for each group in column "keys": + + >>> t.group_by("keys").aggregate([]) + pyarrow.Table + keys: string + ---- + keys: [["a","b","c"]] + """ + group_by_aggrs = [] + for aggr in aggregations: + # Set opt to None if not specified + if len(aggr) == 2: + target, func = aggr + opt = None + else: + target, func, opt = aggr + # Ensure target is a list + if not isinstance(target, (list, tuple)): + target = [target] + # Ensure aggregate function is hash_ if needed + if len(self.keys) > 0 and not func.startswith("hash_"): + func = "hash_" + func + if len(self.keys) == 0 and func.startswith("hash_"): + func = func[5:] + # Determine output field name + func_nohash = func if not func.startswith("hash_") else func[5:] + if len(target) == 0: + aggr_name = func_nohash + else: + aggr_name = "_".join(target) + "_" + func_nohash + group_by_aggrs.append((target, func, opt, aggr_name)) + + return _pac()._group_by( + self._table, group_by_aggrs, self.keys, use_threads=self._use_threads + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tensor.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/tensor.pxi new file mode 100644 index 0000000000000000000000000000000000000000..1afce7f4a10159d67896f6183641d8abdc6fa878 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tensor.pxi @@ -0,0 +1,1287 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +cdef class Tensor(_Weakrefable): + """ + A n-dimensional array a.k.a Tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + + type: int32 + shape: (2, 3) + strides: (12, 4) + """ + + def __init__(self): + raise TypeError("Do not call Tensor's constructor directly, use one " + "of the `pyarrow.Tensor.from_*` functions instead.") + + cdef void init(self, const shared_ptr[CTensor]& sp_tensor): + self.sp_tensor = sp_tensor + self.tp = sp_tensor.get() + self.type = pyarrow_wrap_data_type(self.tp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape} +strides: {0.strides}""".format(self) + + @staticmethod + def from_numpy(obj, dim_names=None): + """ + Create a Tensor from a numpy array. + + Parameters + ---------- + obj : numpy.ndarray + The source numpy array + dim_names : list, optional + Names of each dimension of the Tensor. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + + type: int32 + shape: (2, 3) + strides: (12, 4) + """ + cdef: + vector[c_string] c_dim_names + shared_ptr[CTensor] ctensor + + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + check_status(NdarrayToTensor(c_default_memory_pool(), obj, + c_dim_names, &ctensor)) + return pyarrow_wrap_tensor(ctensor) + + def to_numpy(self): + """ + Convert arrow::Tensor to numpy.ndarray with zero copy + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.to_numpy() + array([[ 2, 2, 4], + [ 4, 5, 100]], dtype=int32) + """ + cdef PyObject* out + + check_status(TensorToNdarray(self.sp_tensor, self, &out)) + return PyObject_to_object(out) + + def equals(self, Tensor other): + """ + Return true if the tensors contains exactly equal data. + + Parameters + ---------- + other : Tensor + The other tensor to compare for equality. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> y = np.array([[2, 2, 4], [4, 5, 10]], np.int32) + >>> tensor2 = pa.Tensor.from_numpy(y, dim_names=["a","b"]) + >>> tensor.equals(tensor) + True + >>> tensor.equals(tensor2) + False + """ + return self.tp.Equals(deref(other.tp)) + + def __eq__(self, other): + if isinstance(other, Tensor): + return self.equals(other) + else: + return NotImplemented + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.dim_name(0) + 'dim1' + >>> tensor.dim_name(1) + 'dim2' + """ + return frombytes(self.tp.dim_name(i)) + + @property + def dim_names(self): + """ + Names of this tensor dimensions. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.dim_names + ['dim1', 'dim2'] + """ + return [frombytes(x) for x in tuple(self.tp.dim_names())] + + @property + def is_mutable(self): + """ + Is this tensor mutable or immutable. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.is_mutable + True + """ + return self.tp.is_mutable() + + @property + def is_contiguous(self): + """ + Is this tensor contiguous in memory. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.is_contiguous + True + """ + return self.tp.is_contiguous() + + @property + def ndim(self): + """ + The dimension (n) of this tensor. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.ndim + 2 + """ + return self.tp.ndim() + + @property + def size(self): + """ + The size of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.size + 6 + """ + return self.tp.size() + + @property + def shape(self): + """ + The shape of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.shape + (2, 3) + """ + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.tp.shape()) + + @property + def strides(self): + """ + Strides of this tensor. + + Examples + -------- + >>> import pyarrow as pa + >>> import numpy as np + >>> x = np.array([[2, 2, 4], [4, 5, 100]], np.int32) + >>> tensor = pa.Tensor.from_numpy(x, dim_names=["dim1","dim2"]) + >>> tensor.strides + (12, 4) + """ + return tuple(self.tp.strides()) + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + buffer.buf = self.tp.data().get().data() + pep3118_format = self.type.pep3118_format + if pep3118_format is None: + raise NotImplementedError("type %s not supported for buffer " + "protocol" % (self.type,)) + buffer.format = pep3118_format + buffer.itemsize = self.type.bit_width // 8 + buffer.internal = NULL + buffer.len = self.tp.size() * buffer.itemsize + buffer.ndim = self.tp.ndim() + buffer.obj = self + if self.tp.is_mutable(): + buffer.readonly = 0 + else: + buffer.readonly = 1 + # NOTE: This assumes Py_ssize_t == int64_t, and that the shape + # and strides arrays lifetime is tied to the tensor's + buffer.shape = &self.tp.shape()[0] + buffer.strides = &self.tp.strides()[0] + buffer.suboffsets = NULL + + +ctypedef CSparseCOOIndex* _CSparseCOOIndexPtr + + +cdef class SparseCOOTensor(_Weakrefable): + """ + A sparse COO tensor. + """ + + def __init__(self): + raise TypeError("Do not call SparseCOOTensor's constructor directly, " + "use one of the `pyarrow.SparseCOOTensor.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCOOTensor]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCOOTensor + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. + + Returns + ------- + pyarrow.SparseCOOTensor + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, coords, shape, dim_names=None): + """ + Create arrow::SparseCOOTensor from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the rows. + coords : numpy.ndarray + Coordinates of the data. + shape : tuple + Shape of the tensor. + dim_names : list, optional + Names of the dimensions. 
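+
+        Returns
+        -------
+        pyarrow.SparseCOOTensor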
+ """ + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCOOTensor indices + coords = np.require(coords, dtype='i8', requirements='C') + if coords.ndim != 2: + raise ValueError("Expected 2-dimensional array for " + "SparseCOOTensor indices") + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.coo_matrix to arrow::SparseCOOTensor + + Parameters + ---------- + obj : scipy.sparse.csr_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.coo_matrix): + raise TypeError( + "Expected scipy.sparse.coo_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + row = obj.row + col = obj.col + + # When SciPy's coo_matrix has canonical format, its indices matrix is + # sorted in column-major order. As Arrow's SparseCOOIndex is sorted + # in row-major order if it is canonical, we must sort indices matrix + # into row-major order to keep its canonicalness, here. + if obj.has_canonical_format: + order = np.lexsort((col, row)) # sort in row-major order + row = row[order] + col = col[order] + coords = np.vstack([row, col]).T + coords = np.require(coords, dtype='i8', requirements='C') + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + obj.data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_pydata_sparse(obj, dim_names=None): + """ + Convert pydata/sparse.COO to arrow::SparseCOOTensor. + + Parameters + ---------- + obj : pydata.sparse.COO + The sparse multidimensional array that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import sparse + if not isinstance(obj, sparse.COO): + raise TypeError( + "Expected sparse.COO, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + coords = np.require(obj.coords.T, dtype='i8', requirements='C') + + check_status(NdarraysToSparseCOOTensor(c_default_memory_pool(), + obj.data, coords, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCOOTensor. + + Parameters + ---------- + obj : Tensor + The tensor that should be converted. + """ + cdef shared_ptr[CSparseCOOTensor] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCOOTensor(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_coo_tensor(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCOOTensor to numpy.ndarrays with zero copy. 
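+
+        Returns
+        -------
+        data : numpy.ndarray
+        coords : numpy.ndarray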
+ """ + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + return PyObject_to_object(out_data), PyObject_to_object(out_coords) + + def to_scipy(self): + """ + Convert arrow::SparseCOOTensor to scipy.sparse.coo_matrix. + """ + from scipy.sparse import coo_matrix + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + data = PyObject_to_object(out_data) + coords = PyObject_to_object(out_coords) + row, col = coords[:, 0], coords[:, 1] + result = coo_matrix((data[:, 0], (row, col)), shape=self.shape) + + # As the description in from_scipy above, we sorted indices matrix + # in row-major order if SciPy's coo_matrix has canonical format. + # So, we must call sum_duplicates() to make the result coo_matrix + # has canonical format. + if self.has_canonical_format: + result.sum_duplicates() + return result + + def to_pydata_sparse(self): + """ + Convert arrow::SparseCOOTensor to pydata/sparse.COO. + """ + from sparse import COO + cdef PyObject* out_data + cdef PyObject* out_coords + + check_status(SparseCOOTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_coords)) + data = PyObject_to_object(out_data) + coords = PyObject_to_object(out_coords) + result = COO(data=data[:, 0], coords=coords.T, shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCOOTensor to arrow::Tensor. + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCOOTensor other): + """ + Return true if sparse tensors contains exactly equal data. + + Parameters + ---------- + other : SparseCOOTensor + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCOOTensor): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + + @property + def has_canonical_format(self): + cdef: + _CSparseCOOIndexPtr csi + + csi = <_CSparseCOOIndexPtr>(self.stp.sparse_index().get()) + if csi != nullptr: + return csi.is_canonical() + return True + +cdef class SparseCSRMatrix(_Weakrefable): + """ + A sparse CSR matrix. 
+ """ + + def __init__(self): + raise TypeError("Do not call SparseCSRMatrix's constructor directly, " + "use one of the `pyarrow.SparseCSRMatrix.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSRMatrix]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSRMatrix + + Parameters + ---------- + obj : numpy.ndarray + The dense numpy array that should be converted. + dim_names : list, optional + The names of the dimensions. + + Returns + ------- + pyarrow.SparseCSRMatrix + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, dim_names=None): + """ + Create arrow::SparseCSRMatrix from numpy.ndarrays. + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse matrix. + indptr : numpy.ndarray + Range of the rows, + The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. + indices : numpy.ndarray + Column indices of the corresponding non-zero values. + shape : tuple + Shape of the matrix. + dim_names : list, optional + Names of the dimensions. + """ + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCSRMatrix indices + indptr = np.require(indptr, dtype='i8') + indices = np.require(indices, dtype='i8') + if indptr.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSRMatrix indptr") + if indices.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSRMatrix indices") + + check_status(NdarraysToSparseCSRMatrix(c_default_memory_pool(), + data, indptr, indices, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.csr_matrix to arrow::SparseCSRMatrix. + + Parameters + ---------- + obj : scipy.sparse.csr_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.csr_matrix): + raise TypeError( + "Expected scipy.sparse.csr_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for CSparseCSRMatrix indices + indptr = np.require(obj.indptr, dtype='i8') + indices = np.require(obj.indices, dtype='i8') + + check_status(NdarraysToSparseCSRMatrix(c_default_memory_pool(), + obj.data, indptr, indices, + c_shape, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSRMatrix. + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. 
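+
+        Returns
+        -------
+        pyarrow.SparseCSRMatrix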
+ """ + cdef shared_ptr[CSparseCSRMatrix] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSRMatrix(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csr_matrix(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSRMatrix to numpy.ndarrays with zero copy. + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSRMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_scipy(self): + """ + Convert arrow::SparseCSRMatrix to scipy.sparse.csr_matrix. + """ + from scipy.sparse import csr_matrix + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSRMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + + data = PyObject_to_object(out_data) + indptr = PyObject_to_object(out_indptr) + indices = PyObject_to_object(out_indices) + result = csr_matrix((data[:, 0], indices, indptr), shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCSRMatrix to arrow::Tensor. + """ + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSRMatrix other): + """ + Return true if sparse tensors contains exactly equal data. + + Parameters + ---------- + other : SparseCSRMatrix + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSRMatrix): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + +cdef class SparseCSCMatrix(_Weakrefable): + """ + A sparse CSC matrix. + """ + + def __init__(self): + raise TypeError("Do not call SparseCSCMatrix's constructor directly, " + "use one of the `pyarrow.SparseCSCMatrix.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSCMatrix]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. 
+ + Returns + ------- + pyarrow.SparseCSCMatrix + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, dim_names=None): + """ + Create arrow::SparseCSCMatrix from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse matrix. + indptr : numpy.ndarray + Range of the rows, + The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. + indices : numpy.ndarray + Column indices of the corresponding non-zero values. + shape : tuple + Shape of the matrix. + dim_names : list, optional + Names of the dimensions. + """ + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for SparseCSCMatrix indices + indptr = np.require(indptr, dtype='i8') + indices = np.require(indices, dtype='i8') + if indptr.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSCMatrix indptr") + if indices.ndim != 1: + raise ValueError("Expected 1-dimensional array for " + "SparseCSCMatrix indices") + + check_status(NdarraysToSparseCSCMatrix(c_default_memory_pool(), + data, indptr, indices, c_shape, + c_dim_names, &csparse_tensor)) + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + @staticmethod + def from_scipy(obj, dim_names=None): + """ + Convert scipy.sparse.csc_matrix to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : scipy.sparse.csc_matrix + The scipy matrix that should be converted. + dim_names : list, optional + Names of the dimensions. + """ + import scipy.sparse + if not isinstance(obj, scipy.sparse.csc_matrix): + raise TypeError( + "Expected scipy.sparse.csc_matrix, got {}".format(type(obj))) + + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in obj.shape: + c_shape.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce precondition for CSparseCSCMatrix indices + indptr = np.require(obj.indptr, dtype='i8') + indices = np.require(obj.indices, dtype='i8') + + check_status(NdarraysToSparseCSCMatrix(c_default_memory_pool(), + obj.data, indptr, indices, + c_shape, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSCMatrix + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. 
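+
+        Returns
+        -------
+        pyarrow.SparseCSCMatrix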
+ """ + cdef shared_ptr[CSparseCSCMatrix] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSCMatrix(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csc_matrix(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSCMatrix to numpy.ndarrays with zero copy + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSCMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_scipy(self): + """ + Convert arrow::SparseCSCMatrix to scipy.sparse.csc_matrix + """ + from scipy.sparse import csc_matrix + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSCMatrixToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + + data = PyObject_to_object(out_data) + indptr = PyObject_to_object(out_indptr) + indices = PyObject_to_object(out_indices) + result = csc_matrix((data[:, 0], indices, indptr), shape=self.shape) + return result + + def to_tensor(self): + """ + Convert arrow::SparseCSCMatrix to arrow::Tensor + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSCMatrix other): + """ + Return true if sparse tensors contains exactly equal data + + Parameters + ---------- + other : SparseCSCMatrix + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSCMatrix): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() + + +cdef class SparseCSFTensor(_Weakrefable): + """ + A sparse CSF tensor. + + CSF is a generalization of compressed sparse row (CSR) index. + + CSF index recursively compresses each dimension of a tensor into a set + of prefix trees. Each path from a root to leaf forms one tensor + non-zero index. CSF is implemented with two arrays of buffers and one + arrays of integers. 
+ """ + + def __init__(self): + raise TypeError("Do not call SparseCSFTensor's constructor directly, " + "use one of the `pyarrow.SparseCSFTensor.from_*` " + "functions instead.") + + cdef void init(self, const shared_ptr[CSparseCSFTensor]& sp_sparse_tensor): + self.sp_sparse_tensor = sp_sparse_tensor + self.stp = sp_sparse_tensor.get() + self.type = pyarrow_wrap_data_type(self.stp.type()) + + def __repr__(self): + return """ +type: {0.type} +shape: {0.shape}""".format(self) + + @classmethod + def from_dense_numpy(cls, obj, dim_names=None): + """ + Convert numpy.ndarray to arrow::SparseCSFTensor + + Parameters + ---------- + obj : numpy.ndarray + Data used to populate the rows. + dim_names : list[str], optional + Names of the dimensions. + + Returns + ------- + pyarrow.SparseCSFTensor + """ + return cls.from_tensor(Tensor.from_numpy(obj, dim_names=dim_names)) + + @staticmethod + def from_numpy(data, indptr, indices, shape, axis_order=None, + dim_names=None): + """ + Create arrow::SparseCSFTensor from numpy.ndarrays + + Parameters + ---------- + data : numpy.ndarray + Data used to populate the sparse tensor. + indptr : numpy.ndarray + The sparsity structure. + Each two consecutive dimensions in a tensor correspond to + a buffer in indices. + A pair of consecutive values at `indptr[dim][i]` + `indptr[dim][i + 1]` signify a range of nodes in + `indices[dim + 1]` who are children of `indices[dim][i]` node. + indices : numpy.ndarray + Stores values of nodes. + Each tensor dimension corresponds to a buffer in indptr. + shape : tuple + Shape of the matrix. + axis_order : list, optional + the sequence in which dimensions were traversed to + produce the prefix tree. + dim_names : list, optional + Names of the dimensions. + """ + cdef shared_ptr[CSparseCSFTensor] csparse_tensor + cdef vector[int64_t] c_axis_order + cdef vector[int64_t] c_shape + cdef vector[c_string] c_dim_names + + for x in shape: + c_shape.push_back(x) + if not axis_order: + axis_order = np.argsort(shape) + for x in axis_order: + c_axis_order.push_back(x) + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + # Enforce preconditions for SparseCSFTensor indices + if not (isinstance(indptr, (list, tuple)) and + isinstance(indices, (list, tuple))): + raise TypeError("Expected list or tuple, got {}, {}" + .format(type(indptr), type(indices))) + if len(indptr) != len(shape) - 1: + raise ValueError("Expected list of {ndim} np.arrays for " + "SparseCSFTensor.indptr".format(ndim=len(shape))) + if len(indices) != len(shape): + raise ValueError("Expected list of {ndim} np.arrays for " + "SparseCSFTensor.indices".format(ndim=len(shape))) + if any([x.ndim != 1 for x in indptr]): + raise ValueError("Expected a list of 1-dimensional arrays for " + "SparseCSFTensor.indptr") + if any([x.ndim != 1 for x in indices]): + raise ValueError("Expected a list of 1-dimensional arrays for " + "SparseCSFTensor.indices") + indptr = [np.require(arr, dtype='i8') for arr in indptr] + indices = [np.require(arr, dtype='i8') for arr in indices] + + check_status(NdarraysToSparseCSFTensor(c_default_memory_pool(), data, + indptr, indices, c_shape, + c_axis_order, c_dim_names, + &csparse_tensor)) + return pyarrow_wrap_sparse_csf_tensor(csparse_tensor) + + @staticmethod + def from_tensor(obj): + """ + Convert arrow::Tensor to arrow::SparseCSFTensor + + Parameters + ---------- + obj : Tensor + The dense tensor that should be converted. 
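+
+        Returns
+        -------
+        pyarrow.SparseCSFTensor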
+ """ + cdef shared_ptr[CSparseCSFTensor] csparse_tensor + cdef shared_ptr[CTensor] ctensor = pyarrow_unwrap_tensor(obj) + + with nogil: + check_status(TensorToSparseCSFTensor(ctensor, &csparse_tensor)) + + return pyarrow_wrap_sparse_csf_tensor(csparse_tensor) + + def to_numpy(self): + """ + Convert arrow::SparseCSFTensor to numpy.ndarrays with zero copy + """ + cdef PyObject* out_data + cdef PyObject* out_indptr + cdef PyObject* out_indices + + check_status(SparseCSFTensorToNdarray(self.sp_sparse_tensor, self, + &out_data, &out_indptr, + &out_indices)) + return (PyObject_to_object(out_data), PyObject_to_object(out_indptr), + PyObject_to_object(out_indices)) + + def to_tensor(self): + """ + Convert arrow::SparseCSFTensor to arrow::Tensor + """ + + cdef shared_ptr[CTensor] ctensor + with nogil: + ctensor = GetResultValue(self.stp.ToTensor()) + + return pyarrow_wrap_tensor(ctensor) + + def equals(self, SparseCSFTensor other): + """ + Return true if sparse tensors contains exactly equal data + + Parameters + ---------- + other : SparseCSFTensor + The other tensor to compare for equality. + """ + return self.stp.Equals(deref(other.stp)) + + def __eq__(self, other): + if isinstance(other, SparseCSFTensor): + return self.equals(other) + else: + return NotImplemented + + @property + def is_mutable(self): + return self.stp.is_mutable() + + @property + def ndim(self): + return self.stp.ndim() + + @property + def shape(self): + # Cython knows how to convert a vector[T] to a Python list + return tuple(self.stp.shape()) + + @property + def size(self): + return self.stp.size() + + def dim_name(self, i): + """ + Returns the name of the i-th tensor dimension. + + Parameters + ---------- + i : int + The physical index of the tensor dimension. + + Returns + ------- + str + """ + return frombytes(self.stp.dim_name(i)) + + @property + def dim_names(self): + names_tuple = tuple(self.stp.dim_names()) + return tuple(frombytes(x) for x in names_tuple) + + @property + def non_zero_length(self): + return self.stp.non_zero_length() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/types.pxi b/env-llmeval/lib/python3.10/site-packages/pyarrow/types.pxi new file mode 100644 index 0000000000000000000000000000000000000000..9639dd7d894c1f04c1c01749f3e500a5cc3c0005 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/types.pxi @@ -0,0 +1,5306 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from cpython.pycapsule cimport ( + PyCapsule_CheckExact, + PyCapsule_GetPointer, + PyCapsule_GetName, + PyCapsule_New, + PyCapsule_IsValid +) + +import atexit +from collections.abc import Mapping +import pickle +import re +import sys +import warnings +from cython import sizeof + +# These are imprecise because the type (in pandas 0.x) depends on the presence +# of nulls +cdef dict _pandas_type_map = { + _Type_NA: np.object_, # NaNs + _Type_BOOL: np.bool_, + _Type_INT8: np.int8, + _Type_INT16: np.int16, + _Type_INT32: np.int32, + _Type_INT64: np.int64, + _Type_UINT8: np.uint8, + _Type_UINT16: np.uint16, + _Type_UINT32: np.uint32, + _Type_UINT64: np.uint64, + _Type_HALF_FLOAT: np.float16, + _Type_FLOAT: np.float32, + _Type_DOUBLE: np.float64, + # Pandas does not support [D]ay, so default to [ms] for date32 + _Type_DATE32: np.dtype('datetime64[ms]'), + _Type_DATE64: np.dtype('datetime64[ms]'), + _Type_TIMESTAMP: { + 's': np.dtype('datetime64[s]'), + 'ms': np.dtype('datetime64[ms]'), + 'us': np.dtype('datetime64[us]'), + 'ns': np.dtype('datetime64[ns]'), + }, + _Type_DURATION: { + 's': np.dtype('timedelta64[s]'), + 'ms': np.dtype('timedelta64[ms]'), + 'us': np.dtype('timedelta64[us]'), + 'ns': np.dtype('timedelta64[ns]'), + }, + _Type_BINARY: np.object_, + _Type_FIXED_SIZE_BINARY: np.object_, + _Type_STRING: np.object_, + _Type_LIST: np.object_, + _Type_MAP: np.object_, + _Type_DECIMAL128: np.object_, +} + +cdef dict _pep3118_type_map = { + _Type_INT8: b'b', + _Type_INT16: b'h', + _Type_INT32: b'i', + _Type_INT64: b'q', + _Type_UINT8: b'B', + _Type_UINT16: b'H', + _Type_UINT32: b'I', + _Type_UINT64: b'Q', + _Type_HALF_FLOAT: b'e', + _Type_FLOAT: b'f', + _Type_DOUBLE: b'd', +} + + +cdef bytes _datatype_to_pep3118(CDataType* type): + """ + Construct a PEP 3118 format string describing the given datatype. + None is returned for unsupported types. + """ + try: + char = _pep3118_type_map[type.id()] + except KeyError: + return None + else: + if char in b'bBhHiIqQ': + # Use "standard" int widths, not native + return b'=' + char + else: + return char + + +cdef void* _as_c_pointer(v, allow_null=False) except *: + """ + Convert a Python object to a raw C pointer. + + Used mainly for the C data interface. + Integers are accepted as well as capsule objects with a NULL name. + (the latter for compatibility with raw pointers exported by reticulate) + """ + cdef void* c_ptr + cdef const char* capsule_name + if isinstance(v, int): + c_ptr = v + elif isinstance(v, float): + warnings.warn( + "Passing a pointer value as a float is unsafe and only " + "supported for compatibility with older versions of the R " + "Arrow library", UserWarning, stacklevel=2) + c_ptr = v + elif PyCapsule_CheckExact(v): + # An R external pointer was how the R bindings passed pointer values to + # Python from versions 7 to 15 (inclusive); however, the reticulate 1.35.0 + # update changed the name of the capsule from NULL to "r_extptr". + # Newer versions of the R package pass a Python integer; however, this + # workaround ensures that old versions of the R package continue to work + # with newer versions of pyarrow. 
+ capsule_name = PyCapsule_GetName(v) + if capsule_name == NULL or capsule_name == b"r_extptr": + c_ptr = PyCapsule_GetPointer(v, capsule_name) + else: + capsule_name_str = capsule_name.decode() + raise ValueError( + f"Can't convert PyCapsule with name '{capsule_name_str}' to pointer address" + ) + else: + raise TypeError(f"Expected a pointer value, got {type(v)!r}") + if not allow_null and c_ptr == NULL: + raise ValueError(f"Null pointer (value before cast = {v!r})") + return c_ptr + + +def _is_primitive(Type type): + # This is simply a redirect, the official API is in pyarrow.types. + return is_primitive(type) + + +def _get_pandas_type(arrow_type, coerce_to_ns=False): + cdef Type type_id = arrow_type.id + if type_id not in _pandas_type_map: + return None + if coerce_to_ns: + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + if type_id == _Type_DURATION: + return np.dtype('timedelta64[ns]') + return np.dtype('datetime64[ns]') + pandas_type = _pandas_type_map[type_id] + if isinstance(pandas_type, dict): + unit = getattr(arrow_type, 'unit', None) + pandas_type = pandas_type.get(unit, None) + return pandas_type + + +def _get_pandas_tz_type(arrow_type, coerce_to_ns=False): + from pyarrow.pandas_compat import make_datetimetz + unit = 'ns' if coerce_to_ns else arrow_type.unit + return make_datetimetz(unit, arrow_type.tz) + + +def _to_pandas_dtype(arrow_type, options=None): + coerce_to_ns = (options and options.get('coerce_temporal_nanoseconds', False)) or ( + _pandas_api.is_v1() and arrow_type.id in + [_Type_DATE32, _Type_DATE64, _Type_TIMESTAMP, _Type_DURATION]) + + if getattr(arrow_type, 'tz', None): + dtype = _get_pandas_tz_type(arrow_type, coerce_to_ns) + else: + dtype = _get_pandas_type(arrow_type, coerce_to_ns) + + if not dtype: + raise NotImplementedError(str(arrow_type)) + + return dtype + + +# Workaround for Cython parsing bug +# https://github.com/cython/cython/issues/2143 +ctypedef CFixedWidthType* _CFixedWidthTypePtr + + +cdef class DataType(_Weakrefable): + """ + Base class of all Arrow data types. + + Each data type is an *instance* of this class. + + Examples + -------- + Instance of int64 type: + + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use public " + "functions like pyarrow.int64, pyarrow.list_, etc. " + "instead.".format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + assert type != nullptr + self.sp_type = type + self.type = type.get() + self.pep3118_format = _datatype_to_pep3118(self.type) + + cpdef Field field(self, i): + """ + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + """ + if not isinstance(i, int): + raise TypeError(f"Expected int index, got type '{type(i)}'") + cdef int index = _normalize_index(i, self.type.num_fields()) + return pyarrow_wrap_field(self.type.field(index)) + + @property + def id(self): + return self.type.id() + + @property + def bit_width(self): + """ + Bit width for fixed width type. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> pa.int64().bit_width + 64 + """ + cdef _CFixedWidthTypePtr ty + ty = dynamic_cast[_CFixedWidthTypePtr](self.type) + if ty == nullptr: + raise ValueError("Non-fixed width type") + return ty.bit_width() + + @property + def num_fields(self): + """ + The number of child fields. 
+ + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> pa.int64().num_fields + 0 + >>> pa.list_(pa.string()) + ListType(list) + >>> pa.list_(pa.string()).num_fields + 1 + >>> struct = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct.num_fields + 2 + """ + return self.type.num_fields() + + @property + def num_buffers(self): + """ + Number of data buffers required to construct Array type + excluding children. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().num_buffers + 2 + >>> pa.string().num_buffers + 3 + """ + return self.type.layout().buffers.size() + + def __str__(self): + return frombytes(self.type.ToString(), safe=True) + + def __hash__(self): + return hash(str(self)) + + def __reduce__(self): + return type_for_alias, (str(self),) + + def __repr__(self): + return '{0.__class__.__name__}({0})'.format(self) + + def __eq__(self, other): + try: + return self.equals(other) + except (TypeError, ValueError): + return NotImplemented + + def equals(self, other, *, check_metadata=False): + """ + Return true if type is equivalent to passed value. + + Parameters + ---------- + other : DataType or string convertible to DataType + check_metadata : bool + Whether nested Field metadata equality should be checked as well. + + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().equals(pa.string()) + False + >>> pa.int64().equals(pa.int64()) + True + """ + cdef: + DataType other_type + c_bool c_check_metadata + + other_type = ensure_type(other) + c_check_metadata = check_metadata + return self.type.Equals(deref(other_type.type), c_check_metadata) + + def to_pandas_dtype(self): + """ + Return the equivalent NumPy / Pandas dtype. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.int64().to_pandas_dtype() + + """ + return _to_pandas_dtype(self) + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. + + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportType(deref(self.type), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import DataType from a C ArrowSchema struct, given its pointer. + + This is a low-level function intended for expert users. + """ + result = GetResultValue(ImportType( + _as_c_pointer(in_ptr))) + return pyarrow_wrap_data_type(result) + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. + """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportType(deref(self.type), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a DataType from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + shared_ptr[CDataType] c_type + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise TypeError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + c_type = GetResultValue(ImportType(c_schema)) + + return pyarrow_wrap_data_type(c_type) + + +cdef class DictionaryMemo(_Weakrefable): + """ + Tracking container for dictionary-encoded fields. 
+ """ + + def __cinit__(self): + self.sp_memo.reset(new CDictionaryMemo()) + self.memo = self.sp_memo.get() + + +cdef class DictionaryType(DataType): + """ + Concrete class for dictionary data types. + + Examples + -------- + Create an instance of dictionary type: + + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()) + DictionaryType(dictionary) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.dict_type = type.get() + + def __reduce__(self): + return dictionary, (self.index_type, self.value_type, self.ordered) + + @property + def ordered(self): + """ + Whether the dictionary is ordered, i.e. whether the ordering of values + in the dictionary is important. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()).ordered + False + """ + return self.dict_type.ordered() + + @property + def index_type(self): + """ + The data type of dictionary indices (a signed integer type). + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int16(), pa.utf8()).index_type + DataType(int16) + """ + return pyarrow_wrap_data_type(self.dict_type.index_type()) + + @property + def value_type(self): + """ + The dictionary value type. + + The dictionary values are found in an instance of DictionaryArray. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.dictionary(pa.int16(), pa.utf8()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.dict_type.value_type()) + + +cdef class ListType(DataType): + """ + Concrete class for list data types. + + Examples + -------- + Create an instance of ListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.string()) + ListType(list) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return list_, (self.value_field,) + + @property + def value_field(self): + """ + The field for list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.string()).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + +cdef class LargeListType(DataType): + """ + Concrete class for large list data types + (like ListType, but with 64-bit offsets). + + Examples + -------- + Create an instance of LargeListType: + + >>> import pyarrow as pa + >>> pa.large_list(pa.string()) + LargeListType(large_list) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return large_list, (self.value_field,) + + @property + def value_field(self): + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of large list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.large_list(pa.string()).value_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + +cdef class MapType(DataType): + """ + Concrete class for map data types. 
+ + Examples + -------- + Create an instance of MapType: + + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()) + MapType(map) + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True) + MapType(map) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.map_type = type.get() + + def __reduce__(self): + return map_, (self.key_field, self.item_field) + + @property + def key_field(self): + """ + The field for keys in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).key_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.map_type.key_field()) + + @property + def key_type(self): + """ + The data type of keys in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).key_type + DataType(string) + """ + return pyarrow_wrap_data_type(self.map_type.key_type()) + + @property + def item_field(self): + """ + The field for items in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).item_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.map_type.item_field()) + + @property + def item_type(self): + """ + The data type of items in the map entries. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()).item_type + DataType(int32) + """ + return pyarrow_wrap_data_type(self.map_type.item_type()) + + @property + def keys_sorted(self): + """ + Should the entries be sorted according to keys. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True).keys_sorted + True + """ + return self.map_type.keys_sorted() + + +cdef class FixedSizeListType(DataType): + """ + Concrete class for fixed size list data types. + + Examples + -------- + Create an instance of FixedSizeListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2) + FixedSizeListType(fixed_size_list[2]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.list_type = type.get() + + def __reduce__(self): + return list_, (self.value_type, self.list_size) + + @property + def value_field(self): + """ + The field for list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).value_field + pyarrow.Field + """ + return pyarrow_wrap_field(self.list_type.value_field()) + + @property + def value_type(self): + """ + The data type of large list values. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).value_type + DataType(int32) + """ + return pyarrow_wrap_data_type(self.list_type.value_type()) + + @property + def list_size(self): + """ + The size of the fixed size lists. + + Examples + -------- + >>> import pyarrow as pa + >>> pa.list_(pa.int32(), 2).list_size + 2 + """ + return self.list_type.list_size() + + +cdef class StructType(DataType): + """ + Concrete class for struct data types. + + ``StructType`` supports direct indexing using ``[...]`` (implemented via + ``__getitem__``) to access its fields. + It will return the struct field with the given index or name. 
+ + Examples + -------- + >>> import pyarrow as pa + + Accessing fields using direct indexing: + + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct_type[0] + pyarrow.Field + >>> struct_type['y'] + pyarrow.Field + + Accessing fields using ``field()``: + + >>> struct_type.field(1) + pyarrow.Field + >>> struct_type.field('x') + pyarrow.Field + + # Creating a schema from the struct type's fields: + >>> pa.schema(list(struct_type)) + x: int32 + y: string + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.struct_type = type.get() + + cdef Field field_by_name(self, name): + """ + Return a child field by its name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + field : Field + The child field with the given name. + + Raises + ------ + KeyError + If the name isn't found, or if several fields have the given + name. + """ + cdef vector[shared_ptr[CField]] fields + + fields = self.struct_type.GetAllFieldsByName(tobytes(name)) + if fields.size() == 0: + raise KeyError(name) + elif fields.size() > 1: + warnings.warn("Struct field name corresponds to more " + "than one field", UserWarning) + raise KeyError(name) + else: + return pyarrow_wrap_field(fields[0]) + + def get_field_index(self, name): + """ + Return index of the unique field with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + index : int + The index of the field with the given name; -1 if the + name isn't found or there are several fields with the given + name. + + Examples + -------- + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + + Index of the field with a name 'y': + + >>> struct_type.get_field_index('y') + 1 + + Index of the field that does not exist: + + >>> struct_type.get_field_index('z') + -1 + """ + return self.struct_type.GetFieldIndex(tobytes(name)) + + cpdef Field field(self, i): + """ + Select a field by its column name or numeric index. + + Parameters + ---------- + i : int or str + + Returns + ------- + pyarrow.Field + + Examples + -------- + + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + + Select the second field: + + >>> struct_type.field(1) + pyarrow.Field + + Select the field named 'x': + + >>> struct_type.field('x') + pyarrow.Field + """ + if isinstance(i, (bytes, str)): + return self.field_by_name(i) + elif isinstance(i, int): + return DataType.field(self, i) + else: + raise TypeError('Expected integer or string index') + + def get_all_field_indices(self, name): + """ + Return sorted list of indices for the fields with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + indices : List[int] + + Examples + -------- + >>> import pyarrow as pa + >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()}) + >>> struct_type.get_all_field_indices('x') + [0] + """ + return self.struct_type.GetAllFieldIndices(tobytes(name)) + + def __len__(self): + """ + Like num_fields(). + """ + return self.type.num_fields() + + def __iter__(self): + """ + Iterate over struct fields, in order. + """ + for i in range(len(self)): + yield self[i] + + def __getitem__(self, i): + """ + Return the struct field with the given index or name. + + Alias of ``field``. 
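+
+        Examples
+        --------
+        A minimal example of looking a field up by name and by index; both
+        forms dispatch to ``field``:
+
+        >>> import pyarrow as pa
+        >>> struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()})
+        >>> struct_type['y'].name
+        'y'
+        >>> struct_type[0].name
+        'x'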
+ """ + return self.field(i) + + def __reduce__(self): + return struct, (list(self),) + + +cdef class UnionType(DataType): + """ + Base class for union data types. + + Examples + -------- + Create an instance of a dense UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_DENSE), + (DenseUnionType(dense_union),) + + Create an instance of a dense UnionType using ``pa.dense_union``: + + >>> pa.dense_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + DenseUnionType(dense_union) + + Create an instance of a sparse UnionType using ``pa.union``: + + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_SPARSE), + (SparseUnionType(sparse_union),) + + Create an instance of a sparse UnionType using ``pa.sparse_union``: + + >>> pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + SparseUnionType(sparse_union) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + + @property + def mode(self): + """ + The mode of the union ("dense" or "sparse"). + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union.mode + 'sparse' + """ + cdef CUnionType* type = self.sp_type.get() + cdef int mode = type.mode() + if mode == _UnionMode_DENSE: + return 'dense' + if mode == _UnionMode_SPARSE: + return 'sparse' + assert 0 + + @property + def type_codes(self): + """ + The type code to indicate each data type in this union. + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union.type_codes + [0, 1] + """ + cdef CUnionType* type = self.sp_type.get() + return type.type_codes() + + def __len__(self): + """ + Like num_fields(). + """ + return self.type.num_fields() + + def __iter__(self): + """ + Iterate over union members, in order. + """ + for i in range(len(self)): + yield self[i] + + cpdef Field field(self, i): + """ + Return a child field by its numeric index. + + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> union = pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + >>> union[0] + pyarrow.Field + """ + if isinstance(i, int): + return DataType.field(self, i) + else: + raise TypeError('Expected integer') + + def __getitem__(self, i): + """ + Return a child field by its index. + + Alias of ``field``. + """ + return self.field(i) + + def __reduce__(self): + return union, (list(self), self.mode, self.type_codes) + + +cdef class SparseUnionType(UnionType): + """ + Concrete class for sparse union types. + + Examples + -------- + Create an instance of a sparse UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_SPARSE), + (SparseUnionType(sparse_union),) + + Create an instance of a sparse UnionType using ``pa.sparse_union``: + + >>> pa.sparse_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + SparseUnionType(sparse_union) + """ + + +cdef class DenseUnionType(UnionType): + """ + Concrete class for dense union types. 
+ + Examples + -------- + Create an instance of a dense UnionType using ``pa.union``: + + >>> import pyarrow as pa + >>> pa.union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())], + ... mode=pa.lib.UnionMode_DENSE), + (DenseUnionType(dense_union),) + + Create an instance of a dense UnionType using ``pa.dense_union``: + + >>> pa.dense_union([pa.field('a', pa.binary(10)), pa.field('b', pa.string())]) + DenseUnionType(dense_union) + """ + + +cdef class TimestampType(DataType): + """ + Concrete class for timestamp data types. + + Examples + -------- + >>> import pyarrow as pa + + Create an instance of timestamp type: + + >>> pa.timestamp('us') + TimestampType(timestamp[us]) + + Create an instance of timestamp type with timezone: + + >>> pa.timestamp('s', tz='UTC') + TimestampType(timestamp[s, tz=UTC]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.ts_type = type.get() + + @property + def unit(self): + """ + The timestamp unit ('s', 'ms', 'us' or 'ns'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.timestamp('us') + >>> t.unit + 'us' + """ + return timeunit_to_string(self.ts_type.unit()) + + @property + def tz(self): + """ + The timestamp time zone, if any, or None. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.timestamp('s', tz='UTC') + >>> t.tz + 'UTC' + """ + if self.ts_type.timezone().size() > 0: + return frombytes(self.ts_type.timezone()) + else: + return None + + def __reduce__(self): + return timestamp, (self.unit, self.tz) + + +cdef class Time32Type(DataType): + """ + Concrete class for time32 data types. + + Supported time unit resolutions are 's' [second] + and 'ms' [millisecond]. + + Examples + -------- + Create an instance of time32 type: + + >>> import pyarrow as pa + >>> pa.time32('ms') + Time32Type(time32[ms]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.time_type = type.get() + + @property + def unit(self): + """ + The time unit ('s' or 'ms'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.time32('ms') + >>> t.unit + 'ms' + """ + return timeunit_to_string(self.time_type.unit()) + + +cdef class Time64Type(DataType): + """ + Concrete class for time64 data types. + + Supported time unit resolutions are 'us' [microsecond] + and 'ns' [nanosecond]. + + Examples + -------- + Create an instance of time64 type: + + >>> import pyarrow as pa + >>> pa.time64('us') + Time64Type(time64[us]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.time_type = type.get() + + @property + def unit(self): + """ + The time unit ('us' or 'ns'). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.time64('us') + >>> t.unit + 'us' + """ + return timeunit_to_string(self.time_type.unit()) + + +cdef class DurationType(DataType): + """ + Concrete class for duration data types. + + Examples + -------- + Create an instance of duration type: + + >>> import pyarrow as pa + >>> pa.duration('s') + DurationType(duration[s]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.duration_type = type.get() + + @property + def unit(self): + """ + The duration unit ('s', 'ms', 'us' or 'ns'). 
+ + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.duration('s') + >>> t.unit + 's' + """ + return timeunit_to_string(self.duration_type.unit()) + + +cdef class FixedSizeBinaryType(DataType): + """ + Concrete class for fixed-size binary data types. + + Examples + -------- + Create an instance of fixed-size binary type: + + >>> import pyarrow as pa + >>> pa.binary(3) + FixedSizeBinaryType(fixed_size_binary[3]) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.fixed_size_binary_type = ( + type.get()) + + def __reduce__(self): + return binary, (self.byte_width,) + + @property + def byte_width(self): + """ + The binary size in bytes. + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.binary(3) + >>> t.byte_width + 3 + """ + return self.fixed_size_binary_type.byte_width() + + +cdef class Decimal128Type(FixedSizeBinaryType): + """ + Concrete class for decimal128 data types. + + Examples + -------- + Create an instance of decimal128 type: + + >>> import pyarrow as pa + >>> pa.decimal128(5, 2) + Decimal128Type(decimal128(5, 2)) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + FixedSizeBinaryType.init(self, type) + self.decimal128_type = type.get() + + def __reduce__(self): + return decimal128, (self.precision, self.scale) + + @property + def precision(self): + """ + The decimal precision, in number of decimal digits (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal128(5, 2) + >>> t.precision + 5 + """ + return self.decimal128_type.precision() + + @property + def scale(self): + """ + The decimal scale (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal128(5, 2) + >>> t.scale + 2 + """ + return self.decimal128_type.scale() + + +cdef class Decimal256Type(FixedSizeBinaryType): + """ + Concrete class for decimal256 data types. + + Examples + -------- + Create an instance of decimal256 type: + + >>> import pyarrow as pa + >>> pa.decimal256(76, 38) + Decimal256Type(decimal256(76, 38)) + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + FixedSizeBinaryType.init(self, type) + self.decimal256_type = type.get() + + def __reduce__(self): + return decimal256, (self.precision, self.scale) + + @property + def precision(self): + """ + The decimal precision, in number of decimal digits (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal256(76, 38) + >>> t.precision + 76 + """ + return self.decimal256_type.precision() + + @property + def scale(self): + """ + The decimal scale (an integer). + + Examples + -------- + >>> import pyarrow as pa + >>> t = pa.decimal256(76, 38) + >>> t.scale + 38 + """ + return self.decimal256_type.scale() + + +cdef class RunEndEncodedType(DataType): + """ + Concrete class for run-end encoded types. + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.run_end_encoded_type = type.get() + + def __reduce__(self): + return run_end_encoded, (self.run_end_type, self.value_type) + + @property + def run_end_type(self): + return pyarrow_wrap_data_type(self.run_end_encoded_type.run_end_type()) + + @property + def value_type(self): + return pyarrow_wrap_data_type(self.run_end_encoded_type.value_type()) + + +cdef class BaseExtensionType(DataType): + """ + Concrete base class for extension types. 
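+
+    An extension type annotates an underlying ``storage_type`` with an
+    ``extension_name`` and, possibly, serialized metadata; ``wrap_array``
+    can be used to view existing storage data as the extension type.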
+ """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + DataType.init(self, type) + self.ext_type = type.get() + + def __arrow_ext_class__(self): + """ + The associated array extension class + """ + return ExtensionArray + + def __arrow_ext_scalar_class__(self): + """ + The associated scalar class + """ + return ExtensionScalar + + @property + def extension_name(self): + """ + The extension type name. + """ + return frombytes(self.ext_type.extension_name()) + + @property + def storage_type(self): + """ + The underlying storage type. + """ + return pyarrow_wrap_data_type(self.ext_type.storage_type()) + + def wrap_array(self, storage): + """ + Wrap the given storage array as an extension array. + + Parameters + ---------- + storage : Array or ChunkedArray + + Returns + ------- + array : Array or ChunkedArray + Extension array wrapping the storage array + """ + cdef: + shared_ptr[CDataType] c_storage_type + + if isinstance(storage, Array): + c_storage_type = ( storage).ap.type() + elif isinstance(storage, ChunkedArray): + c_storage_type = ( storage).chunked_array.type() + else: + raise TypeError( + f"Expected array or chunked array, got {storage.__class__}") + + if not c_storage_type.get().Equals(deref(self.ext_type) + .storage_type(), False): + raise TypeError( + f"Incompatible storage type for {self}: " + f"expected {self.storage_type}, got {storage.type}") + + if isinstance(storage, Array): + return pyarrow_wrap_array( + self.ext_type.WrapArray( + self.sp_type, ( storage).sp_array)) + else: + return pyarrow_wrap_chunked_array( + self.ext_type.WrapArray( + self.sp_type, ( storage).sp_chunked_array)) + + +cdef class ExtensionType(BaseExtensionType): + """ + Concrete base class for Python-defined extension types. + + Parameters + ---------- + storage_type : DataType + The underlying storage type for the extension type. + extension_name : str + A unique name distinguishing this extension type. The name will be + used when deserializing IPC data. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... + + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Create an instance of UuidType extension type: + + >>> uuid_type = UuidType() + + Inspect the extension type: + + >>> uuid_type.extension_name + 'my_package.uuid' + >>> uuid_type.storage_type + FixedSizeBinaryType(fixed_size_binary[16]) + + Wrap an array as an extension array: + + >>> import uuid + >>> storage_array = pa.array([uuid.uuid4().bytes for _ in range(4)], pa.binary(16)) + >>> uuid_type.wrap_array(storage_array) + + [ + ... + ] + + Or do the same with creating an ExtensionArray: + + >>> pa.ExtensionArray.from_storage(uuid_type, storage_array) + + [ + ... 
+ ] + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + + def __cinit__(self): + if type(self) is ExtensionType: + raise TypeError("Can only instantiate subclasses of " + "ExtensionType") + + def __init__(self, DataType storage_type, extension_name): + """ + Initialize an extension type instance. + + This should be called at the end of the subclass' + ``__init__`` method. + """ + cdef: + shared_ptr[CExtensionType] cpy_ext_type + c_string c_extension_name + + c_extension_name = tobytes(extension_name) + + assert storage_type is not None + check_status(CPyExtensionType.FromClass( + storage_type.sp_type, c_extension_name, type(self), + &cpy_ext_type)) + self.init( cpy_ext_type) + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + BaseExtensionType.init(self, type) + self.cpy_ext_type = type.get() + # Store weakref and serialized version of self on C++ type instance + check_status(self.cpy_ext_type.SetInstance(self)) + + def __eq__(self, other): + # Default implementation to avoid infinite recursion through + # DataType.__eq__ -> ExtensionType::ExtensionEquals -> DataType.__eq__ + if isinstance(other, ExtensionType): + return (type(self) == type(other) and + self.extension_name == other.extension_name and + self.storage_type == other.storage_type) + else: + return NotImplemented + + def __repr__(self): + fmt = '{0.__class__.__name__}({1})' + return fmt.format(self, repr(self.storage_type)) + + def __arrow_ext_serialize__(self): + """ + Serialized representation of metadata to reconstruct the type object. + + This method should return a bytes object, and those serialized bytes + are stored in the custom metadata of the Field holding an extension + type in an IPC message. + The bytes are passed to ``__arrow_ext_deserialize`` and should hold + sufficient information to reconstruct the data type instance. + """ + return NotImplementedError + + @classmethod + def __arrow_ext_deserialize__(self, storage_type, serialized): + """ + Return an extension type instance from the storage type and serialized + metadata. + + This method should return an instance of the ExtensionType subclass + that matches the passed storage type and serialized metadata (the + return value of ``__arrow_ext_serialize__``). + """ + return NotImplementedError + + def __reduce__(self): + return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) + + def __arrow_ext_class__(self): + """Return an extension array class to be used for building or + deserializing arrays with this extension type. + + This method should return a subclass of the ExtensionArray class. By + default, if not specialized in the extension implementation, an + extension type array will be a built-in ExtensionArray instance. + """ + return ExtensionArray + + def __arrow_ext_scalar_class__(self): + """Return an extension scalar class for building scalars with this + extension type. + + This method should return subclass of the ExtensionScalar class. By + default, if not specialized in the extension implementation, an + extension type scalar will be a built-in ExtensionScalar instance. + """ + return ExtensionScalar + + +cdef class FixedShapeTensorType(BaseExtensionType): + """ + Concrete class for fixed shape tensor extension type. 
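+
+    Each array element is a tensor of fixed ``shape`` whose values are
+    stored as a fixed-size list of ``value_type``; ``dim_names`` and
+    ``permutation``, if set, describe the naming and logical ordering of
+    the dimensions.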
+ + Examples + -------- + Create an instance of fixed shape tensor extension type: + + >>> import pyarrow as pa + >>> pa.fixed_shape_tensor(pa.int32(), [2, 2]) + FixedShapeTensorType(extension) + + Create an instance of fixed shape tensor extension type with + permutation: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... permutation=[0, 2, 1]) + >>> tensor_type.permutation + [0, 2, 1] + """ + + cdef void init(self, const shared_ptr[CDataType]& type) except *: + BaseExtensionType.init(self, type) + self.tensor_ext_type = type.get() + + @property + def value_type(self): + """ + Data type of an individual tensor. + """ + return pyarrow_wrap_data_type(self.tensor_ext_type.value_type()) + + @property + def shape(self): + """ + Shape of the tensors. + """ + return self.tensor_ext_type.shape() + + @property + def dim_names(self): + """ + Explicit names of the dimensions. + """ + list_of_bytes = self.tensor_ext_type.dim_names() + if len(list_of_bytes) != 0: + return [frombytes(x) for x in list_of_bytes] + else: + return None + + @property + def permutation(self): + """ + Indices of the dimensions ordering. + """ + indices = self.tensor_ext_type.permutation() + if len(indices) != 0: + return indices + else: + return None + + def __arrow_ext_serialize__(self): + """ + Serialized representation of metadata to reconstruct the type object. + """ + return self.tensor_ext_type.Serialize() + + @classmethod + def __arrow_ext_deserialize__(self, storage_type, serialized): + """ + Return an FixedShapeTensor type instance from the storage type and serialized + metadata. + """ + return self.tensor_ext_type.Deserialize(storage_type, serialized) + + def __arrow_ext_class__(self): + return FixedShapeTensorArray + + def __reduce__(self): + return fixed_shape_tensor, (self.value_type, self.shape, + self.dim_names, self.permutation) + + +_py_extension_type_auto_load = False + + +cdef class PyExtensionType(ExtensionType): + """ + Concrete base class for Python-defined extension types based on pickle + for (de)serialization. + + .. warning:: + This class is deprecated and its deserialization is disabled by default. + :class:`ExtensionType` is recommended instead. + + Parameters + ---------- + storage_type : DataType + The storage type for which the extension is built. + """ + + def __cinit__(self): + if type(self) is PyExtensionType: + raise TypeError("Can only instantiate subclasses of " + "PyExtensionType") + + def __init__(self, DataType storage_type): + warnings.warn( + "pyarrow.PyExtensionType is deprecated " + "and will refuse deserialization by default. 
" + "Instead, please derive from pyarrow.ExtensionType and implement " + "your own serialization mechanism.", + FutureWarning) + ExtensionType.__init__(self, storage_type, "arrow.py_extension_type") + + def __reduce__(self): + raise NotImplementedError("Please implement {0}.__reduce__" + .format(type(self).__name__)) + + def __arrow_ext_serialize__(self): + return pickle.dumps(self) + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + if not _py_extension_type_auto_load: + warnings.warn( + "pickle-based deserialization of pyarrow.PyExtensionType subclasses " + "is disabled by default; if you only ingest " + "trusted data files, you may re-enable this using " + "`pyarrow.PyExtensionType.set_auto_load(True)`.\n" + "In the future, Python-defined extension subclasses should " + "derive from pyarrow.ExtensionType (not pyarrow.PyExtensionType) " + "and implement their own serialization mechanism.\n", + RuntimeWarning) + return UnknownExtensionType(storage_type, serialized) + try: + ty = pickle.loads(serialized) + except Exception: + # For some reason, it's impossible to deserialize the + # ExtensionType instance. Perhaps the serialized data is + # corrupt, or more likely the type is being deserialized + # in an environment where the original Python class or module + # is not available. Fall back on a generic BaseExtensionType. + return UnknownExtensionType(storage_type, serialized) + + if ty.storage_type != storage_type: + raise TypeError("Expected storage type {0} but got {1}" + .format(ty.storage_type, storage_type)) + return ty + + # XXX Cython marks extension types as immutable, so cannot expose this + # as a writable class attribute. + @classmethod + def set_auto_load(cls, value): + """ + Enable or disable auto-loading of serialized PyExtensionType instances. + + Parameters + ---------- + value : bool + Whether to enable auto-loading. + """ + global _py_extension_type_auto_load + assert isinstance(value, bool) + _py_extension_type_auto_load = value + + +cdef class UnknownExtensionType(PyExtensionType): + """ + A concrete class for Python-defined extension types that refer to + an unknown Python implementation. + + Parameters + ---------- + storage_type : DataType + The storage type for which the extension is built. + serialized : bytes + The serialised output. + """ + + cdef: + bytes serialized + + def __init__(self, DataType storage_type, serialized): + self.serialized = serialized + PyExtensionType.__init__(self, storage_type) + + def __arrow_ext_serialize__(self): + return self.serialized + + +_python_extension_types_registry = [] + + +def register_extension_type(ext_type): + """ + Register a Python extension type. + + Registration is based on the extension name (so different registered types + need unique extension names). Registration needs an extension type + instance, but then works for any instance of the same subclass regardless + of parametrization of the type. + + Parameters + ---------- + ext_type : BaseExtensionType instance + The ExtensionType subclass to register. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... 
def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... + + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + cdef: + DataType _type = ensure_type(ext_type, allow_none=False) + + if not isinstance(_type, BaseExtensionType): + raise TypeError("Only extension types can be registered") + + # register on the C++ side + check_status( + RegisterPyExtensionType( _type.sp_type)) + + # register on the python side + _python_extension_types_registry.append(_type) + + +def unregister_extension_type(type_name): + """ + Unregister a Python extension type. + + Parameters + ---------- + type_name : str + The name of the ExtensionType subclass to unregister. + + Examples + -------- + Define a UuidType extension type subclassing ExtensionType: + + >>> import pyarrow as pa + >>> class UuidType(pa.ExtensionType): + ... def __init__(self): + ... pa.ExtensionType.__init__(self, pa.binary(16), "my_package.uuid") + ... def __arrow_ext_serialize__(self): + ... # since we don't have a parameterized type, we don't need extra + ... # metadata to be deserialized + ... return b'' + ... @classmethod + ... def __arrow_ext_deserialize__(self, storage_type, serialized): + ... # return an instance of this subclass given the serialized + ... # metadata. + ... return UuidType() + ... + + Register the extension type: + + >>> pa.register_extension_type(UuidType()) + + Unregister the extension type: + + >>> pa.unregister_extension_type("my_package.uuid") + """ + cdef: + c_string c_type_name = tobytes(type_name) + check_status(UnregisterPyExtensionType(c_type_name)) + + +cdef class KeyValueMetadata(_Metadata, Mapping): + """ + KeyValueMetadata + + Parameters + ---------- + __arg0__ : dict + A dict of the key-value metadata + **kwargs : optional + additional key-value metadata + """ + + def __init__(self, __arg0__=None, **kwargs): + cdef: + vector[c_string] keys, values + shared_ptr[const CKeyValueMetadata] result + + items = [] + if __arg0__ is not None: + other = (__arg0__.items() if isinstance(__arg0__, Mapping) + else __arg0__) + items.extend((tobytes(k), v) for k, v in other) + + prior_keys = {k for k, v in items} + for k, v in kwargs.items(): + k = tobytes(k) + if k in prior_keys: + raise KeyError("Duplicate key {}, " + "use pass all items as list of tuples if you " + "intend to have duplicate keys") + items.append((k, v)) + + keys.reserve(len(items)) + for key, value in items: + keys.push_back(tobytes(key)) + values.push_back(tobytes(value)) + result.reset(new CKeyValueMetadata(move(keys), move(values))) + self.init(result) + + cdef void init(self, const shared_ptr[const CKeyValueMetadata]& wrapped): + self.wrapped = wrapped + self.metadata = wrapped.get() + + @staticmethod + cdef wrap(const shared_ptr[const CKeyValueMetadata]& sp): + cdef KeyValueMetadata self = KeyValueMetadata.__new__(KeyValueMetadata) + self.init(sp) + return self + + cdef inline shared_ptr[const CKeyValueMetadata] unwrap(self) nogil: + return self.wrapped + + def equals(self, KeyValueMetadata other): + """ + Parameters + ---------- + other : pyarrow.KeyValueMetadata + + Returns + ------- + bool + """ + return self.metadata.Equals(deref(other.wrapped)) + + def __repr__(self): + return str(self) + + def __str__(self): + return frombytes(self.metadata.ToString(), safe=True) + + def __eq__(self, other): + try: + 
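+            # First try the strongly typed comparison; if `other` is not a
+            # KeyValueMetadata this raises TypeError and we fall back to
+            # coercing Mapping instances below.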
return self.equals(other) + except TypeError: + pass + + if isinstance(other, Mapping): + try: + other = KeyValueMetadata(other) + return self.equals(other) + except TypeError: + pass + + return NotImplemented + + def __len__(self): + return self.metadata.size() + + def __contains__(self, key): + return self.metadata.Contains(tobytes(key)) + + def __getitem__(self, key): + return GetResultValue(self.metadata.Get(tobytes(key))) + + def __iter__(self): + return self.keys() + + def __reduce__(self): + return KeyValueMetadata, (list(self.items()),) + + def key(self, i): + """ + Parameters + ---------- + i : int + + Returns + ------- + byte + """ + return self.metadata.key(i) + + def value(self, i): + """ + Parameters + ---------- + i : int + + Returns + ------- + byte + """ + return self.metadata.value(i) + + def keys(self): + for i in range(self.metadata.size()): + yield self.metadata.key(i) + + def values(self): + for i in range(self.metadata.size()): + yield self.metadata.value(i) + + def items(self): + for i in range(self.metadata.size()): + yield (self.metadata.key(i), self.metadata.value(i)) + + def get_all(self, key): + """ + Parameters + ---------- + key : str + + Returns + ------- + list[byte] + """ + key = tobytes(key) + return [v for k, v in self.items() if k == key] + + def to_dict(self): + """ + Convert KeyValueMetadata to dict. If a key occurs twice, the value for + the first one is returned + """ + cdef object key # to force coercion to Python + result = ordered_dict() + for i in range(self.metadata.size()): + key = self.metadata.key(i) + if key not in result: + result[key] = self.metadata.value(i) + return result + + +cpdef KeyValueMetadata ensure_metadata(object meta, c_bool allow_none=False): + if allow_none and meta is None: + return None + elif isinstance(meta, KeyValueMetadata): + return meta + else: + return KeyValueMetadata(meta) + + +cdef class Field(_Weakrefable): + """ + A named field, with a data type, nullability, and optional metadata. + + Notes + ----- + Do not use this class's constructor directly; use pyarrow.field + + Examples + -------- + Create an instance of pyarrow.Field: + + >>> import pyarrow as pa + >>> pa.field('key', pa.int32()) + pyarrow.Field + >>> pa.field('key', pa.int32(), nullable=False) + pyarrow.Field + >>> field = pa.field('key', pa.int32(), + ... metadata={"key": "Something important"}) + >>> field + pyarrow.Field + >>> field.metadata + {b'key': b'Something important'} + + Use the field to create a struct type: + + >>> pa.struct([field]) + StructType(struct) + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call Field's constructor directly, use " + "`pyarrow.field` instead.") + + cdef void init(self, const shared_ptr[CField]& field): + self.sp_field = field + self.field = field.get() + self.type = pyarrow_wrap_data_type(field.get().type()) + + def equals(self, Field other, bint check_metadata=False): + """ + Test if this field is equal to the other + + Parameters + ---------- + other : pyarrow.Field + check_metadata : bool, default False + Whether Field metadata equality should be checked as well. 
+ + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('key', pa.int32()) + >>> f2 = pa.field('key', pa.int32(), nullable=False) + >>> f1.equals(f2) + False + >>> f1.equals(f1) + True + """ + return self.field.Equals(deref(other.field), check_metadata) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __reduce__(self): + return field, (self.name, self.type, self.nullable, self.metadata) + + def __str__(self): + return 'pyarrow.Field<{0}>'.format( + frombytes(self.field.ToString(), safe=True)) + + def __repr__(self): + return self.__str__() + + def __hash__(self): + return hash((self.field.name(), self.type, self.field.nullable())) + + @property + def nullable(self): + """ + The field nullability. + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('key', pa.int32()) + >>> f2 = pa.field('key', pa.int32(), nullable=False) + >>> f1.nullable + True + >>> f2.nullable + False + """ + return self.field.nullable() + + @property + def name(self): + """ + The field name. + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field.name + 'key' + """ + return frombytes(self.field.name()) + + @property + def metadata(self): + """ + The field metadata. + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32(), + ... metadata={"key": "Something important"}) + >>> field.metadata + {b'key': b'Something important'} + """ + wrapped = pyarrow_wrap_metadata(self.field.metadata()) + if wrapped is not None: + return wrapped.to_dict() + else: + return wrapped + + def with_metadata(self, metadata): + """ + Add metadata as dict of string keys and values to Field + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + + Create new field by adding metadata to existing one: + + >>> field_new = field.with_metadata({"key": "Something important"}) + >>> field_new + pyarrow.Field + >>> field_new.metadata + {b'key': b'Something important'} + """ + cdef shared_ptr[CField] c_field + + meta = ensure_metadata(metadata, allow_none=False) + with nogil: + c_field = self.field.WithMetadata(meta.unwrap()) + + return pyarrow_wrap_field(c_field) + + def remove_metadata(self): + """ + Create new field without metadata, if any + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32(), + ... 
metadata={"key": "Something important"}) + >>> field.metadata + {b'key': b'Something important'} + + Create new field by removing the metadata from the existing one: + + >>> field_new = field.remove_metadata() + >>> field_new.metadata + """ + cdef shared_ptr[CField] new_field + with nogil: + new_field = self.field.RemoveMetadata() + return pyarrow_wrap_field(new_field) + + def with_type(self, DataType new_type): + """ + A copy of this field with the replaced type + + Parameters + ---------- + new_type : pyarrow.DataType + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + + Create new field by replacing type of an existing one: + + >>> field_new = field.with_type(pa.int64()) + >>> field_new + pyarrow.Field + """ + cdef: + shared_ptr[CField] c_field + shared_ptr[CDataType] c_datatype + + c_datatype = pyarrow_unwrap_data_type(new_type) + with nogil: + c_field = self.field.WithType(c_datatype) + + return pyarrow_wrap_field(c_field) + + def with_name(self, name): + """ + A copy of this field with the replaced name + + Parameters + ---------- + name : str + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + + Create new field by replacing the name of an existing one: + + >>> field_new = field.with_name('lock') + >>> field_new + pyarrow.Field + """ + cdef: + shared_ptr[CField] c_field + + c_field = self.field.WithName(tobytes(name)) + + return pyarrow_wrap_field(c_field) + + def with_nullable(self, nullable): + """ + A copy of this field with the replaced nullability + + Parameters + ---------- + nullable : bool + + Returns + ------- + field: pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> field = pa.field('key', pa.int32()) + >>> field + pyarrow.Field + >>> field.nullable + True + + Create new field by replacing the nullability of an existing one: + + >>> field_new = field.with_nullable(False) + >>> field_new + pyarrow.Field + >>> field_new.nullable + False + """ + cdef: + shared_ptr[CField] field + c_bool c_nullable + + c_nullable = bool(nullable) + with nogil: + c_field = self.field.WithNullable(c_nullable) + + return pyarrow_wrap_field(c_field) + + def flatten(self): + """ + Flatten this field. If a struct field, individual child fields + will be returned with their names prefixed by the parent's name. + + Returns + ------- + fields : List[pyarrow.Field] + + Examples + -------- + >>> import pyarrow as pa + >>> f1 = pa.field('bar', pa.float64(), nullable=False) + >>> f2 = pa.field('foo', pa.int32()).with_metadata({"key": "Something important"}) + >>> ff = pa.field('ff', pa.struct([f1, f2]), nullable=False) + + Flatten a struct field: + + >>> ff + pyarrow.Field not null> + >>> ff.flatten() + [pyarrow.Field, pyarrow.Field] + """ + cdef vector[shared_ptr[CField]] flattened + with nogil: + flattened = self.field.Flatten() + return [pyarrow_wrap_field(f) for f in flattened] + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. + + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportField(deref(self.field), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import Field from a C ArrowSchema struct, given its pointer. 
+ + This is a low-level function intended for expert users. + """ + cdef void* c_ptr = _as_c_pointer(in_ptr) + with nogil: + result = GetResultValue(ImportField( c_ptr)) + return pyarrow_wrap_field(result) + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. + """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportField(deref(self.field), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a Field from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + shared_ptr[CField] c_field + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise ValueError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + c_field = GetResultValue(ImportField(c_schema)) + + return pyarrow_wrap_field(c_field) + + +cdef class Schema(_Weakrefable): + """ + A named collection of types a.k.a schema. A schema defines the + column names and types in a record batch or table data structure. + They also contain metadata about the columns. For example, schemas + converted from Pandas contain metadata about their original Pandas + types so they can be converted back to the same types. + + Warnings + -------- + Do not call this class's constructor directly. Instead use + :func:`pyarrow.schema` factory function which makes a new Arrow + Schema object. + + Examples + -------- + Create a new Arrow Schema object: + + >>> import pyarrow as pa + >>> pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()) + ... ]) + some_int: int32 + some_string: string + + Create Arrow Schema with metadata: + + >>> pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + + def __cinit__(self): + pass + + def __init__(self): + raise TypeError("Do not call Schema's constructor directly, use " + "`pyarrow.schema` instead.") + + def __len__(self): + return self.schema.num_fields() + + def __getitem__(self, key): + # access by integer index + return self._field(key) + + def __iter__(self): + for i in range(len(self)): + yield self[i] + + cdef void init(self, const vector[shared_ptr[CField]]& fields): + self.schema = new CSchema(fields) + self.sp_schema.reset(self.schema) + + cdef void init_schema(self, const shared_ptr[CSchema]& schema): + self.schema = schema.get() + self.sp_schema = schema + + def __reduce__(self): + return schema, (list(self), self.metadata) + + def __hash__(self): + return hash((tuple(self), self.metadata)) + + def __sizeof__(self): + size = 0 + if self.metadata: + for key, value in self.metadata.items(): + size += sys.getsizeof(key) + size += sys.getsizeof(value) + + return size + super(Schema, self).__sizeof__() + + @property + def pandas_metadata(self): + """ + Return deserialized-from-JSON pandas metadata field (if it exists) + + Examples + -------- + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'n_legs': [2, 4, 5, 100], + ... 
'animals': ["Flamingo", "Horse", "Brittle stars", "Centipede"]}) + >>> schema = pa.Table.from_pandas(df).schema + + Select pandas metadata field from Arrow Schema: + + >>> schema.pandas_metadata + {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, 'stop': 4, 'step': 1}], ... + """ + metadata = self.metadata + key = b'pandas' + if metadata is None or key not in metadata: + return None + + import json + return json.loads(metadata[key].decode('utf8')) + + @property + def names(self): + """ + The schema's field names. + + Returns + ------- + list of str + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Get the names of the schema's fields: + + >>> schema.names + ['n_legs', 'animals'] + """ + cdef int i + result = [] + for i in range(self.schema.num_fields()): + name = frombytes(self.schema.field(i).get().name()) + result.append(name) + return result + + @property + def types(self): + """ + The schema's field types. + + Returns + ------- + list of DataType + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Get the types of the schema's fields: + + >>> schema.types + [DataType(int64), DataType(string)] + """ + return [field.type for field in self] + + @property + def metadata(self): + """ + The schema's metadata. + + Returns + ------- + metadata: dict + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + + Get the metadata of the schema's fields: + + >>> schema.metadata + {b'n_legs': b'Number of legs per animal'} + """ + wrapped = pyarrow_wrap_metadata(self.schema.metadata()) + if wrapped is not None: + return wrapped.to_dict() + else: + return wrapped + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def empty_table(self): + """ + Provide an empty table according to the schema. + + Returns + ------- + table: pyarrow.Table + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Create an empty table with schema's fields: + + >>> schema.empty_table() + pyarrow.Table + n_legs: int64 + animals: string + ---- + n_legs: [[]] + animals: [[]] + """ + arrays = [_empty_array(field.type) for field in self] + return Table.from_arrays(arrays, schema=self) + + def equals(self, Schema other not None, bint check_metadata=False): + """ + Test if this schema is equal to the other + + Parameters + ---------- + other : pyarrow.Schema + check_metadata : bool, default False + Key/value metadata must be equal too + + Returns + ------- + is_equal : bool + + Examples + -------- + >>> import pyarrow as pa + >>> schema1 = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> schema2 = pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()) + ... 
]) + + Test two equal schemas: + + >>> schema1.equals(schema1) + True + + Test two unequal schemas: + + >>> schema1.equals(schema2) + False + """ + return self.sp_schema.get().Equals(deref(other.schema), + check_metadata) + + @classmethod + def from_pandas(cls, df, preserve_index=None): + """ + Returns implied schema from dataframe + + Parameters + ---------- + df : pandas.DataFrame + preserve_index : bool, default True + Whether to store the index as an additional column (or columns, for + MultiIndex) in the resulting `Table`. + The default of None will store the index as a column, except for + RangeIndex which is stored as metadata only. Use + ``preserve_index=True`` to force it to be stored as a column. + + Returns + ------- + pyarrow.Schema + + Examples + -------- + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({ + ... 'int': [1, 2], + ... 'str': ['a', 'b'] + ... }) + + Create an Arrow Schema from the schema of a pandas dataframe: + + >>> pa.Schema.from_pandas(df) + int: int64 + str: string + -- schema metadata -- + pandas: '{"index_columns": [{"kind": "range", "name": null, ... + """ + from pyarrow.pandas_compat import dataframe_to_types + names, types, metadata = dataframe_to_types( + df, + preserve_index=preserve_index + ) + fields = [] + for name, type_ in zip(names, types): + fields.append(field(name, type_)) + return schema(fields, metadata) + + def field(self, i): + """ + Select a field by its column name or numeric index. + + Parameters + ---------- + i : int or string + + Returns + ------- + pyarrow.Field + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Select the second field: + + >>> schema.field(1) + pyarrow.Field + + Select the field of the column named 'n_legs': + + >>> schema.field('n_legs') + pyarrow.Field + """ + if isinstance(i, (bytes, str)): + field_index = self.get_field_index(i) + if field_index < 0: + raise KeyError("Column {} does not exist in schema".format(i)) + else: + return self._field(field_index) + elif isinstance(i, int): + return self._field(i) + else: + raise TypeError("Index must either be string or integer") + + def _field(self, int i): + """ + Select a field by its numeric index. + + Parameters + ---------- + i : int + + Returns + ------- + pyarrow.Field + """ + cdef int index = _normalize_index(i, self.schema.num_fields()) + return pyarrow_wrap_field(self.schema.field(index)) + + def field_by_name(self, name): + """ + DEPRECATED + + Parameters + ---------- + name : str + + Returns + ------- + field: pyarrow.Field + """ + cdef: + vector[shared_ptr[CField]] results + + warnings.warn( + "The 'field_by_name' method is deprecated, use 'field' instead", + FutureWarning, stacklevel=2) + + results = self.schema.GetAllFieldsByName(tobytes(name)) + if results.size() == 0: + return None + elif results.size() > 1: + warnings.warn("Schema field name corresponds to more " + "than one field", UserWarning) + return None + else: + return pyarrow_wrap_field(results[0]) + + def get_field_index(self, name): + """ + Return index of the unique field with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + index : int + The index of the field with the given name; -1 if the + name isn't found or there are several fields with the given + name. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Get the index of the field named 'animals': + + >>> schema.get_field_index("animals") + 1 + + Index in case of several fields with the given name: + + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string()), + ... pa.field('animals', pa.bool_())], + ... metadata={"n_legs": "Number of legs per animal"}) + >>> schema.get_field_index("animals") + -1 + """ + return self.schema.GetFieldIndex(tobytes(name)) + + def get_all_field_indices(self, name): + """ + Return sorted list of indices for the fields with the given name. + + Parameters + ---------- + name : str + The name of the field to look up. + + Returns + ------- + indices : List[int] + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string()), + ... pa.field('animals', pa.bool_())]) + + Get the indexes of the fields named 'animals': + + >>> schema.get_all_field_indices("animals") + [1, 2] + """ + return self.schema.GetAllFieldIndices(tobytes(name)) + + def append(self, Field field): + """ + Append a field at the end of the schema. + + In contrast to Python's ``list.append()`` it does return a new + object, leaving the original Schema unmodified. + + Parameters + ---------- + field : Field + + Returns + ------- + schema: Schema + New object with appended field. + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Append a field 'extra' at the end of the schema: + + >>> schema_new = schema.append(pa.field('extra', pa.bool_())) + >>> schema_new + n_legs: int64 + animals: string + extra: bool + + Original schema is unmodified: + + >>> schema + n_legs: int64 + animals: string + """ + return self.insert(self.schema.num_fields(), field) + + def insert(self, int i, Field field): + """ + Add a field at position i to the schema. + + Parameters + ---------- + i : int + field : Field + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Insert a new field on the second position: + + >>> schema.insert(1, pa.field('extra', pa.bool_())) + n_legs: int64 + extra: bool + animals: string + """ + cdef: + shared_ptr[CSchema] new_schema + shared_ptr[CField] c_field + + c_field = field.sp_field + + with nogil: + new_schema = GetResultValue(self.schema.AddField(i, c_field)) + + return pyarrow_wrap_schema(new_schema) + + def remove(self, int i): + """ + Remove the field at index i from the schema. + + Parameters + ---------- + i : int + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Remove the second field of the schema: + + >>> schema.remove(1) + n_legs: int64 + """ + cdef shared_ptr[CSchema] new_schema + + with nogil: + new_schema = GetResultValue(self.schema.RemoveField(i)) + + return pyarrow_wrap_schema(new_schema) + + def set(self, int i, Field field): + """ + Replace a field at position i in the schema. + + Parameters + ---------- + i : int + field : Field + + Returns + ------- + schema: Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... 
pa.field('animals', pa.string())]) + + Replace the second field of the schema with a new field 'extra': + + >>> schema.set(1, pa.field('replaced', pa.bool_())) + n_legs: int64 + replaced: bool + """ + cdef: + shared_ptr[CSchema] new_schema + shared_ptr[CField] c_field + + c_field = field.sp_field + + with nogil: + new_schema = GetResultValue(self.schema.SetField(i, c_field)) + + return pyarrow_wrap_schema(new_schema) + + def add_metadata(self, metadata): + """ + DEPRECATED + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + """ + warnings.warn("The 'add_metadata' method is deprecated, use " + "'with_metadata' instead", FutureWarning, stacklevel=2) + return self.with_metadata(metadata) + + def with_metadata(self, metadata): + """ + Add metadata as dict of string keys and values to Schema + + Parameters + ---------- + metadata : dict + Keys and values must be string-like / coercible to bytes + + Returns + ------- + schema : pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Add metadata to existing schema field: + + >>> schema.with_metadata({"n_legs": "Number of legs per animal"}) + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + """ + cdef shared_ptr[CSchema] c_schema + + meta = ensure_metadata(metadata, allow_none=False) + with nogil: + c_schema = self.schema.WithMetadata(meta.unwrap()) + + return pyarrow_wrap_schema(c_schema) + + def serialize(self, memory_pool=None): + """ + Write Schema to Buffer as encapsulated IPC message + + Parameters + ---------- + memory_pool : MemoryPool, default None + Uses default memory pool if not specified + + Returns + ------- + serialized : Buffer + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())]) + + Write schema to Buffer: + + >>> schema.serialize() + + """ + cdef: + shared_ptr[CBuffer] buffer + CMemoryPool* pool = maybe_unbox_memory_pool(memory_pool) + + with nogil: + buffer = GetResultValue(SerializeSchema(deref(self.schema), + pool)) + return pyarrow_wrap_buffer(buffer) + + def remove_metadata(self): + """ + Create new schema without metadata, if any + + Returns + ------- + schema : pyarrow.Schema + + Examples + -------- + >>> import pyarrow as pa + >>> schema = pa.schema([ + ... pa.field('n_legs', pa.int64()), + ... pa.field('animals', pa.string())], + ... 
metadata={"n_legs": "Number of legs per animal"}) + >>> schema + n_legs: int64 + animals: string + -- schema metadata -- + n_legs: 'Number of legs per animal' + + Create a new schema with removing the metadata from the original: + + >>> schema.remove_metadata() + n_legs: int64 + animals: string + """ + cdef shared_ptr[CSchema] new_schema + with nogil: + new_schema = self.schema.RemoveMetadata() + return pyarrow_wrap_schema(new_schema) + + def to_string(self, truncate_metadata=True, show_field_metadata=True, + show_schema_metadata=True): + """ + Return human-readable representation of Schema + + Parameters + ---------- + truncate_metadata : boolean, default True + Limit metadata key/value display to a single line of ~80 characters + or less + show_field_metadata : boolean, default True + Display Field-level KeyValueMetadata + show_schema_metadata : boolean, default True + Display Schema-level KeyValueMetadata + + Returns + ------- + str : the formatted output + """ + cdef: + c_string result + PrettyPrintOptions options = PrettyPrintOptions.Defaults() + + options.indent = 0 + options.truncate_metadata = truncate_metadata + options.show_field_metadata = show_field_metadata + options.show_schema_metadata = show_schema_metadata + + with nogil: + check_status( + PrettyPrint( + deref(self.schema), + options, + &result + ) + ) + + return frombytes(result, safe=True) + + def _export_to_c(self, out_ptr): + """ + Export to a C ArrowSchema struct, given its pointer. + + Be careful: if you don't pass the ArrowSchema struct to a consumer, + its memory will leak. This is a low-level function intended for + expert users. + """ + check_status(ExportSchema(deref(self.schema), + _as_c_pointer(out_ptr))) + + @staticmethod + def _import_from_c(in_ptr): + """ + Import Schema from a C ArrowSchema struct, given its pointer. + + This is a low-level function intended for expert users. + """ + cdef void* c_ptr = _as_c_pointer(in_ptr) + with nogil: + result = GetResultValue(ImportSchema( c_ptr)) + return pyarrow_wrap_schema(result) + + def __str__(self): + return self.to_string() + + def __repr__(self): + return self.__str__() + + def __arrow_c_schema__(self): + """ + Export to a ArrowSchema PyCapsule + + Unlike _export_to_c, this will not leak memory if the capsule is not used. + """ + cdef ArrowSchema* c_schema + capsule = alloc_c_schema(&c_schema) + + with nogil: + check_status(ExportSchema(deref(self.schema), c_schema)) + + return capsule + + @staticmethod + def _import_from_c_capsule(schema): + """ + Import a Schema from a ArrowSchema PyCapsule + + Parameters + ---------- + schema : PyCapsule + A valid PyCapsule with name 'arrow_schema' containing an + ArrowSchema pointer. + """ + cdef: + ArrowSchema* c_schema + + if not PyCapsule_IsValid(schema, 'arrow_schema'): + raise ValueError( + "Not an ArrowSchema object" + ) + c_schema = PyCapsule_GetPointer(schema, 'arrow_schema') + + with nogil: + result = GetResultValue(ImportSchema(c_schema)) + + return pyarrow_wrap_schema(result) + + +def unify_schemas(schemas, *, promote_options="default"): + """ + Unify schemas by merging fields by name. + + The resulting schema will contain the union of fields from all schemas. + Fields with the same name will be merged. Note that two fields with + different types will fail merging by default. + + - The unified field will inherit the metadata from the schema where + that field is first defined. + - The first N fields in the schema will be ordered the same as the + N fields in the first schema. 
+ + The resulting schema will inherit its metadata from the first input + schema. + + Parameters + ---------- + schemas : list of Schema + Schemas to merge into a single one. + promote_options : str, default default + Accepts strings "default" and "permissive". + Default: null and only null can be unified with another type. + Permissive: types are promoted to the greater common denominator. + + Returns + ------- + Schema + + Raises + ------ + ArrowInvalid : + If any input schema contains fields with duplicate names. + If Fields of the same name are not mergeable. + """ + cdef: + Schema schema + CField.CMergeOptions c_options + vector[shared_ptr[CSchema]] c_schemas + for schema in schemas: + if not isinstance(schema, Schema): + raise TypeError("Expected Schema, got {}".format(type(schema))) + c_schemas.push_back(pyarrow_unwrap_schema(schema)) + + if promote_options == "default": + c_options = CField.CMergeOptions.Defaults() + elif promote_options == "permissive": + c_options = CField.CMergeOptions.Permissive() + else: + raise ValueError(f"Invalid merge mode: {promote_options}") + + return pyarrow_wrap_schema( + GetResultValue(UnifySchemas(c_schemas, c_options))) + + +cdef dict _type_cache = {} + + +cdef DataType primitive_type(Type type): + if type in _type_cache: + return _type_cache[type] + + cdef DataType out = DataType.__new__(DataType) + out.init(GetPrimitiveType(type)) + + _type_cache[type] = out + return out + + +# ----------------------------------------------------------- +# Type factory functions + + +def field(name, type, bint nullable=True, metadata=None): + """ + Create a pyarrow.Field instance. + + Parameters + ---------- + name : str or bytes + Name of the field. + type : pyarrow.DataType + Arrow datatype of the field. + nullable : bool, default True + Whether the field's values are nullable. + metadata : dict, default None + Optional field metadata, the keys and values must be coercible to + bytes. + + Returns + ------- + field : pyarrow.Field + + Examples + -------- + Create an instance of pyarrow.Field: + + >>> import pyarrow as pa + >>> pa.field('key', pa.int32()) + pyarrow.Field + >>> pa.field('key', pa.int32(), nullable=False) + pyarrow.Field + + >>> field = pa.field('key', pa.int32(), + ... metadata={"key": "Something important"}) + >>> field + pyarrow.Field + >>> field.metadata + {b'key': b'Something important'} + + Use the field to create a struct type: + + >>> pa.struct([field]) + StructType(struct) + """ + cdef: + Field result = Field.__new__(Field) + DataType _type = ensure_type(type, allow_none=False) + shared_ptr[const CKeyValueMetadata] c_meta + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + if _type.type.id() == _Type_NA and not nullable: + raise ValueError("A null type field may not be non-nullable") + + result.sp_field.reset( + new CField(tobytes(name), _type.sp_type, nullable, c_meta) + ) + result.field = result.sp_field.get() + result.type = _type + + return result + + +cdef set PRIMITIVE_TYPES = set([ + _Type_NA, _Type_BOOL, + _Type_UINT8, _Type_INT8, + _Type_UINT16, _Type_INT16, + _Type_UINT32, _Type_INT32, + _Type_UINT64, _Type_INT64, + _Type_TIMESTAMP, _Type_DATE32, + _Type_TIME32, _Type_TIME64, + _Type_DATE64, + _Type_HALF_FLOAT, + _Type_FLOAT, + _Type_DOUBLE]) + + +def null(): + """ + Create instance of null type. 
+ + Examples + -------- + Create an instance of a null type: + + >>> import pyarrow as pa + >>> pa.null() + DataType(null) + >>> print(pa.null()) + null + + Create a ``Field`` type with a null type and a name: + + >>> pa.field('null_field', pa.null()) + pyarrow.Field + """ + return primitive_type(_Type_NA) + + +def bool_(): + """ + Create instance of boolean type. + + Examples + -------- + Create an instance of a boolean type: + + >>> import pyarrow as pa + >>> pa.bool_() + DataType(bool) + >>> print(pa.bool_()) + bool + + Create a ``Field`` type with a boolean type + and a name: + + >>> pa.field('bool_field', pa.bool_()) + pyarrow.Field + """ + return primitive_type(_Type_BOOL) + + +def uint8(): + """ + Create instance of unsigned int8 type. + + Examples + -------- + Create an instance of unsigned int8 type: + + >>> import pyarrow as pa + >>> pa.uint8() + DataType(uint8) + >>> print(pa.uint8()) + uint8 + + Create an array with unsigned int8 type: + + >>> pa.array([0, 1, 2], type=pa.uint8()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT8) + + +def int8(): + """ + Create instance of signed int8 type. + + Examples + -------- + Create an instance of int8 type: + + >>> import pyarrow as pa + >>> pa.int8() + DataType(int8) + >>> print(pa.int8()) + int8 + + Create an array with int8 type: + + >>> pa.array([0, 1, 2], type=pa.int8()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT8) + + +def uint16(): + """ + Create instance of unsigned uint16 type. + + Examples + -------- + Create an instance of unsigned int16 type: + + >>> import pyarrow as pa + >>> pa.uint16() + DataType(uint16) + >>> print(pa.uint16()) + uint16 + + Create an array with unsigned int16 type: + + >>> pa.array([0, 1, 2], type=pa.uint16()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT16) + + +def int16(): + """ + Create instance of signed int16 type. + + Examples + -------- + Create an instance of int16 type: + + >>> import pyarrow as pa + >>> pa.int16() + DataType(int16) + >>> print(pa.int16()) + int16 + + Create an array with int16 type: + + >>> pa.array([0, 1, 2], type=pa.int16()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT16) + + +def uint32(): + """ + Create instance of unsigned uint32 type. + + Examples + -------- + Create an instance of unsigned int32 type: + + >>> import pyarrow as pa + >>> pa.uint32() + DataType(uint32) + >>> print(pa.uint32()) + uint32 + + Create an array with unsigned int32 type: + + >>> pa.array([0, 1, 2], type=pa.uint32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT32) + + +def int32(): + """ + Create instance of signed int32 type. + + Examples + -------- + Create an instance of int32 type: + + >>> import pyarrow as pa + >>> pa.int32() + DataType(int32) + >>> print(pa.int32()) + int32 + + Create an array with int32 type: + + >>> pa.array([0, 1, 2], type=pa.int32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT32) + + +def uint64(): + """ + Create instance of unsigned uint64 type. + + Examples + -------- + Create an instance of unsigned int64 type: + + >>> import pyarrow as pa + >>> pa.uint64() + DataType(uint64) + >>> print(pa.uint64()) + uint64 + + Create an array with unsigned uint64 type: + + >>> pa.array([0, 1, 2], type=pa.uint64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_UINT64) + + +def int64(): + """ + Create instance of signed int64 type. 
+ + Examples + -------- + Create an instance of int64 type: + + >>> import pyarrow as pa + >>> pa.int64() + DataType(int64) + >>> print(pa.int64()) + int64 + + Create an array with int64 type: + + >>> pa.array([0, 1, 2], type=pa.int64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_INT64) + + +cdef dict _timestamp_type_cache = {} +cdef dict _time_type_cache = {} +cdef dict _duration_type_cache = {} + + +cdef timeunit_to_string(TimeUnit unit): + if unit == TimeUnit_SECOND: + return 's' + elif unit == TimeUnit_MILLI: + return 'ms' + elif unit == TimeUnit_MICRO: + return 'us' + elif unit == TimeUnit_NANO: + return 'ns' + + +cdef TimeUnit string_to_timeunit(unit) except *: + if unit == 's': + return TimeUnit_SECOND + elif unit == 'ms': + return TimeUnit_MILLI + elif unit == 'us': + return TimeUnit_MICRO + elif unit == 'ns': + return TimeUnit_NANO + else: + raise ValueError(f"Invalid time unit: {unit!r}") + + +def tzinfo_to_string(tz): + """ + Converts a time zone object into a string indicating the name of a time + zone, one of: + * As used in the Olson time zone database (the "tz database" or + "tzdata"), such as "America/New_York" + * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 + + Parameters + ---------- + tz : datetime.tzinfo + Time zone object + + Returns + ------- + name : str + Time zone name + """ + return frombytes(GetResultValue(TzinfoToString(tz))) + + +def string_to_tzinfo(name): + """ + Convert a time zone name into a time zone object. + + Supported input strings are: + * As used in the Olson time zone database (the "tz database" or + "tzdata"), such as "America/New_York" + * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30 + + Parameters + ---------- + name: str + Time zone name. + + Returns + ------- + tz : datetime.tzinfo + Time zone object + """ + cdef PyObject* tz = GetResultValue(StringToTzinfo(name.encode('utf-8'))) + return PyObject_to_object(tz) + + +def timestamp(unit, tz=None): + """ + Create instance of timestamp type with resolution and optional time zone. + + Parameters + ---------- + unit : str + one of 's' [second], 'ms' [millisecond], 'us' [microsecond], or 'ns' + [nanosecond] + tz : str, default None + Time zone name. None indicates time zone naive + + Examples + -------- + Create an instance of timestamp type: + + >>> import pyarrow as pa + >>> pa.timestamp('us') + TimestampType(timestamp[us]) + >>> pa.timestamp('s', tz='America/New_York') + TimestampType(timestamp[s, tz=America/New_York]) + >>> pa.timestamp('s', tz='+07:30') + TimestampType(timestamp[s, tz=+07:30]) + + Use timestamp type when creating a scalar object: + + >>> from datetime import datetime + >>> pa.scalar(datetime(2012, 1, 1), type=pa.timestamp('s', tz='UTC')) + + >>> pa.scalar(datetime(2012, 1, 1), type=pa.timestamp('us')) + + + Returns + ------- + timestamp_type : TimestampType + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + unit_code = string_to_timeunit(unit) + + cdef TimestampType out = TimestampType.__new__(TimestampType) + + if tz is None: + out.init(ctimestamp(unit_code)) + if unit_code in _timestamp_type_cache: + return _timestamp_type_cache[unit_code] + _timestamp_type_cache[unit_code] = out + else: + if not isinstance(tz, (bytes, str)): + tz = tzinfo_to_string(tz) + + c_timezone = tobytes(tz) + out.init(ctimestamp(unit_code, c_timezone)) + + return out + + +def time32(unit): + """ + Create instance of 32-bit time (time of day) type with unit resolution. 
+ + Parameters + ---------- + unit : str + one of 's' [second], or 'ms' [millisecond] + + Returns + ------- + type : pyarrow.Time32Type + + Examples + -------- + >>> import pyarrow as pa + >>> pa.time32('s') + Time32Type(time32[s]) + >>> pa.time32('ms') + Time32Type(time32[ms]) + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + if unit == 's': + unit_code = TimeUnit_SECOND + elif unit == 'ms': + unit_code = TimeUnit_MILLI + else: + raise ValueError(f"Invalid time unit for time32: {unit!r}") + + if unit_code in _time_type_cache: + return _time_type_cache[unit_code] + + cdef Time32Type out = Time32Type.__new__(Time32Type) + + out.init(ctime32(unit_code)) + _time_type_cache[unit_code] = out + + return out + + +def time64(unit): + """ + Create instance of 64-bit time (time of day) type with unit resolution. + + Parameters + ---------- + unit : str + One of 'us' [microsecond], or 'ns' [nanosecond]. + + Returns + ------- + type : pyarrow.Time64Type + + Examples + -------- + >>> import pyarrow as pa + >>> pa.time64('us') + Time64Type(time64[us]) + >>> pa.time64('ns') + Time64Type(time64[ns]) + """ + cdef: + TimeUnit unit_code + c_string c_timezone + + if unit == 'us': + unit_code = TimeUnit_MICRO + elif unit == 'ns': + unit_code = TimeUnit_NANO + else: + raise ValueError(f"Invalid time unit for time64: {unit!r}") + + if unit_code in _time_type_cache: + return _time_type_cache[unit_code] + + cdef Time64Type out = Time64Type.__new__(Time64Type) + + out.init(ctime64(unit_code)) + _time_type_cache[unit_code] = out + + return out + + +def duration(unit): + """ + Create instance of a duration type with unit resolution. + + Parameters + ---------- + unit : str + One of 's' [second], 'ms' [millisecond], 'us' [microsecond], or + 'ns' [nanosecond]. + + Returns + ------- + type : pyarrow.DurationType + + Examples + -------- + Create an instance of duration type: + + >>> import pyarrow as pa + >>> pa.duration('us') + DurationType(duration[us]) + >>> pa.duration('s') + DurationType(duration[s]) + + Create an array with duration type: + + >>> pa.array([0, 1, 2], type=pa.duration('s')) + + [ + 0, + 1, + 2 + ] + """ + cdef: + TimeUnit unit_code + + unit_code = string_to_timeunit(unit) + + if unit_code in _duration_type_cache: + return _duration_type_cache[unit_code] + + cdef DurationType out = DurationType.__new__(DurationType) + + out.init(cduration(unit_code)) + _duration_type_cache[unit_code] = out + + return out + + +def month_day_nano_interval(): + """ + Create instance of an interval type representing months, days and + nanoseconds between two dates. + + Examples + -------- + Create an instance of an month_day_nano_interval type: + + >>> import pyarrow as pa + >>> pa.month_day_nano_interval() + DataType(month_day_nano_interval) + + Create a scalar with month_day_nano_interval type: + + >>> pa.scalar((1, 15, -30), type=pa.month_day_nano_interval()) + + """ + return primitive_type(_Type_INTERVAL_MONTH_DAY_NANO) + + +def date32(): + """ + Create instance of 32-bit date (days since UNIX epoch 1970-01-01). + + Examples + -------- + Create an instance of 32-bit date type: + + >>> import pyarrow as pa + >>> pa.date32() + DataType(date32[day]) + + Create a scalar with 32-bit date type: + + >>> from datetime import date + >>> pa.scalar(date(2012, 1, 1), type=pa.date32()) + + """ + return primitive_type(_Type_DATE32) + + +def date64(): + """ + Create instance of 64-bit date (milliseconds since UNIX epoch 1970-01-01). 
+ + Examples + -------- + Create an instance of 64-bit date type: + + >>> import pyarrow as pa + >>> pa.date64() + DataType(date64[ms]) + + Create a scalar with 64-bit date type: + + >>> from datetime import datetime + >>> pa.scalar(datetime(2012, 1, 1), type=pa.date64()) + + """ + return primitive_type(_Type_DATE64) + + +def float16(): + """ + Create half-precision floating point type. + + Examples + -------- + Create an instance of float16 type: + + >>> import pyarrow as pa + >>> pa.float16() + DataType(halffloat) + >>> print(pa.float16()) + halffloat + + Create an array with float16 type: + + >>> arr = np.array([1.5, np.nan], dtype=np.float16) + >>> a = pa.array(arr, type=pa.float16()) + >>> a + + [ + 15872, + 32256 + ] + >>> a.to_pylist() + [1.5, nan] + """ + return primitive_type(_Type_HALF_FLOAT) + + +def float32(): + """ + Create single-precision floating point type. + + Examples + -------- + Create an instance of float32 type: + + >>> import pyarrow as pa + >>> pa.float32() + DataType(float) + >>> print(pa.float32()) + float + + Create an array with float32 type: + + >>> pa.array([0.0, 1.0, 2.0], type=pa.float32()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_FLOAT) + + +def float64(): + """ + Create double-precision floating point type. + + Examples + -------- + Create an instance of float64 type: + + >>> import pyarrow as pa + >>> pa.float64() + DataType(double) + >>> print(pa.float64()) + double + + Create an array with float64 type: + + >>> pa.array([0.0, 1.0, 2.0], type=pa.float64()) + + [ + 0, + 1, + 2 + ] + """ + return primitive_type(_Type_DOUBLE) + + +cpdef DataType decimal128(int precision, int scale=0): + """ + Create decimal type with precision and scale and 128-bit width. + + Arrow decimals are fixed-point decimal numbers encoded as a scaled + integer. The precision is the number of significant digits that the + decimal type can represent; the scale is the number of digits after + the decimal point (note the scale can be negative). + + As an example, ``decimal128(7, 3)`` can exactly represent the numbers + 1234.567 and -1234.567 (encoded internally as the 128-bit integers + 1234567 and -1234567, respectively), but neither 12345.67 nor 123.4567. + + ``decimal128(5, -3)`` can exactly represent the number 12345000 + (encoded internally as the 128-bit integer 12345), but neither + 123450000 nor 1234500. + + If you need a precision higher than 38 significant digits, consider + using ``decimal256``. + + Parameters + ---------- + precision : int + Must be between 1 and 38 + scale : int + + Returns + ------- + decimal_type : Decimal128Type + + Examples + -------- + Create an instance of decimal type: + + >>> import pyarrow as pa + >>> pa.decimal128(5, 2) + Decimal128Type(decimal128(5, 2)) + + Create an array with decimal type: + + >>> import decimal + >>> a = decimal.Decimal('123.45') + >>> pa.array([a], pa.decimal128(5, 2)) + + [ + 123.45 + ] + """ + cdef shared_ptr[CDataType] decimal_type + if precision < 1 or precision > 38: + raise ValueError("precision should be between 1 and 38") + decimal_type.reset(new CDecimal128Type(precision, scale)) + return pyarrow_wrap_data_type(decimal_type) + + +cpdef DataType decimal256(int precision, int scale=0): + """ + Create decimal type with precision and scale and 256-bit width. + + Arrow decimals are fixed-point decimal numbers encoded as a scaled + integer. 
The precision is the number of significant digits that the + decimal type can represent; the scale is the number of digits after + the decimal point (note the scale can be negative). + + For most use cases, the maximum precision offered by ``decimal128`` + is sufficient, and it will result in a more compact and more efficient + encoding. ``decimal256`` is useful if you need a precision higher + than 38 significant digits. + + Parameters + ---------- + precision : int + Must be between 1 and 76 + scale : int + + Returns + ------- + decimal_type : Decimal256Type + """ + cdef shared_ptr[CDataType] decimal_type + if precision < 1 or precision > 76: + raise ValueError("precision should be between 1 and 76") + decimal_type.reset(new CDecimal256Type(precision, scale)) + return pyarrow_wrap_data_type(decimal_type) + + +def string(): + """ + Create UTF8 variable-length string type. + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.string() + DataType(string) + + and use the string type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.string()) + + [ + "foo", + "bar", + "baz" + ] + """ + return primitive_type(_Type_STRING) + + +def utf8(): + """ + Alias for string(). + + Examples + -------- + Create an instance of a string type: + + >>> import pyarrow as pa + >>> pa.utf8() + DataType(string) + + and use the string type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + + [ + "foo", + "bar", + "baz" + ] + """ + return string() + + +def binary(int length=-1): + """ + Create variable-length or fixed size binary type. + + Parameters + ---------- + length : int, optional, default -1 + If length == -1 then return a variable length binary type. If length is + greater than or equal to 0 then return a fixed size binary type of + width `length`. + + Examples + -------- + Create an instance of a variable-length binary type: + + >>> import pyarrow as pa + >>> pa.binary() + DataType(binary) + + and use the variable-length binary type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.binary()) + + [ + 666F6F, + 626172, + 62617A + ] + + Create an instance of a fixed-size binary type: + + >>> pa.binary(3) + FixedSizeBinaryType(fixed_size_binary[3]) + + and use the fixed-length binary type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.binary(3)) + + [ + 666F6F, + 626172, + 62617A + ] + """ + if length == -1: + return primitive_type(_Type_BINARY) + + cdef shared_ptr[CDataType] fixed_size_binary_type + fixed_size_binary_type.reset(new CFixedSizeBinaryType(length)) + return pyarrow_wrap_data_type(fixed_size_binary_type) + + +def large_binary(): + """ + Create large variable-length binary type. + + This data type may not be supported by all Arrow implementations. Unless + you need to represent data larger than 2GB, you should prefer binary(). + + Examples + -------- + Create an instance of large variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_binary() + DataType(large_binary) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar', 'baz'], type=pa.large_binary()) + + [ + 666F6F, + 626172, + 62617A + ] + """ + return primitive_type(_Type_LARGE_BINARY) + + +def large_string(): + """ + Create large UTF8 variable-length string type. + + This data type may not be supported by all Arrow implementations. Unless + you need to represent data larger than 2GB, you should prefer string(). 
+ + Examples + -------- + Create an instance of large UTF8 variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_string() + DataType(large_string) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar'] * 50, type=pa.large_string()) + + [ + "foo", + "bar", + ... + "foo", + "bar" + ] + """ + return primitive_type(_Type_LARGE_STRING) + + +def large_utf8(): + """ + Alias for large_string(). + + Examples + -------- + Create an instance of large UTF8 variable-length binary type: + + >>> import pyarrow as pa + >>> pa.large_utf8() + DataType(large_string) + + and use the type to create an array: + + >>> pa.array(['foo', 'bar'] * 50, type=pa.large_utf8()) + + [ + "foo", + "bar", + ... + "foo", + "bar" + ] + """ + return large_string() + + +def list_(value_type, int list_size=-1): + """ + Create ListType instance from child data type or field. + + Parameters + ---------- + value_type : DataType or Field + list_size : int, optional, default -1 + If length == -1 then return a variable length list type. If length is + greater than or equal to 0 then return a fixed size list type. + + Returns + ------- + list_type : DataType + + Examples + -------- + Create an instance of ListType: + + >>> import pyarrow as pa + >>> pa.list_(pa.string()) + ListType(list) + >>> pa.list_(pa.int32(), 2) + FixedSizeListType(fixed_size_list[2]) + + Use the ListType to create a scalar: + + >>> pa.scalar(['foo', None], type=pa.list_(pa.string(), 2)) + + + or an array: + + >>> pa.array([[1, 2], [3, 4]], pa.list_(pa.int32(), 2)) + + [ + [ + 1, + 2 + ], + [ + 3, + 4 + ] + ] + """ + cdef: + Field _field + shared_ptr[CDataType] list_type + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('List requires DataType or Field') + + if list_size == -1: + list_type.reset(new CListType(_field.sp_field)) + else: + if list_size < 0: + raise ValueError("list_size should be a positive integer") + list_type.reset(new CFixedSizeListType(_field.sp_field, list_size)) + + return pyarrow_wrap_data_type(list_type) + + +cpdef LargeListType large_list(value_type): + """ + Create LargeListType instance from child data type or field. + + This data type may not be supported by all Arrow implementations. + Unless you need to represent data larger than 2**31 elements, you should + prefer list_(). + + Parameters + ---------- + value_type : DataType or Field + + Returns + ------- + list_type : DataType + + Examples + -------- + Create an instance of LargeListType: + + >>> import pyarrow as pa + >>> pa.large_list(pa.int8()) + LargeListType(large_list) + + Use the LargeListType to create an array: + + >>> pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8())) + + [ + [ + -1, + 3 + ], + [ + -1, + 3 + ], + ... + """ + cdef: + DataType data_type + Field _field + shared_ptr[CDataType] list_type + LargeListType out = LargeListType.__new__(LargeListType) + + if isinstance(value_type, DataType): + _field = field('item', value_type) + elif isinstance(value_type, Field): + _field = value_type + else: + raise TypeError('List requires DataType or Field') + + list_type.reset(new CLargeListType(_field.sp_field)) + out.init(list_type) + return out + + +cpdef MapType map_(key_type, item_type, keys_sorted=False): + """ + Create MapType instance from key and item data types or fields. 
+ + Parameters + ---------- + key_type : DataType or Field + item_type : DataType or Field + keys_sorted : bool + + Returns + ------- + map_type : DataType + + Examples + -------- + Create an instance of MapType: + + >>> import pyarrow as pa + >>> pa.map_(pa.string(), pa.int32()) + MapType(map) + >>> pa.map_(pa.string(), pa.int32(), keys_sorted=True) + MapType(map) + + Use MapType to create an array: + + >>> data = [[{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}], [{'key': 'c', 'value': 3}]] + >>> pa.array(data, type=pa.map_(pa.string(), pa.int32(), keys_sorted=True)) + + [ + keys: + [ + "a", + "b" + ] + values: + [ + 1, + 2 + ], + keys: + [ + "c" + ] + values: + [ + 3 + ] + ] + """ + cdef: + Field _key_field + Field _item_field + shared_ptr[CDataType] map_type + MapType out = MapType.__new__(MapType) + + if isinstance(key_type, Field): + if key_type.nullable: + raise TypeError('Map key field should be non-nullable') + _key_field = key_type + else: + _key_field = field('key', ensure_type(key_type, allow_none=False), + nullable=False) + + if isinstance(item_type, Field): + _item_field = item_type + else: + _item_field = field('value', ensure_type(item_type, allow_none=False)) + + map_type.reset(new CMapType(_key_field.sp_field, _item_field.sp_field, + keys_sorted)) + out.init(map_type) + return out + + +cpdef DictionaryType dictionary(index_type, value_type, bint ordered=False): + """ + Dictionary (categorical, or simply encoded) type. + + Parameters + ---------- + index_type : DataType + value_type : DataType + ordered : bool + + Returns + ------- + type : DictionaryType + + Examples + -------- + Create an instance of dictionary type: + + >>> import pyarrow as pa + >>> pa.dictionary(pa.int64(), pa.utf8()) + DictionaryType(dictionary) + + Use dictionary type to create an array: + + >>> pa.array(["a", "b", None, "d"], pa.dictionary(pa.int64(), pa.utf8())) + + ... + -- dictionary: + [ + "a", + "b", + "d" + ] + -- indices: + [ + 0, + 1, + null, + 2 + ] + """ + cdef: + DataType _index_type = ensure_type(index_type, allow_none=False) + DataType _value_type = ensure_type(value_type, allow_none=False) + DictionaryType out = DictionaryType.__new__(DictionaryType) + shared_ptr[CDataType] dict_type + + if _index_type.id not in { + Type_INT8, Type_INT16, Type_INT32, Type_INT64, + Type_UINT8, Type_UINT16, Type_UINT32, Type_UINT64, + }: + raise TypeError("The dictionary index type should be integer.") + + dict_type.reset(new CDictionaryType(_index_type.sp_type, + _value_type.sp_type, ordered == 1)) + out.init(dict_type) + return out + + +def struct(fields): + """ + Create StructType instance from fields. + + A struct is a nested type parameterized by an ordered sequence of types + (which can all be distinct), called its fields. + + Parameters + ---------- + fields : iterable of Fields or tuples, or mapping of strings to DataTypes + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + + Examples + -------- + Create an instance of StructType from an iterable of tuples: + + >>> import pyarrow as pa + >>> fields = [ + ... ('f1', pa.int32()), + ... ('f2', pa.string()), + ... ] + >>> struct_type = pa.struct(fields) + >>> struct_type + StructType(struct) + + Retrieve a field from a StructType: + + >>> struct_type[0] + pyarrow.Field + >>> struct_type['f1'] + pyarrow.Field + + Create an instance of StructType from an iterable of Fields: + + >>> fields = [ + ... pa.field('f1', pa.int32()), + ... pa.field('f2', pa.string(), nullable=False), + ... 
] + >>> pa.struct(fields) + StructType(struct) + + Returns + ------- + type : DataType + """ + cdef: + Field py_field + vector[shared_ptr[CField]] c_fields + cdef shared_ptr[CDataType] struct_type + + if isinstance(fields, Mapping): + fields = fields.items() + + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + c_fields.push_back(py_field.sp_field) + + struct_type.reset(new CStructType(c_fields)) + return pyarrow_wrap_data_type(struct_type) + + +cdef _extract_union_params(child_fields, type_codes, + vector[shared_ptr[CField]]* c_fields, + vector[int8_t]* c_type_codes): + cdef: + Field child_field + + for child_field in child_fields: + c_fields[0].push_back(child_field.sp_field) + + if type_codes is not None: + if len(type_codes) != (c_fields.size()): + raise ValueError("type_codes should have the same length " + "as fields") + for code in type_codes: + c_type_codes[0].push_back(code) + else: + c_type_codes[0] = range(c_fields.size()) + + +def sparse_union(child_fields, type_codes=None): + """ + Create SparseUnionType from child fields. + + A sparse union is a nested type where each logical value is taken from + a single child. A buffer of 8-bit type ids indicates which child + a given logical value is to be taken from. + + In a sparse union, each child array should have the same length as the + union array, regardless of the actual number of union values that + refer to it. + + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + type_codes : list of integers, default None + + Returns + ------- + type : SparseUnionType + """ + cdef: + vector[shared_ptr[CField]] c_fields + vector[int8_t] c_type_codes + + _extract_union_params(child_fields, type_codes, + &c_fields, &c_type_codes) + + return pyarrow_wrap_data_type( + CMakeSparseUnionType(move(c_fields), move(c_type_codes))) + + +def dense_union(child_fields, type_codes=None): + """ + Create DenseUnionType from child fields. + + A dense union is a nested type where each logical value is taken from + a single child, at a specific offset. A buffer of 8-bit type ids + indicates which child a given logical value is to be taken from, + and a buffer of 32-bit offsets indicates at which physical position + in the given child array the logical value is to be taken from. + + Unlike a sparse union, a dense union allows encoding only the child array + values which are actually referred to by the union array. This is + counterbalanced by the additional footprint of the offsets buffer, and + the additional indirection cost when looking up values. + + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + type_codes : list of integers, default None + + Returns + ------- + type : DenseUnionType + """ + cdef: + vector[shared_ptr[CField]] c_fields + vector[int8_t] c_type_codes + + _extract_union_params(child_fields, type_codes, + &c_fields, &c_type_codes) + + return pyarrow_wrap_data_type( + CMakeDenseUnionType(move(c_fields), move(c_type_codes))) + + +def union(child_fields, mode, type_codes=None): + """ + Create UnionType from child fields. + + A union is a nested type where each logical value is taken from a + single child. A buffer of 8-bit type ids indicates which child + a given logical value is to be taken from. 
+ + Unions come in two flavors: sparse and dense + (see also `pyarrow.sparse_union` and `pyarrow.dense_union`). + + Parameters + ---------- + child_fields : sequence of Field values + Each field must have a UTF8-encoded name, and these field names are + part of the type metadata. + mode : str + Must be 'sparse' or 'dense' + type_codes : list of integers, default None + + Returns + ------- + type : UnionType + """ + if isinstance(mode, int): + if mode not in (_UnionMode_SPARSE, _UnionMode_DENSE): + raise ValueError("Invalid union mode {0!r}".format(mode)) + else: + if mode == 'sparse': + mode = _UnionMode_SPARSE + elif mode == 'dense': + mode = _UnionMode_DENSE + else: + raise ValueError("Invalid union mode {0!r}".format(mode)) + + if mode == _UnionMode_SPARSE: + return sparse_union(child_fields, type_codes) + else: + return dense_union(child_fields, type_codes) + + +def run_end_encoded(run_end_type, value_type): + """ + Create RunEndEncodedType from run-end and value types. + + Parameters + ---------- + run_end_type : pyarrow.DataType + The integer type of the run_ends array. Must be 'int16', 'int32', or 'int64'. + value_type : pyarrow.DataType + The type of the values array. + + Returns + ------- + type : RunEndEncodedType + """ + cdef: + DataType _run_end_type = ensure_type(run_end_type, allow_none=False) + DataType _value_type = ensure_type(value_type, allow_none=False) + shared_ptr[CDataType] ree_type + + if not _run_end_type.type.id() in [_Type_INT16, _Type_INT32, _Type_INT64]: + raise ValueError("The run_end_type should be 'int16', 'int32', or 'int64'") + ree_type = CMakeRunEndEncodedType(_run_end_type.sp_type, _value_type.sp_type) + return pyarrow_wrap_data_type(ree_type) + + +def fixed_shape_tensor(DataType value_type, shape, dim_names=None, permutation=None): + """ + Create instance of fixed shape tensor extension type with shape and optional + names of tensor dimensions and indices of the desired logical + ordering of dimensions. + + Parameters + ---------- + value_type : DataType + Data type of individual tensor elements. + shape : tuple or list of integers + The physical shape of the contained tensors. + dim_names : tuple or list of strings, default None + Explicit names to tensor dimensions. + permutation : tuple or list integers, default None + Indices of the desired ordering of the original dimensions. + The indices contain a permutation of the values ``[0, 1, .., N-1]`` where + N is the number of dimensions. The permutation indicates which dimension + of the logical layout corresponds to which dimension of the physical tensor. + For more information on this parameter see + :ref:`fixed_shape_tensor_extension`. 
+ + Examples + -------- + Create an instance of fixed shape tensor extension type: + + >>> import pyarrow as pa + >>> tensor_type = pa.fixed_shape_tensor(pa.int32(), [2, 2]) + >>> tensor_type + FixedShapeTensorType(extension) + + Inspect the data type: + + >>> tensor_type.value_type + DataType(int32) + >>> tensor_type.shape + [2, 2] + + Create a table with fixed shape tensor extension array: + + >>> arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]] + >>> storage = pa.array(arr, pa.list_(pa.int32(), 4)) + >>> tensor = pa.ExtensionArray.from_storage(tensor_type, storage) + >>> pa.table([tensor], names=["tensor_array"]) + pyarrow.Table + tensor_array: extension + ---- + tensor_array: [[[1,2,3,4],[10,20,30,40],[100,200,300,400]]] + + Create an instance of fixed shape tensor extension type with names + of tensor dimensions: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... dim_names=['C', 'H', 'W']) + >>> tensor_type.dim_names + ['C', 'H', 'W'] + + Create an instance of fixed shape tensor extension type with + permutation: + + >>> tensor_type = pa.fixed_shape_tensor(pa.int8(), (2, 2, 3), + ... permutation=[0, 2, 1]) + >>> tensor_type.permutation + [0, 2, 1] + + Returns + ------- + type : FixedShapeTensorType + """ + + cdef: + vector[int64_t] c_shape + vector[int64_t] c_permutation + vector[c_string] c_dim_names + shared_ptr[CDataType] c_tensor_ext_type + + assert value_type is not None + assert shape is not None + + for i in shape: + c_shape.push_back(i) + + if permutation is not None: + for i in permutation: + c_permutation.push_back(i) + + if dim_names is not None: + for x in dim_names: + c_dim_names.push_back(tobytes(x)) + + cdef FixedShapeTensorType out = FixedShapeTensorType.__new__(FixedShapeTensorType) + + c_tensor_ext_type = GetResultValue(CFixedShapeTensorType.Make( + value_type.sp_type, c_shape, c_permutation, c_dim_names)) + + out.init(c_tensor_ext_type) + + return out + + +cdef dict _type_aliases = { + 'null': null, + 'bool': bool_, + 'boolean': bool_, + 'i1': int8, + 'int8': int8, + 'i2': int16, + 'int16': int16, + 'i4': int32, + 'int32': int32, + 'i8': int64, + 'int64': int64, + 'u1': uint8, + 'uint8': uint8, + 'u2': uint16, + 'uint16': uint16, + 'u4': uint32, + 'uint32': uint32, + 'u8': uint64, + 'uint64': uint64, + 'f2': float16, + 'halffloat': float16, + 'float16': float16, + 'f4': float32, + 'float': float32, + 'float32': float32, + 'f8': float64, + 'double': float64, + 'float64': float64, + 'string': string, + 'str': string, + 'utf8': string, + 'binary': binary, + 'large_string': large_string, + 'large_str': large_string, + 'large_utf8': large_string, + 'large_binary': large_binary, + 'date32': date32, + 'date64': date64, + 'date32[day]': date32, + 'date64[ms]': date64, + 'time32[s]': time32('s'), + 'time32[ms]': time32('ms'), + 'time64[us]': time64('us'), + 'time64[ns]': time64('ns'), + 'timestamp[s]': timestamp('s'), + 'timestamp[ms]': timestamp('ms'), + 'timestamp[us]': timestamp('us'), + 'timestamp[ns]': timestamp('ns'), + 'duration[s]': duration('s'), + 'duration[ms]': duration('ms'), + 'duration[us]': duration('us'), + 'duration[ns]': duration('ns'), + 'month_day_nano_interval': month_day_nano_interval(), +} + + +def type_for_alias(name): + """ + Return DataType given a string alias if one exists. + + Parameters + ---------- + name : str + The alias of the DataType that should be retrieved. 
+ + Returns + ------- + type : DataType + """ + name = name.lower() + try: + alias = _type_aliases[name] + except KeyError: + raise ValueError('No type alias for {0}'.format(name)) + + if isinstance(alias, DataType): + return alias + return alias() + + +cpdef DataType ensure_type(object ty, bint allow_none=False): + if allow_none and ty is None: + return None + elif isinstance(ty, DataType): + return ty + elif isinstance(ty, str): + return type_for_alias(ty) + else: + raise TypeError('DataType expected, got {!r}'.format(type(ty))) + + +def schema(fields, metadata=None): + """ + Construct pyarrow.Schema from collection of fields. + + Parameters + ---------- + fields : iterable of Fields or tuples, or mapping of strings to DataTypes + Can also pass an object that implements the Arrow PyCapsule Protocol + for schemas (has an ``__arrow_c_schema__`` method). + metadata : dict, default None + Keys and values must be coercible to bytes. + + Examples + -------- + Create a Schema from iterable of tuples: + + >>> import pyarrow as pa + >>> pa.schema([ + ... ('some_int', pa.int32()), + ... ('some_string', pa.string()), + ... pa.field('some_required_string', pa.string(), nullable=False) + ... ]) + some_int: int32 + some_string: string + some_required_string: string not null + + Create a Schema from iterable of Fields: + + >>> pa.schema([ + ... pa.field('some_int', pa.int32()), + ... pa.field('some_string', pa.string()) + ... ]) + some_int: int32 + some_string: string + + Returns + ------- + schema : pyarrow.Schema + """ + cdef: + shared_ptr[const CKeyValueMetadata] c_meta + shared_ptr[CSchema] c_schema + Schema result + Field py_field + vector[shared_ptr[CField]] c_fields + + if isinstance(fields, Mapping): + fields = fields.items() + elif hasattr(fields, "__arrow_c_schema__"): + return Schema._import_from_c_capsule(fields.__arrow_c_schema__()) + + for item in fields: + if isinstance(item, tuple): + py_field = field(*item) + else: + py_field = item + if py_field is None: + raise TypeError("field or tuple expected, got None") + c_fields.push_back(py_field.sp_field) + + metadata = ensure_metadata(metadata, allow_none=True) + c_meta = pyarrow_unwrap_metadata(metadata) + + c_schema.reset(new CSchema(c_fields, c_meta)) + result = Schema.__new__(Schema) + result.init_schema(c_schema) + + return result + + +def from_numpy_dtype(object dtype): + """ + Convert NumPy dtype to pyarrow.DataType. + + Parameters + ---------- + dtype : the numpy dtype to convert + + + Examples + -------- + Create a pyarrow DataType from NumPy dtype: + + >>> import pyarrow as pa + >>> import numpy as np + >>> pa.from_numpy_dtype(np.dtype('float16')) + DataType(halffloat) + >>> pa.from_numpy_dtype('U') + DataType(string) + >>> pa.from_numpy_dtype(bool) + DataType(bool) + >>> pa.from_numpy_dtype(np.str_) + DataType(string) + """ + dtype = np.dtype(dtype) + return pyarrow_wrap_data_type(GetResultValue(NumPyDtypeToArrow(dtype))) + + +def is_boolean_value(object obj): + """ + Check if the object is a boolean. + + Parameters + ---------- + obj : object + The object to check + """ + return IsPyBool(obj) + + +def is_integer_value(object obj): + """ + Check if the object is an integer. + + Parameters + ---------- + obj : object + The object to check + """ + return IsPyInt(obj) + + +def is_float_value(object obj): + """ + Check if the object is a float. 
+ + Parameters + ---------- + obj : object + The object to check + """ + return IsPyFloat(obj) + + +cdef class _ExtensionRegistryNanny(_Weakrefable): + # Keep the registry alive until we have unregistered PyExtensionType + cdef: + shared_ptr[CExtensionTypeRegistry] registry + + def __cinit__(self): + self.registry = CExtensionTypeRegistry.GetGlobalRegistry() + + def release_registry(self): + self.registry.reset() + + +_registry_nanny = _ExtensionRegistryNanny() + + +def _register_py_extension_type(): + cdef: + DataType storage_type + shared_ptr[CExtensionType] cpy_ext_type + c_string c_extension_name = tobytes("arrow.py_extension_type") + + # Make a dummy C++ ExtensionType + storage_type = null() + check_status(CPyExtensionType.FromClass( + storage_type.sp_type, c_extension_name, PyExtensionType, + &cpy_ext_type)) + check_status( + RegisterPyExtensionType( cpy_ext_type)) + + +def _unregister_py_extension_types(): + # This needs to be done explicitly before the Python interpreter is + # finalized. If the C++ type is destroyed later in the process + # teardown stage, it will invoke CPython APIs such as Py_DECREF + # with a destroyed interpreter. + unregister_extension_type("arrow.py_extension_type") + for ext_type in _python_extension_types_registry: + try: + unregister_extension_type(ext_type.extension_name) + except KeyError: + pass + _registry_nanny.release_registry() + + +_register_py_extension_type() +atexit.register(_unregister_py_extension_types) + + +# +# PyCapsule export utilities +# + +cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: + cdef ArrowSchema* schema = PyCapsule_GetPointer( + schema_capsule, 'arrow_schema' + ) + if schema.release != NULL: + schema.release(schema) + + free(schema) + +cdef object alloc_c_schema(ArrowSchema** c_schema): + c_schema[0] = malloc(sizeof(ArrowSchema)) + # Ensure the capsule destructor doesn't call a random release pointer + c_schema[0].release = NULL + return PyCapsule_New(c_schema[0], 'arrow_schema', &pycapsule_schema_deleter) + + +cdef void pycapsule_array_deleter(object array_capsule) noexcept: + cdef: + ArrowArray* array + # Do not invoke the deleter on a used/moved capsule + array = cpython.PyCapsule_GetPointer( + array_capsule, 'arrow_array' + ) + if array.release != NULL: + array.release(array) + + free(array) + +cdef object alloc_c_array(ArrowArray** c_array): + c_array[0] = malloc(sizeof(ArrowArray)) + # Ensure the capsule destructor doesn't call a random release pointer + c_array[0].release = NULL + return PyCapsule_New(c_array[0], 'arrow_array', &pycapsule_array_deleter) + + +cdef void pycapsule_stream_deleter(object stream_capsule) noexcept: + cdef: + ArrowArrayStream* stream + # Do not invoke the deleter on a used/moved capsule + stream = PyCapsule_GetPointer( + stream_capsule, 'arrow_array_stream' + ) + if stream.release != NULL: + stream.release(stream) + + free(stream) + +cdef object alloc_c_stream(ArrowArrayStream** c_stream): + c_stream[0] = malloc(sizeof(ArrowArrayStream)) + # Ensure the capsule destructor doesn't call a random release pointer + c_stream[0].release = NULL + return PyCapsule_New(c_stream[0], 'arrow_array_stream', &pycapsule_stream_deleter) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/types.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/types.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7dbe4b451b962780659451c830a6736a7cb9eb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/types.py @@ -0,0 +1,293 @@ +# Licensed to the 
Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Tools for dealing with Arrow type metadata in Python
+
+
+from pyarrow.lib import (is_boolean_value,  # noqa
+                         is_integer_value,
+                         is_float_value)
+
+import pyarrow.lib as lib
+from pyarrow.util import doc
+
+
+_SIGNED_INTEGER_TYPES = {lib.Type_INT8, lib.Type_INT16, lib.Type_INT32,
+                         lib.Type_INT64}
+_UNSIGNED_INTEGER_TYPES = {lib.Type_UINT8, lib.Type_UINT16, lib.Type_UINT32,
+                           lib.Type_UINT64}
+_INTEGER_TYPES = _SIGNED_INTEGER_TYPES | _UNSIGNED_INTEGER_TYPES
+_FLOATING_TYPES = {lib.Type_HALF_FLOAT, lib.Type_FLOAT, lib.Type_DOUBLE}
+_DECIMAL_TYPES = {lib.Type_DECIMAL128, lib.Type_DECIMAL256}
+_DATE_TYPES = {lib.Type_DATE32, lib.Type_DATE64}
+_TIME_TYPES = {lib.Type_TIME32, lib.Type_TIME64}
+_INTERVAL_TYPES = {lib.Type_INTERVAL_MONTH_DAY_NANO}
+_TEMPORAL_TYPES = ({lib.Type_TIMESTAMP,
+                    lib.Type_DURATION} | _TIME_TYPES | _DATE_TYPES |
+                   _INTERVAL_TYPES)
+_UNION_TYPES = {lib.Type_SPARSE_UNION, lib.Type_DENSE_UNION}
+_NESTED_TYPES = {lib.Type_LIST, lib.Type_LARGE_LIST, lib.Type_STRUCT,
+                 lib.Type_MAP} | _UNION_TYPES
+
+
+@doc(datatype="null")
+def is_null(t):
+    """
+    Return True if value is an instance of type: {datatype}.
+
+    Parameters
+    ----------
+    t : DataType
+    """
+    return t.id == lib.Type_NA
+
+
+@doc(is_null, datatype="boolean")
+def is_boolean(t):
+    return t.id == lib.Type_BOOL
+
+
+@doc(is_null, datatype="any integer")
+def is_integer(t):
+    return t.id in _INTEGER_TYPES
+
+
+@doc(is_null, datatype="signed integer")
+def is_signed_integer(t):
+    return t.id in _SIGNED_INTEGER_TYPES
+
+
+@doc(is_null, datatype="unsigned integer")
+def is_unsigned_integer(t):
+    return t.id in _UNSIGNED_INTEGER_TYPES
+
+
+@doc(is_null, datatype="int8")
+def is_int8(t):
+    return t.id == lib.Type_INT8
+
+
+@doc(is_null, datatype="int16")
+def is_int16(t):
+    return t.id == lib.Type_INT16
+
+
+@doc(is_null, datatype="int32")
+def is_int32(t):
+    return t.id == lib.Type_INT32
+
+
+@doc(is_null, datatype="int64")
+def is_int64(t):
+    return t.id == lib.Type_INT64
+
+
+@doc(is_null, datatype="uint8")
+def is_uint8(t):
+    return t.id == lib.Type_UINT8
+
+
+@doc(is_null, datatype="uint16")
+def is_uint16(t):
+    return t.id == lib.Type_UINT16
+
+
+@doc(is_null, datatype="uint32")
+def is_uint32(t):
+    return t.id == lib.Type_UINT32
+
+
+@doc(is_null, datatype="uint64")
+def is_uint64(t):
+    return t.id == lib.Type_UINT64
+
+
+@doc(is_null, datatype="floating point numeric")
+def is_floating(t):
+    return t.id in _FLOATING_TYPES
+
+
+@doc(is_null, datatype="float16 (half-precision)")
+def is_float16(t):
+    return t.id == lib.Type_HALF_FLOAT
+
+
+@doc(is_null, datatype="float32 (single precision)")
+def is_float32(t):
+    return t.id == lib.Type_FLOAT
+
+
+@doc(is_null, datatype="float64 (double precision)")
+def is_float64(t):
+    return t.id == lib.Type_DOUBLE
+
+
+@doc(is_null, datatype="list")
+def is_list(t):
+    return t.id == lib.Type_LIST
+
+
+@doc(is_null, datatype="large list")
+def is_large_list(t):
+    return t.id == lib.Type_LARGE_LIST
+
+
+@doc(is_null, datatype="fixed size list")
+def is_fixed_size_list(t):
+    return t.id == lib.Type_FIXED_SIZE_LIST
+
+
+@doc(is_null, datatype="struct")
+def is_struct(t):
+    return t.id == lib.Type_STRUCT
+
+
+@doc(is_null, datatype="union")
+def is_union(t):
+    return t.id in _UNION_TYPES
+
+
+@doc(is_null, datatype="nested type")
+def is_nested(t):
+    return t.id in _NESTED_TYPES
+
+
+@doc(is_null, datatype="run-end encoded")
+def is_run_end_encoded(t):
+    return t.id == lib.Type_RUN_END_ENCODED
+
+
+@doc(is_null, datatype="date, time, timestamp or duration")
+def is_temporal(t):
+    return t.id in _TEMPORAL_TYPES
+
+
+@doc(is_null, datatype="timestamp")
+def is_timestamp(t):
+    return t.id == lib.Type_TIMESTAMP
+
+
+@doc(is_null, datatype="duration")
+def is_duration(t):
+    return t.id == lib.Type_DURATION
+
+
+@doc(is_null, datatype="time")
+def is_time(t):
+    return t.id in _TIME_TYPES
+
+
+@doc(is_null, datatype="time32")
+def is_time32(t):
+    return t.id == lib.Type_TIME32
+
+
+@doc(is_null, datatype="time64")
+def is_time64(t):
+    return t.id == lib.Type_TIME64
+
+
+@doc(is_null, datatype="variable-length binary")
+def is_binary(t):
+    return t.id == lib.Type_BINARY
+
+
+@doc(is_null, datatype="large variable-length binary")
+def is_large_binary(t):
+    return t.id == lib.Type_LARGE_BINARY
+
+
+@doc(method="is_string")
+def is_unicode(t):
+    """
+    Alias for {method}.
+
+    Parameters
+    ----------
+    t : DataType
+    """
+    return is_string(t)
+
+
+@doc(is_null, datatype="string (utf8 unicode)")
+def is_string(t):
+    return t.id == lib.Type_STRING
+
+
+@doc(is_unicode, method="is_large_string")
+def is_large_unicode(t):
+    return is_large_string(t)
+
+
+@doc(is_null, datatype="large string (utf8 unicode)")
+def is_large_string(t):
+    return t.id == lib.Type_LARGE_STRING
+
+
+@doc(is_null, datatype="fixed size binary")
+def is_fixed_size_binary(t):
+    return t.id == lib.Type_FIXED_SIZE_BINARY
+
+
+@doc(is_null, datatype="date")
+def is_date(t):
+    return t.id in _DATE_TYPES
+
+
+@doc(is_null, datatype="date32 (days)")
+def is_date32(t):
+    return t.id == lib.Type_DATE32
+
+
+@doc(is_null, datatype="date64 (milliseconds)")
+def is_date64(t):
+    return t.id == lib.Type_DATE64
+
+
+@doc(is_null, datatype="map")
+def is_map(t):
+    return t.id == lib.Type_MAP
+
+
+@doc(is_null, datatype="decimal")
+def is_decimal(t):
+    return t.id in _DECIMAL_TYPES
+
+
+@doc(is_null, datatype="decimal128")
+def is_decimal128(t):
+    return t.id == lib.Type_DECIMAL128
+
+
+@doc(is_null, datatype="decimal256")
+def is_decimal256(t):
+    return t.id == lib.Type_DECIMAL256
+
+
+@doc(is_null, datatype="dictionary-encoded")
+def is_dictionary(t):
+    return t.id == lib.Type_DICTIONARY
+
+
+@doc(is_null, datatype="interval")
+def is_interval(t):
+    return t.id == lib.Type_INTERVAL_MONTH_DAY_NANO
+
+
+@doc(is_null, datatype="primitive type")
+def is_primitive(t):
+    return lib._is_primitive(t.id)
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/util.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb693cd6637d108fff5816a50f9241dbb74b91a8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/util.py
@@ -0,0 +1,230 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Miscellaneous utility code
+
+import os
+import contextlib
+import functools
+import gc
+import socket
+import sys
+import textwrap
+import types
+import warnings
+
+
+_DEPR_MSG = (
+    "pyarrow.{} is deprecated as of {}, please use pyarrow.{} instead."
+)
+
+
+def doc(*docstrings, **params):
+    """
+    A decorator that takes docstring templates, concatenates them, and finally
+    performs string substitution on them.
+    This decorator will add a variable "_docstring_components" to the wrapped
+    callable to keep track of the original docstring template for potential future use.
+    If the docstring is a template, it will be saved as a string.
+    Otherwise, it will be saved as a callable and the docstring will be obtained via
+    the __doc__ attribute.
+    This decorator cannot be used on Cython classes due to a CPython constraint,
+    which enforces the __doc__ attribute to be read-only.
+    See https://github.com/python/cpython/issues/91309
+
+    Parameters
+    ----------
+    *docstrings : None, str, or callable
+        The string / docstring / docstring template to be prepended in order
+        before the default docstring under the callable.
+    **params
+        The key/value pairs used to format the docstring template.
+    """
+
+    def decorator(decorated):
+        docstring_components = []
+
+        # collect docstrings and docstring templates
+        for docstring in docstrings:
+            if docstring is None:
+                continue
+            if hasattr(docstring, "_docstring_components"):
+                docstring_components.extend(
+                    docstring._docstring_components
+                )
+            elif isinstance(docstring, str) or docstring.__doc__:
+                docstring_components.append(docstring)
+
+        # append the callable's docstring last
+        if decorated.__doc__:
+            docstring_components.append(textwrap.dedent(decorated.__doc__))
+
+        params_applied = [
+            component.format(**params)
+            if isinstance(component, str) and len(params) > 0
+            else component
+            for component in docstring_components
+        ]
+
+        decorated.__doc__ = "".join(
+            [
+                component
+                if isinstance(component, str)
+                else textwrap.dedent(component.__doc__ or "")
+                for component in params_applied
+            ]
+        )
+
+        decorated._docstring_components = (
+            docstring_components
+        )
+        return decorated
+
+    return decorator
+
+
+def _deprecate_api(old_name, new_name, api, next_version, type=FutureWarning):
+    msg = _DEPR_MSG.format(old_name, next_version, new_name)
+
+    def wrapper(*args, **kwargs):
+        warnings.warn(msg, type)
+        return api(*args, **kwargs)
+    return wrapper
+
+
+def _deprecate_class(old_name, new_class, next_version,
+                     instancecheck=True):
+    """
+    Raise warning if a deprecated class is used in an isinstance check.
+    """
+    class _DeprecatedMeta(type):
+        def __instancecheck__(self, other):
+            warnings.warn(
+                _DEPR_MSG.format(old_name, next_version, new_class.__name__),
+                FutureWarning,
+                stacklevel=2
+            )
+            return isinstance(other, new_class)
+
+    return _DeprecatedMeta(old_name, (new_class,), {})
+
+
+def _is_iterable(obj):
+    try:
+        iter(obj)
+        return True
+    except TypeError:
+        return False
+
+
+def _is_path_like(path):
+    return isinstance(path, str) or hasattr(path, '__fspath__')
+
+
+def _stringify_path(path):
+    """
+    Convert *path* to a string or unicode path if possible.
+    """
+    if isinstance(path, str):
+        return os.path.expanduser(path)
+
+    # checking whether path implements the filesystem protocol
+    try:
+        return os.path.expanduser(path.__fspath__())
+    except AttributeError:
+        pass
+
+    raise TypeError("not a path-like object")
+
+
+def product(seq):
+    """
+    Return a product of sequence items.
+    """
+    return functools.reduce(lambda a, b: a*b, seq, 1)
+
+
+def get_contiguous_span(shape, strides, itemsize):
+    """
+    Return a contiguous span of N-D array data.
+
+    Parameters
+    ----------
+    shape : tuple
+    strides : tuple
+    itemsize : int
+        Specify array shape data
+
+    Returns
+    -------
+    start, end : int
+        The span end points.
+ """ + if not strides: + start = 0 + end = itemsize * product(shape) + else: + start = 0 + end = itemsize + for i, dim in enumerate(shape): + if dim == 0: + start = end = 0 + break + stride = strides[i] + if stride > 0: + end += stride * (dim - 1) + elif stride < 0: + start += stride * (dim - 1) + if end - start != itemsize * product(shape): + raise ValueError('array data is non-contiguous') + return start, end + + +def find_free_port(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + with contextlib.closing(sock) as sock: + sock.bind(('', 0)) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return sock.getsockname()[1] + + +def guid(): + from uuid import uuid4 + return uuid4().hex + + +def _break_traceback_cycle_from_frame(frame): + # Clear local variables in all inner frames, so as to break the + # reference cycle. + this_frame = sys._getframe(0) + refs = gc.get_referrers(frame) + while refs: + for frame in refs: + if frame is not this_frame and isinstance(frame, types.FrameType): + break + else: + # No frame found in referrers (finished?) + break + refs = None + # Clear the frame locals, to try and break the cycle (it is + # somewhere along the chain of execution frames). + frame.clear() + # To visit the inner frame, we need to find it among the + # referrers of this frame (while `frame.f_back` would let + # us visit the outer frame). + refs = gc.get_referrers(frame) + refs = frame = this_frame = None