diff --git a/.gitattributes b/.gitattributes index 986a27685dd3936dc4eada38d32c6fff1ed169aa..08954eb30c6016e30775fe5b224e7d877066ab47 100644 --- a/.gitattributes +++ b/.gitattributes @@ -184,3 +184,4 @@ env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs d env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1500 filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1500 filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1500 filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h new file mode 100644 index 0000000000000000000000000000000000000000..7e857bf20568ee12f9eab76a61dcdc49b5f4e6d8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h @@ -0,0 +1,285 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" +#include "arrow/visitor.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// User array accessor types + +/// \brief Array base type +/// Immutable data array with some logical type and some length. +/// +/// Any memory is owned by the respective Buffer instance (or its parents). +/// +/// The base class is only required to have a null bitmap buffer if the null +/// count is greater than 0 +/// +/// If known, the null count can be provided in the base Array constructor. If +/// the null count is not known, pass -1 to indicate that the null count is to +/// be computed on the first call to null_count() +class ARROW_EXPORT Array { + public: + virtual ~Array() = default; + + /// \brief Return true if value at index is null. Does not boundscheck + bool IsNull(int64_t i) const { return !IsValid(i); } + + /// \brief Return true if value at index is valid (not null). Does not + /// boundscheck + bool IsValid(int64_t i) const { + if (null_bitmap_data_ != NULLPTR) { + return bit_util::GetBit(null_bitmap_data_, i + data_->offset); + } + // Dispatching with a few conditionals like this makes IsNull more + // efficient for how it is used in practice. 
Making IsNull virtual
+    // would add a vtable lookup to every call and prevent inlining +
+    // a potential inner-branch removal.
+    if (type_id() == Type::SPARSE_UNION) {
+      return !internal::IsNullSparseUnion(*data_, i);
+    }
+    if (type_id() == Type::DENSE_UNION) {
+      return !internal::IsNullDenseUnion(*data_, i);
+    }
+    if (type_id() == Type::RUN_END_ENCODED) {
+      return !internal::IsNullRunEndEncoded(*data_, i);
+    }
+    return data_->null_count != data_->length;
+  }
+
+  /// \brief Return a Scalar containing the value of this array at i
+  Result<std::shared_ptr<Scalar>> GetScalar(int64_t i) const;
+
+  /// Size in the number of elements this array contains.
+  int64_t length() const { return data_->length; }
+
+  /// A relative position into another array's data, to enable zero-copy
+  /// slicing. This value defaults to zero.
+  int64_t offset() const { return data_->offset; }
+
+  /// The number of null entries in the array. If the null count was not known
+  /// at the time of construction (and set to a negative value), then the null
+  /// count will be computed and cached on the first invocation of this
+  /// function.
+  int64_t null_count() const;
+
+  /// \brief Computes the logical null count for arrays of all types, including
+  /// those that do not have a validity bitmap, such as union and run-end
+  /// encoded arrays.
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// null_count(). For types that have no validity bitmap, this function will
+  /// recompute the null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  const std::shared_ptr<DataType>& type() const { return data_->type; }
+  Type::type type_id() const { return data_->type->id(); }
+
+  /// Buffer for the validity (null) bitmap, if any. Note that Union types
+  /// never have a null bitmap.
+  ///
+  /// Note that for `null_count == 0` or for null type, this will be null.
+  /// This buffer does not account for any slice offset.
+  const std::shared_ptr<Buffer>& null_bitmap() const { return data_->buffers[0]; }
+
+  /// Raw pointer to the null bitmap.
+  ///
+  /// Note that for `null_count == 0` or for null type, this will be null.
+  /// This pointer does not account for any slice offset.
+  const uint8_t* null_bitmap_data() const { return null_bitmap_data_; }
+
+  /// Equality comparison with another array
+  bool Equals(const Array& arr, const EqualOptions& = EqualOptions::Defaults()) const;
+  bool Equals(const std::shared_ptr<Array>& arr,
+              const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \brief Return the formatted unified diff of arrow::Diff between this
+  /// Array and another Array
+  std::string Diff(const Array& other) const;
+
+  /// Approximate equality comparison with another array
+  ///
+  /// epsilon is only used if this is FloatArray or DoubleArray
+  bool ApproxEquals(const std::shared_ptr<Array>& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+  bool ApproxEquals(const Array& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// Compare if the range of slots specified are equal for the given array and
+  /// this array. end_idx is exclusive. This method does not bounds-check.
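[Editor's aside: a minimal usage sketch of the accessors above, not part of the vendored header. It assumes the standard Arrow C++ builder API from arrow/api.h; the Demo() wrapper is hypothetical.]

#include <iostream>
#include <memory>
#include "arrow/api.h"

arrow::Status Demo() {
  arrow::Int32Builder builder;
  ARROW_RETURN_NOT_OK(builder.Append(1));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_RETURN_NOT_OK(builder.Append(3));
  std::shared_ptr<arrow::Array> arr;
  ARROW_RETURN_NOT_OK(builder.Finish(&arr));

  // length() counts every slot; null_count() may be computed lazily and cached.
  std::cout << arr->length() << " slots, " << arr->null_count() << " null\n";
  for (int64_t i = 0; i < arr->length(); ++i) {
    // IsValid()/IsNull() do not bounds-check, per the comments above.
    std::cout << "slot " << i << (arr->IsValid(i) ? ": valid" : ": null") << "\n";
  }
  return arrow::Status::OK();
}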
+ bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, + const Array& other, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx, + const std::shared_ptr& other, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx, + int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()) const; + bool RangeEquals(const std::shared_ptr& other, int64_t start_idx, + int64_t end_idx, int64_t other_start_idx, + const EqualOptions& = EqualOptions::Defaults()) const; + + /// \brief Apply the ArrayVisitor::Visit() method specialized to the array type + Status Accept(ArrayVisitor* visitor) const; + + /// Construct a zero-copy view of this array with the given type. + /// + /// This method checks if the types are layout-compatible. + /// Nested types are traversed in depth-first order. Data buffers must have + /// the same item sizes, even though the logical types may be different. + /// An error is returned if the types are not layout-compatible. + Result> View(const std::shared_ptr& type) const; + + /// Construct a zero-copy slice of the array with the indicated offset and + /// length + /// + /// \param[in] offset the position of the first element in the constructed + /// slice + /// \param[in] length the length of the slice. If there are not enough + /// elements in the array, the length will be adjusted accordingly + /// + /// \return a new object wrapped in std::shared_ptr + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// Slice from offset until end of the array + std::shared_ptr Slice(int64_t offset) const; + + /// Input-checking variant of Array::Slice + Result> SliceSafe(int64_t offset, int64_t length) const; + /// Input-checking variant of Array::Slice + Result> SliceSafe(int64_t offset) const; + + const std::shared_ptr& data() const { return data_; } + + int num_fields() const { return static_cast(data_->child_data.size()); } + + /// \return PrettyPrint representation of array suitable for debugging + std::string ToString() const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the array's internal data. + /// + /// This is O(k) where k is the number of descendents. + /// + /// \return Status + Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the array's internal data. + /// + /// This is potentially O(k*n) where k is the number of descendents and n + /// is the array length. 
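[Editor's aside: a short continuation of the sketch above, illustrating the zero-copy slicing contract; `arr` is the array built in the previous fragment.]

  // Slice() shares buffers with the parent; only offset()/length() change.
  std::shared_ptr<arrow::Array> sliced = arr->Slice(/*offset=*/1, /*length=*/2);
  // sliced->offset() == 1 and sliced->length() == 2, with no data copied.

  // SliceSafe() is the input-checking variant and fails on out-of-range input.
  ARROW_ASSIGN_OR_RAISE(auto checked, arr->SliceSafe(1, 2));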
+ /// + /// \return Status + Status ValidateFull() const; + + protected: + Array() = default; + ARROW_DEFAULT_MOVE_AND_ASSIGN(Array); + + std::shared_ptr data_; + const uint8_t* null_bitmap_data_ = NULLPTR; + + /// Protected method for constructors + void SetData(const std::shared_ptr& data) { + if (data->buffers.size() > 0) { + null_bitmap_data_ = data->GetValuesSafe(0, /*offset=*/0); + } else { + null_bitmap_data_ = NULLPTR; + } + data_ = data; + } + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(Array); + + ARROW_FRIEND_EXPORT friend void PrintTo(const Array& x, std::ostream* os); +}; + +static inline std::ostream& operator<<(std::ostream& os, const Array& x) { + os << x.ToString(); + return os; +} + +/// Base class for non-nested arrays +class ARROW_EXPORT FlatArray : public Array { + protected: + using Array::Array; +}; + +/// Base class for arrays of fixed-size logical types +class ARROW_EXPORT PrimitiveArray : public FlatArray { + public: + PrimitiveArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// Does not account for any slice offset + const std::shared_ptr& values() const { return data_->buffers[1]; } + + protected: + PrimitiveArray() : raw_values_(NULLPTR) {} + + void SetData(const std::shared_ptr& data) { + this->Array::SetData(data); + raw_values_ = data->GetValuesSafe(1, /*offset=*/0); + } + + explicit PrimitiveArray(const std::shared_ptr& data) { SetData(data); } + + const uint8_t* raw_values_; +}; + +/// Degenerate null type Array +class ARROW_EXPORT NullArray : public FlatArray { + public: + using TypeClass = NullType; + + explicit NullArray(const std::shared_ptr& data) { SetData(data); } + explicit NullArray(int64_t length); + + private: + void SetData(const std::shared_ptr& data) { + null_bitmap_data_ = NULLPTR; + data->null_count = data->length; + data_ = data; + } +}; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..fd68a379ddbfb02e70492d4281715a06c9da46c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h @@ -0,0 +1,329 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
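[Editor's aside: before the binary-array accessors below, an illustrative note on the validation split declared in array_base.h above, reusing `arr` from the earlier fragments. Validate() is cheap and structural; ValidateFull() may scan the data.]

  arrow::Status cheap = arr->Validate();     // O(number of descendants)
  arrow::Status full = arr->ValidateFull();  // potentially O(descendants * length)
  if (!full.ok()) {
    std::cerr << full.ToString() << std::endl;
  }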
+ +// Array accessor classes for Binary, LargeBinart, String, LargeString, +// FixedSizeBinary + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/stl_iterator.h" +#include "arrow/type.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup binary-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// Binary and String + +/// Base class for variable-sized binary arrays, regardless of offset size +/// and logical interpretation. +template +class BaseBinaryArray : public FlatArray { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + using IteratorType = stl::ArrayIterator>; + + /// Return the pointer to the given elements bytes + // XXX should GetValue(int64_t i) return a string_view? + const uint8_t* GetValue(int64_t i, offset_type* out_length) const { + // Account for base offset + i += data_->offset; + const offset_type pos = raw_value_offsets_[i]; + *out_length = raw_value_offsets_[i + 1] - pos; + return raw_data_ + pos; + } + + /// \brief Get binary value as a string_view + /// + /// \param i the value index + /// \return the view over the selected value + std::string_view GetView(int64_t i) const { + // Account for base offset + i += data_->offset; + const offset_type pos = raw_value_offsets_[i]; + return std::string_view(reinterpret_cast(raw_data_ + pos), + raw_value_offsets_[i + 1] - pos); + } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + /// \brief Get binary value as a string_view + /// Provided for consistency with other arrays. + /// + /// \param i the value index + /// \return the view over the selected value + std::string_view Value(int64_t i) const { return GetView(i); } + + /// \brief Get binary value as a std::string + /// + /// \param i the value index + /// \return the value copied into a std::string + std::string GetString(int64_t i) const { return std::string(GetView(i)); } + + /// Note that this buffer does not account for any slice offset + std::shared_ptr value_offsets() const { return data_->buffers[1]; } + + /// Note that this buffer does not account for any slice offset + std::shared_ptr value_data() const { return data_->buffers[2]; } + + const offset_type* raw_value_offsets() const { + return raw_value_offsets_ + data_->offset; + } + + const uint8_t* raw_data() const { return raw_data_; } + + /// \brief Return the data buffer absolute offset of the data for the value + /// at the passed index. + /// + /// Does not perform boundschecking + offset_type value_offset(int64_t i) const { + return raw_value_offsets_[i + data_->offset]; + } + + /// \brief Return the length of the data for the value at the passed index. + /// + /// Does not perform boundschecking + offset_type value_length(int64_t i) const { + i += data_->offset; + return raw_value_offsets_[i + 1] - raw_value_offsets_[i]; + } + + /// \brief Return the total length of the memory in the data buffer + /// referenced by this array. If the array has been sliced then this may be + /// less than the size of the data buffer (data_->buffers[2]). 
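[Editor's aside: a hedged usage sketch for the offset-based accessors above, using StringArray (whose offset_type is int32_t); only names from this header and the standard builder API are assumed.]

  arrow::StringBuilder sb;
  ARROW_RETURN_NOT_OK(sb.AppendValues({"foo", "barbaz"}));
  std::shared_ptr<arrow::Array> raw;
  ARROW_RETURN_NOT_OK(sb.Finish(&raw));
  auto strs = std::static_pointer_cast<arrow::StringArray>(raw);

  std::string_view v = strs->GetView(1);  // "barbaz", zero-copy view
  int32_t len = strs->value_length(1);    // 6
  int32_t off = strs->value_offset(1);    // absolute offset into value_data()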
+ offset_type total_values_length() const { + if (data_->length > 0) { + return raw_value_offsets_[data_->length + data_->offset] - + raw_value_offsets_[data_->offset]; + } else { + return 0; + } + } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + // For subclasses + BaseBinaryArray() = default; + + // Protected method for constructors + void SetData(const std::shared_ptr& data) { + this->Array::SetData(data); + raw_value_offsets_ = data->GetValuesSafe(1, /*offset=*/0); + raw_data_ = data->GetValuesSafe(2, /*offset=*/0); + } + + const offset_type* raw_value_offsets_ = NULLPTR; + const uint8_t* raw_data_ = NULLPTR; +}; + +/// Concrete Array class for variable-size binary data +class ARROW_EXPORT BinaryArray : public BaseBinaryArray { + public: + explicit BinaryArray(const std::shared_ptr& data); + + BinaryArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + protected: + // For subclasses such as StringArray + BinaryArray() : BaseBinaryArray() {} +}; + +/// Concrete Array class for variable-size string (utf-8) data +class ARROW_EXPORT StringArray : public BinaryArray { + public: + using TypeClass = StringType; + + explicit StringArray(const std::shared_ptr& data); + + StringArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +/// Concrete Array class for large variable-size binary data +class ARROW_EXPORT LargeBinaryArray : public BaseBinaryArray { + public: + explicit LargeBinaryArray(const std::shared_ptr& data); + + LargeBinaryArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + protected: + // For subclasses such as LargeStringArray + LargeBinaryArray() : BaseBinaryArray() {} +}; + +/// Concrete Array class for large variable-size string (utf-8) data +class ARROW_EXPORT LargeStringArray : public LargeBinaryArray { + public: + using TypeClass = LargeStringType; + + explicit LargeStringArray(const std::shared_ptr& data); + + LargeStringArray(int64_t length, const std::shared_ptr& value_offsets, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +// ---------------------------------------------------------------------- +// BinaryView and StringView + +/// Concrete Array class for variable-size binary view data using the +/// BinaryViewType::c_type struct to reference in-line or out-of-line string values +class ARROW_EXPORT BinaryViewArray : public FlatArray { + public: + using TypeClass = BinaryViewType; + using IteratorType = stl::ArrayIterator; + using c_type = BinaryViewType::c_type; + + explicit BinaryViewArray(std::shared_ptr data); + + BinaryViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr views, 
BufferVector data_buffers, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + // For API compatibility with BinaryArray etc. + std::string_view GetView(int64_t i) const; + std::string GetString(int64_t i) const { return std::string{GetView(i)}; } + + const auto& values() const { return data_->buffers[1]; } + const c_type* raw_values() const { return raw_values_; } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + IteratorType begin() const { return IteratorType(*this); } + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using FlatArray::FlatArray; + + void SetData(std::shared_ptr data) { + FlatArray::SetData(std::move(data)); + raw_values_ = data_->GetValuesSafe(1); + } + + const c_type* raw_values_; +}; + +/// Concrete Array class for variable-size string view (utf-8) data using +/// BinaryViewType::c_type to reference in-line or out-of-line string values +class ARROW_EXPORT StringViewArray : public BinaryViewArray { + public: + using TypeClass = StringViewType; + + explicit StringViewArray(std::shared_ptr data); + + using BinaryViewArray::BinaryViewArray; + + /// \brief Validate that this array contains only valid UTF8 entries + /// + /// This check is also implied by ValidateFull() + Status ValidateUTF8() const; +}; + +// ---------------------------------------------------------------------- +// Fixed width binary + +/// Concrete Array class for fixed-size binary data +class ARROW_EXPORT FixedSizeBinaryArray : public PrimitiveArray { + public: + using TypeClass = FixedSizeBinaryType; + using IteratorType = stl::ArrayIterator; + + explicit FixedSizeBinaryArray(const std::shared_ptr& data); + + FixedSizeBinaryArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const uint8_t* GetValue(int64_t i) const; + const uint8_t* Value(int64_t i) const { return GetValue(i); } + + std::string_view GetView(int64_t i) const { + return std::string_view(reinterpret_cast(GetValue(i)), byte_width()); + } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + std::string GetString(int64_t i) const { return std::string(GetView(i)); } + + int32_t byte_width() const { return byte_width_; } + + const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width_; } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + void SetData(const std::shared_ptr& data) { + this->PrimitiveArray::SetData(data); + byte_width_ = + internal::checked_cast(*type()).byte_width(); + } + + int32_t byte_width_; +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..f14812549089ad9c2f6d0e80c8cc17e05d6252c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/array/array_binary.h" +#include "arrow/array/data.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// Decimal128Array + +/// Concrete Array class for 128-bit decimal data +class ARROW_EXPORT Decimal128Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal128Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct Decimal128Array from ArrayData instance + explicit Decimal128Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +// Backward compatibility +using DecimalArray = Decimal128Array; + +// ---------------------------------------------------------------------- +// Decimal256Array + +/// Concrete Array class for 256-bit decimal data +class ARROW_EXPORT Decimal256Array : public FixedSizeBinaryArray { + public: + using TypeClass = Decimal256Type; + + using FixedSizeBinaryArray::FixedSizeBinaryArray; + + /// \brief Construct Decimal256Array from ArrayData instance + explicit Decimal256Array(const std::shared_ptr& data); + + std::string FormatValue(int64_t i) const; +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..9aa0a7bcc2d6694404dd3bda8694ccaabe60c89c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
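[Editor's aside: before the dictionary declarations below, an illustrative sketch for the Decimal128Array accessor above; Decimal128Builder and Decimal128::FromString are assumed from the standard Arrow C++ API.]

  auto ty = arrow::decimal128(/*precision=*/10, /*scale=*/2);
  arrow::Decimal128Builder db(ty);
  ARROW_ASSIGN_OR_RAISE(arrow::Decimal128 d, arrow::Decimal128::FromString("123.45"));
  ARROW_RETURN_NOT_OK(db.Append(d));
  std::shared_ptr<arrow::Array> raw_dec;
  ARROW_RETURN_NOT_OK(db.Finish(&raw_dec));
  auto dec = std::static_pointer_cast<arrow::Decimal128Array>(raw_dec);
  // FormatValue() renders the value with the type's scale applied.
  std::cout << dec->FormatValue(0) << std::endl;  // "123.45"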
+ +#pragma once + +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// DictionaryArray + +/// \brief Array type for dictionary-encoded data with a +/// data-dependent dictionary +/// +/// A dictionary array contains an array of non-negative integers (the +/// "dictionary indices") along with a data type containing a "dictionary" +/// corresponding to the distinct values represented in the data. +/// +/// For example, the array +/// +/// ["foo", "bar", "foo", "bar", "foo", "bar"] +/// +/// with dictionary ["bar", "foo"], would have dictionary array representation +/// +/// indices: [1, 0, 1, 0, 1, 0] +/// dictionary: ["bar", "foo"] +/// +/// The indices in principle may be any integer type. +class ARROW_EXPORT DictionaryArray : public Array { + public: + using TypeClass = DictionaryType; + + explicit DictionaryArray(const std::shared_ptr& data); + + DictionaryArray(const std::shared_ptr& type, + const std::shared_ptr& indices, + const std::shared_ptr& dictionary); + + /// \brief Construct DictionaryArray from dictionary and indices + /// array and validate + /// + /// This function does the validation of the indices and input type. It checks if + /// all indices are non-negative and smaller than the size of the dictionary. + /// + /// \param[in] type a dictionary type + /// \param[in] dictionary the dictionary with same value type as the + /// type object + /// \param[in] indices an array of non-negative integers smaller than the + /// size of the dictionary + static Result> FromArrays( + const std::shared_ptr& type, const std::shared_ptr& indices, + const std::shared_ptr& dictionary); + + static Result> FromArrays( + const std::shared_ptr& indices, const std::shared_ptr& dictionary) { + return FromArrays(::arrow::dictionary(indices->type(), dictionary->type()), indices, + dictionary); + } + + /// \brief Transpose this DictionaryArray + /// + /// This method constructs a new dictionary array with the given dictionary + /// type, transposing indices using the transpose map. The type and the + /// transpose map are typically computed using DictionaryUnifier. + /// + /// \param[in] type the new type object + /// \param[in] dictionary the new dictionary + /// \param[in] transpose_map transposition array of this array's indices + /// into the target array's indices + /// \param[in] pool a pool to allocate the array data from + Result> Transpose( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + const int32_t* transpose_map, MemoryPool* pool = default_memory_pool()) const; + + Result> Compact(MemoryPool* pool = default_memory_pool()) const; + + /// \brief Determine whether dictionary arrays may be compared without unification + bool CanCompareIndices(const DictionaryArray& other) const; + + /// \brief Return the dictionary for this array, which is stored as + /// a member of the ArrayData internal structure + const std::shared_ptr& dictionary() const; + const std::shared_ptr& indices() const; + + /// \brief Return the ith value of indices, cast to int64_t. Not recommended + /// for use in performance-sensitive code. Does not validate whether the + /// value is null or out-of-bounds. 
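[Editor's aside: a sketch of the FromArrays() factory documented above, mirroring the ["foo", "bar", ...] example from the class comment; builder APIs are assumed.]

  arrow::Int8Builder ib;
  ARROW_RETURN_NOT_OK(ib.AppendValues({1, 0, 1, 0}));
  std::shared_ptr<arrow::Array> indices;
  ARROW_RETURN_NOT_OK(ib.Finish(&indices));

  arrow::StringBuilder vb;
  ARROW_RETURN_NOT_OK(vb.AppendValues({"bar", "foo"}));
  std::shared_ptr<arrow::Array> dict;
  ARROW_RETURN_NOT_OK(vb.Finish(&dict));

  // Validates that every index is non-negative and < dict->length().
  ARROW_ASSIGN_OR_RAISE(auto dict_arr,
                        arrow::DictionaryArray::FromArrays(indices, dict));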
+ int64_t GetValueIndex(int64_t i) const; + + const DictionaryType* dict_type() const { return dict_type_; } + + private: + void SetData(const std::shared_ptr& data); + const DictionaryType* dict_type_; + std::shared_ptr indices_; + + // Lazily initialized when invoking dictionary() + mutable std::shared_ptr dictionary_; +}; + +/// \brief Helper class for incremental dictionary unification +class ARROW_EXPORT DictionaryUnifier { + public: + virtual ~DictionaryUnifier() = default; + + /// \brief Construct a DictionaryUnifier + /// \param[in] value_type the data type of the dictionaries + /// \param[in] pool MemoryPool to use for memory allocations + static Result> Make( + std::shared_ptr value_type, MemoryPool* pool = default_memory_pool()); + + /// \brief Unify dictionaries accross array chunks + /// + /// The dictionaries in the array chunks will be unified, their indices + /// accordingly transposed. + /// + /// Only dictionaries with a primitive value type are currently supported. + /// However, dictionaries nested inside a more complex type are correctly unified. + static Result> UnifyChunkedArray( + const std::shared_ptr& array, + MemoryPool* pool = default_memory_pool()); + + /// \brief Unify dictionaries accross the chunks of each table column + /// + /// The dictionaries in each table column will be unified, their indices + /// accordingly transposed. + /// + /// Only dictionaries with a primitive value type are currently supported. + /// However, dictionaries nested inside a more complex type are correctly unified. + static Result> UnifyTable( + const Table& table, MemoryPool* pool = default_memory_pool()); + + /// \brief Append dictionary to the internal memo + virtual Status Unify(const Array& dictionary) = 0; + + /// \brief Append dictionary and compute transpose indices + /// \param[in] dictionary the dictionary values to unify + /// \param[out] out_transpose a Buffer containing computed transpose indices + /// as int32_t values equal in length to the passed dictionary. The value in + /// each slot corresponds to the new index value for each original index + /// for a DictionaryArray with the old dictionary + virtual Status Unify(const Array& dictionary, + std::shared_ptr* out_transpose) = 0; + + /// \brief Return a result DictionaryType with the smallest possible index + /// type to accommodate the unified dictionary. The unifier cannot be used + /// after this is called + virtual Status GetResult(std::shared_ptr* out_type, + std::shared_ptr* out_dict) = 0; + + /// \brief Return a unified dictionary with the given index type. If + /// the index type is not large enough then an invalid status will be returned. + /// The unifier cannot be used after this is called + virtual Status GetResultWithIndexType(const std::shared_ptr& index_type, + std::shared_ptr* out_dict) = 0; +}; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h new file mode 100644 index 0000000000000000000000000000000000000000..768a630e0af54da969c1dd9a00de75e7fada8b3c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h @@ -0,0 +1,863 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor classes for List, LargeList, ListView, LargeListView, FixedSizeList, +// Map, Struct, and Union + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup nested-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// VarLengthListLikeArray + +template +class VarLengthListLikeArray; + +namespace internal { + +// Private helper for [Large]List[View]Array::SetData. +// Unfortunately, trying to define VarLengthListLikeArray::SetData outside of this header +// doesn't play well with MSVC. +template +void SetListData(VarLengthListLikeArray* self, + const std::shared_ptr& data, + Type::type expected_type_id = TYPE::type_id); + +} // namespace internal + +/// Base class for variable-sized list and list-view arrays, regardless of offset size. +template +class VarLengthListLikeArray : public Array { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + + const TypeClass* var_length_list_like_type() const { return this->list_type_; } + + /// \brief Return array object containing the list's values + /// + /// Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& values() const { return values_; } + + /// Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& value_offsets() const { return data_->buffers[1]; } + + const std::shared_ptr& value_type() const { return list_type_->value_type(); } + + /// Return pointer to raw value offsets accounting for any slice offset + const offset_type* raw_value_offsets() const { + return raw_value_offsets_ + data_->offset; + } + + // The following functions will not perform boundschecking + + offset_type value_offset(int64_t i) const { + return raw_value_offsets_[i + data_->offset]; + } + + /// \brief Return the size of the value at a particular index + /// + /// Since non-empty null lists and list-views are possible, avoid calling this + /// function when the list at slot i is null. 
+ /// + /// \pre IsValid(i) + virtual offset_type value_length(int64_t i) const = 0; + + /// \pre IsValid(i) + std::shared_ptr value_slice(int64_t i) const { + return values_->Slice(value_offset(i), value_length(i)); + } + + protected: + friend void internal::SetListData(VarLengthListLikeArray* self, + const std::shared_ptr& data, + Type::type expected_type_id); + + const TypeClass* list_type_ = NULLPTR; + std::shared_ptr values_; + const offset_type* raw_value_offsets_ = NULLPTR; +}; + +// ---------------------------------------------------------------------- +// ListArray / LargeListArray + +template +class BaseListArray : public VarLengthListLikeArray { + public: + using TypeClass = TYPE; + using offset_type = typename TYPE::offset_type; + + const TypeClass* list_type() const { return this->var_length_list_like_type(); } + + /// \brief Return the size of the value at a particular index + /// + /// Since non-empty null lists are possible, avoid calling this + /// function when the list at slot i is null. + /// + /// \pre IsValid(i) + offset_type value_length(int64_t i) const final { + i += this->data_->offset; + return this->raw_value_offsets_[i + 1] - this->raw_value_offsets_[i]; + } +}; + +/// Concrete Array class for list data +class ARROW_EXPORT ListArray : public BaseListArray { + public: + explicit ListArray(std::shared_ptr data); + + ListArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct ListArray from array of offsets and child value array + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' + /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls. + /// + /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an + /// array with offset() > 0). + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int32 type + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a ListArray from a ListViewArray + static Result> FromListView(const ListViewArray& source, + MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration of this array's offsets as well as null elements backed + /// by non-empty lists (they are skipped, thus copying may be needed). 
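[Editor's aside: an illustrative sketch of FromArrays() and Flatten() as documented above; builders are assumed, and the offsets array must be int32 with length + 1 entries.]

  arrow::Int32Builder ob;
  ARROW_RETURN_NOT_OK(ob.AppendValues({0, 2, 2, 5}));  // [10,11], [], [12,13,14]
  std::shared_ptr<arrow::Array> offsets;
  ARROW_RETURN_NOT_OK(ob.Finish(&offsets));

  arrow::Int64Builder lb;
  ARROW_RETURN_NOT_OK(lb.AppendValues({10, 11, 12, 13, 14}));
  std::shared_ptr<arrow::Array> values;
  ARROW_RETURN_NOT_OK(lb.Finish(&values));

  ARROW_ASSIGN_OR_RAISE(auto list_arr,
                        arrow::ListArray::FromArrays(*offsets, *values));
  // Flatten() honors slice offsets and skips slots hidden behind nulls,
  // unlike values(), which returns the raw child array.
  ARROW_ASSIGN_OR_RAISE(auto flat, list_arr->Flatten());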
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + protected: + // This constructor defers SetData to a derived array class + ListArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// Concrete Array class for large list data (with 64-bit offsets) +class ARROW_EXPORT LargeListArray : public BaseListArray { + public: + explicit LargeListArray(const std::shared_ptr& data); + + LargeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListArray from array of offsets and child value array + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' + /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls. + /// + /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an + /// array with offset() > 0). + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int64 type + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListArray from a LargeListViewArray + static Result> FromListView( + const LargeListViewArray& source, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration of this array's offsets as well as null elements backed + /// by non-empty lists (they are skipped, thus copying may be needed). 
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list offsets as an Int64Array + std::shared_ptr offsets() const; + + protected: + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// ListViewArray / LargeListViewArray + +template +class BaseListViewArray : public VarLengthListLikeArray { + public: + using TypeClass = TYPE; + using offset_type = typename TYPE::offset_type; + + const TypeClass* list_view_type() const { return this->var_length_list_like_type(); } + + /// \brief Note that this buffer does not account for any slice offset or length. + const std::shared_ptr& value_sizes() const { return this->data_->buffers[2]; } + + /// \brief Return pointer to raw value offsets accounting for any slice offset + const offset_type* raw_value_sizes() const { + return raw_value_sizes_ + this->data_->offset; + } + + /// \brief Return the size of the value at a particular index + /// + /// This should not be called if the list-view at slot i is null. + /// The returned size in those cases could be any value from 0 to the + /// length of the child values array. + /// + /// \pre IsValid(i) + offset_type value_length(int64_t i) const final { + return this->raw_value_sizes_[i + this->data_->offset]; + } + + protected: + const offset_type* raw_value_sizes_ = NULLPTR; +}; + +/// \brief Concrete Array class for list-view data +class ARROW_EXPORT ListViewArray : public BaseListViewArray { + public: + explicit ListViewArray(std::shared_ptr data); + + ListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct ListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct a ListViewArray using buffers from offsets and sizes arrays + /// that project views into the child values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the + /// offsets's null bitmap. But if a null_bitmap is provided, the offsets array + /// can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). + /// + /// \param[in] offsets An array of int32 offsets into the values array. NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int32 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. 
+ /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a ListViewArray from a ListArray + static Result> FromList(const ListArray& list_array, + MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the list-views in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + /// + /// This function invokes Concatenate() if list-views are non-contiguous. It + /// will try to minimize the number of array slices passed to Concatenate() by + /// maximizing the size of each slice (containing as many contiguous + /// list-views as possible). + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int32Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to ListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. + std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + ListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +/// \brief Concrete Array class for large list-view data (with 64-bit offsets +/// and sizes) +class ARROW_EXPORT LargeListViewArray : public BaseListViewArray { + public: + explicit LargeListViewArray(std::shared_ptr data); + + LargeListViewArray(std::shared_ptr type, int64_t length, + std::shared_ptr value_offsets, + std::shared_ptr value_sizes, std::shared_ptr values, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct LargeListViewArray from array of offsets, sizes, and child + /// value array + /// + /// Construct an LargeListViewArray using buffers from offsets and sizes arrays + /// that project views into the values array. + /// + /// This function does the bare minimum of validation of the offsets/sizes and + /// input types. The offset and length of the offsets and sizes arrays must + /// match and that will be checked, but their contents will be assumed to be + /// well-formed. + /// + /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' or + /// sizes' null bitmap. Only one of these two is allowed to have a null bitmap. But if a + /// null_bitmap is provided, the offsets array and the sizes array can't have nulls. + /// + /// And when a null_bitmap is provided, neither the offsets or sizes array can be a + /// slice (i.e. an array with offset() > 0). 
+ /// + /// \param[in] offsets An array of int64 offsets into the values array. NULL values are + /// supported if the corresponding values in sizes is NULL or 0. + /// \param[in] sizes An array containing the int64 sizes of every view. NULL values are + /// taken to represent a NULL list-view in the array being created. + /// \param[in] values Array containing list values + /// \param[in] pool MemoryPool + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + static Result> FromArrays( + const Array& offsets, const Array& sizes, const Array& values, + MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + static Result> FromArrays( + std::shared_ptr type, const Array& offsets, const Array& sizes, + const Array& values, MemoryPool* pool = default_memory_pool(), + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Build a LargeListViewArray from a LargeListArray + static Result> FromList( + const LargeListArray& list_array, MemoryPool* pool); + + /// \brief Return an Array that is a concatenation of the large list-views in this + /// array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration this array's offsets (which can be in any order) + /// and sizes. Nulls are skipped. + Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Return list-view offsets as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListArray::FromArrays() and get back the same list array + /// if the original one has nulls. + std::shared_ptr offsets() const; + + /// \brief Return list-view sizes as an Int64Array + /// + /// The returned array will not have a validity bitmap, so you cannot expect + /// to pass it to LargeListViewArray::FromArrays() and get back the same list + /// array if the original one has nulls. 
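[Editor's aside: a hedged sketch of the list-view factories above, reusing `values` from the ListArray fragment; note that the per-slot (offset, size) pairs may be out of order or overlapping.]

  arrow::Int32Builder offb, szb;
  ARROW_RETURN_NOT_OK(offb.AppendValues({3, 0, 0}));
  ARROW_RETURN_NOT_OK(szb.AppendValues({2, 0, 3}));
  std::shared_ptr<arrow::Array> lv_offsets, lv_sizes;
  ARROW_RETURN_NOT_OK(offb.Finish(&lv_offsets));
  ARROW_RETURN_NOT_OK(szb.Finish(&lv_sizes));

  ARROW_ASSIGN_OR_RAISE(auto lv, arrow::ListViewArray::FromArrays(
                                     *lv_offsets, *lv_sizes, *values));
  // Slot 0 reads values[3..5), slot 1 is an empty list, slot 2 reads values[0..3).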
+ std::shared_ptr sizes() const; + + protected: + // This constructor defers SetData to a derived array class + LargeListViewArray() = default; + + void SetData(const std::shared_ptr& data); +}; + +// ---------------------------------------------------------------------- +// MapArray + +/// Concrete Array class for map data +/// +/// NB: "value" in this context refers to a pair of a key and the corresponding item +class ARROW_EXPORT MapArray : public ListArray { + public: + using TypeClass = MapType; + + explicit MapArray(const std::shared_ptr& data); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, BufferVector buffers, + const std::shared_ptr& keys, const std::shared_ptr& items, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MapArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& value_offsets, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Construct MapArray from array of offsets and child key, item arrays + /// + /// This function does the bare minimum of validation of the offsets and + /// input types, and will allocate a new offsets array if necessary (i.e. if + /// the offsets contain any nulls). If the offsets do not have nulls, they + /// are assumed to be well-formed + /// + /// \param[in] offsets Array containing n + 1 offsets encoding length and + /// size. Must be of int32 type + /// \param[in] keys Array containing key values + /// \param[in] items Array containing item values + /// \param[in] pool MemoryPool in case new offsets array needs to be + /// allocated because of null values + static Result> FromArrays( + const std::shared_ptr& offsets, const std::shared_ptr& keys, + const std::shared_ptr& items, MemoryPool* pool = default_memory_pool()); + + static Result> FromArrays( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool = default_memory_pool()); + + const MapType* map_type() const { return map_type_; } + + /// \brief Return array object containing all map keys + const std::shared_ptr& keys() const { return keys_; } + + /// \brief Return array object containing all mapped items + const std::shared_ptr& items() const { return items_; } + + /// Validate child data before constructing the actual MapArray. 
+ static Status ValidateChildData( + const std::vector>& child_data); + + protected: + void SetData(const std::shared_ptr& data); + + static Result> FromArraysInternal( + std::shared_ptr type, const std::shared_ptr& offsets, + const std::shared_ptr& keys, const std::shared_ptr& items, + MemoryPool* pool); + + private: + const MapType* map_type_; + std::shared_ptr keys_, items_; +}; + +// ---------------------------------------------------------------------- +// FixedSizeListArray + +/// Concrete Array class for fixed size list data +class ARROW_EXPORT FixedSizeListArray : public Array { + public: + using TypeClass = FixedSizeListType; + using offset_type = TypeClass::offset_type; + + explicit FixedSizeListArray(const std::shared_ptr& data); + + FixedSizeListArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& values, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const FixedSizeListType* list_type() const; + + /// \brief Return array object containing the list's values + const std::shared_ptr& values() const; + + const std::shared_ptr& value_type() const; + + // The following functions will not perform boundschecking + int64_t value_offset(int64_t i) const { + i += data_->offset; + return list_size_ * i; + } + /// \brief Return the fixed-size of the values + /// + /// No matter the value of the index parameter, the result is the same. + /// So even when the value at slot i is null, this function will return a + /// non-zero size. + /// + /// \pre IsValid(i) + int32_t value_length(int64_t i = 0) const { + ARROW_UNUSED(i); + return list_size_; + } + /// \pre IsValid(i) + std::shared_ptr value_slice(int64_t i) const { + return values_->Slice(value_offset(i), value_length(i)); + } + + /// \brief Return an Array that is a concatenation of the lists in this array. + /// + /// Note that it's different from `values()` in that it takes into + /// consideration null elements (they are skipped, thus copying may be needed). 
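[Editor's aside: a sketch of MapArray::FromArrays() as documented above; each map slot owns the key/item pairs between consecutive offsets, and builder APIs are assumed.]

  arrow::Int32Builder mob;
  ARROW_RETURN_NOT_OK(mob.AppendValues({0, 2, 3}));  // two maps: 2 pairs, then 1
  std::shared_ptr<arrow::Array> map_offsets;
  ARROW_RETURN_NOT_OK(mob.Finish(&map_offsets));

  arrow::StringBuilder kb;
  ARROW_RETURN_NOT_OK(kb.AppendValues({"a", "b", "c"}));
  std::shared_ptr<arrow::Array> keys;
  ARROW_RETURN_NOT_OK(kb.Finish(&keys));

  arrow::Int64Builder itb;
  ARROW_RETURN_NOT_OK(itb.AppendValues({1, 2, 3}));
  std::shared_ptr<arrow::Array> items;
  ARROW_RETURN_NOT_OK(itb.Finish(&items));

  ARROW_ASSIGN_OR_RAISE(auto map_arr,
                        arrow::MapArray::FromArrays(map_offsets, keys, items));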
+ Result> Flatten( + MemoryPool* memory_pool = default_memory_pool()) const; + + /// \brief Construct FixedSizeListArray from child value array and value_length + /// + /// \param[in] values Array containing list values + /// \param[in] list_size The fixed length of each list + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / list_size + static Result> FromArrays( + const std::shared_ptr& values, int32_t list_size, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + /// \brief Construct FixedSizeListArray from child value array and type + /// + /// \param[in] values Array containing list values + /// \param[in] type The fixed sized list type + /// \param[in] null_bitmap Optional validity bitmap + /// \param[in] null_count Optional null count in null_bitmap + /// \return Will have length equal to values.length() / type.list_size() + static Result> FromArrays( + const std::shared_ptr& values, std::shared_ptr type, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount); + + protected: + void SetData(const std::shared_ptr& data); + int32_t list_size_; + + private: + std::shared_ptr values_; +}; + +// ---------------------------------------------------------------------- +// Struct + +/// Concrete Array class for struct data +class ARROW_EXPORT StructArray : public Array { + public: + using TypeClass = StructType; + + explicit StructArray(const std::shared_ptr& data); + + StructArray(const std::shared_ptr& type, int64_t length, + const std::vector>& children, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and field names. + /// + /// The length and data type are automatically inferred from the arguments. + /// There should be at least one child array. + static Result> Make( + const ArrayVector& children, const std::vector& field_names, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + /// \brief Return a StructArray from child arrays and fields. + /// + /// The length is automatically inferred from the arguments. + /// There should be at least one child array. This method does not + /// check that field types and child array types are consistent. + static Result> Make( + const ArrayVector& children, const FieldVector& fields, + std::shared_ptr null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + const StructType* struct_type() const; + + // Return a shared pointer in case the requestor desires to share ownership + // with this array. The returned array has its offset, length and null + // count adjusted. + const std::shared_ptr& field(int pos) const; + + const ArrayVector& fields() const; + + /// Returns null if name not found + std::shared_ptr GetFieldByName(const std::string& name) const; + + /// Indicate if field named `name` can be found unambiguously in the struct. + Status CanReferenceFieldByName(const std::string& name) const; + + /// Indicate if fields named `names` can be found unambiguously in the struct. 
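[Editor's aside: an illustrative sketch of StructArray::Make() with inferred length and type, reusing the `keys`/`items` children from the map fragment above.]

  ARROW_ASSIGN_OR_RAISE(auto person,
                        arrow::StructArray::Make(/*children=*/{keys, items},
                                                 /*field_names=*/{"name", "score"}));
  std::shared_ptr<arrow::Array> by_name = person->GetFieldByName("score");
  // Flatten() pushes the struct's validity bitmap down into each child array.
  ARROW_ASSIGN_OR_RAISE(auto columns, person->Flatten());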
+ Status CanReferenceFieldsByNames(const std::vector& names) const; + + /// \brief Flatten this array as a vector of arrays, one for each field + /// + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result Flatten(MemoryPool* pool = default_memory_pool()) const; + + /// \brief Get one of the child arrays, combining its null bitmap + /// with the parent struct array's bitmap. + /// + /// \param[in] index Which child array to get + /// \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + private: + // For caching boxed child data + // XXX This is not handled in a thread-safe manner. + mutable ArrayVector boxed_fields_; +}; + +// ---------------------------------------------------------------------- +// Union + +/// Base class for SparseUnionArray and DenseUnionArray +class ARROW_EXPORT UnionArray : public Array { + public: + using type_code_t = int8_t; + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& type_codes() const { return data_->buffers[1]; } + + const type_code_t* raw_type_codes() const { return raw_type_codes_ + data_->offset; } + + /// The logical type code of the value at index. + type_code_t type_code(int64_t i) const { return raw_type_codes_[i + data_->offset]; } + + /// The physical child id containing value at index. + int child_id(int64_t i) const { + return union_type_->child_ids()[raw_type_codes_[i + data_->offset]]; + } + + const UnionType* union_type() const { return union_type_; } + + UnionMode::type mode() const { return union_type_->mode(); } + + /// \brief Return the given field as an individual array. + /// + /// For sparse unions, the returned array has its offset, length and null + /// count adjusted. + std::shared_ptr field(int pos) const; + + protected: + void SetData(std::shared_ptr data); + + const type_code_t* raw_type_codes_; + const UnionType* union_type_; + + // For caching boxed child data + mutable std::vector> boxed_fields_; +}; + +/// Concrete Array class for sparse union data +class ARROW_EXPORT SparseUnionArray : public UnionArray { + public: + using TypeClass = SparseUnionType; + + explicit SparseUnionArray(std::shared_ptr data); + + SparseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, int64_t offset = 0); + + /// \brief Construct SparseUnionArray from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, ArrayVector children, + std::vector type_codes) { + return Make(std::move(type_ids), std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct SparseUnionArray with custom field names from type_ids and children + /// + /// This function does the bare minimum of validation of the input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. 
+ static Result> Make(const Array& type_ids, ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const SparseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// \brief Get one of the child arrays, adjusting its null bitmap + /// where the union array type code does not match. + /// + /// \param[in] index Which child array to get (i.e. the physical index, not the type + /// code) \param[in] pool The pool to allocate null bitmaps from, if necessary + Result> GetFlattenedField( + int index, MemoryPool* pool = default_memory_pool()) const; + + protected: + void SetData(std::shared_ptr data); +}; + +/// \brief Concrete Array class for dense union data +/// +/// Note that union types do not have a validity bitmap +class ARROW_EXPORT DenseUnionArray : public UnionArray { + public: + using TypeClass = DenseUnionType; + + explicit DenseUnionArray(const std::shared_ptr& data); + + DenseUnionArray(std::shared_ptr type, int64_t length, ArrayVector children, + std::shared_ptr type_ids, + std::shared_ptr value_offsets = NULLPTR, int64_t offset = 0); + + /// \brief Construct DenseUnionArray from type_ids, value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] type_codes Vector of type codes. + static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector type_codes) { + return Make(type_ids, value_offsets, std::move(children), std::vector{}, + std::move(type_codes)); + } + + /// \brief Construct DenseUnionArray with custom field names from type_ids, + /// value_offsets, and children + /// + /// This function does the bare minimum of validation of the offsets and + /// input types. + /// + /// \param[in] type_ids An array of logical type ids for the union type + /// \param[in] value_offsets An array of signed int32 values indicating the + /// relative offset into the respective child array for the type in a given slot. + /// The respective offsets for each child value array must be in order / increasing. + /// \param[in] children Vector of children Arrays containing the data for each type. + /// \param[in] field_names Vector of strings containing the name of each field. + /// \param[in] type_codes Vector of type codes. 
+ static Result> Make(const Array& type_ids, + const Array& value_offsets, + ArrayVector children, + std::vector field_names = {}, + std::vector type_codes = {}); + + const DenseUnionType* union_type() const { + return internal::checked_cast(union_type_); + } + + /// Note that this buffer does not account for any slice offset + const std::shared_ptr& value_offsets() const { return data_->buffers[2]; } + + int32_t value_offset(int64_t i) const { return raw_value_offsets_[i + data_->offset]; } + + const int32_t* raw_value_offsets() const { return raw_value_offsets_ + data_->offset; } + + protected: + const int32_t* raw_value_offsets_; + + void SetData(const std::shared_ptr& data); +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h new file mode 100644 index 0000000000000000000000000000000000000000..e6df92e3b788ceaf74d67219b4f88fd97d786724 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h @@ -0,0 +1,202 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Array accessor types for primitive/C-type-based arrays, such as numbers, +// boolean, and temporal types. + +#pragma once + +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/stl_iterator.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" // IWYU pragma: export +#include "arrow/type_traits.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// Concrete Array class for boolean data +class ARROW_EXPORT BooleanArray : public PrimitiveArray { + public: + using TypeClass = BooleanType; + using IteratorType = stl::ArrayIterator; + + explicit BooleanArray(const std::shared_ptr& data); + + BooleanArray(int64_t length, const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + bool Value(int64_t i) const { + return bit_util::GetBit(reinterpret_cast(raw_values_), + i + data_->offset); + } + + bool GetView(int64_t i) const { return Value(i); } + + std::optional operator[](int64_t i) const { return *IteratorType(*this, i); } + + /// \brief Return the number of false (0) values among the valid + /// values. Result is not cached. + int64_t false_count() const; + + /// \brief Return the number of true (1) values among the valid + /// values. Result is not cached. 
+ int64_t true_count() const; + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using PrimitiveArray::PrimitiveArray; +}; + +/// \addtogroup numeric-arrays +/// +/// @{ + +/// \brief Concrete Array class for numeric data with a corresponding C type +/// +/// This class is templated on the corresponding DataType subclass for the +/// given data, for example NumericArray or NumericArray. +/// +/// Note that convenience aliases are available for all accepted types +/// (for example Int8Array for NumericArray). +template +class NumericArray : public PrimitiveArray { + public: + using TypeClass = TYPE; + using value_type = typename TypeClass::c_type; + using IteratorType = stl::ArrayIterator>; + + explicit NumericArray(const std::shared_ptr& data) : PrimitiveArray(data) {} + + // Only enable this constructor without a type argument for types without additional + // metadata + template + NumericArray(enable_if_parameter_free length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : PrimitiveArray(TypeTraits::type_singleton(), length, data, null_bitmap, + null_count, offset) {} + + const value_type* raw_values() const { + return reinterpret_cast(raw_values_) + data_->offset; + } + + value_type Value(int64_t i) const { return raw_values()[i]; } + + // For API compatibility with BinaryArray etc. + value_type GetView(int64_t i) const { return Value(i); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using PrimitiveArray::PrimitiveArray; +}; + +/// DayTimeArray +/// --------------------- +/// \brief Array of Day and Millisecond values. +class ARROW_EXPORT DayTimeIntervalArray : public PrimitiveArray { + public: + using TypeClass = DayTimeIntervalType; + using IteratorType = stl::ArrayIterator; + + explicit DayTimeIntervalArray(const std::shared_ptr& data); + + DayTimeIntervalArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + DayTimeIntervalArray(int64_t length, const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + TypeClass::DayMilliseconds GetValue(int64_t i) const; + TypeClass::DayMilliseconds Value(int64_t i) const { return GetValue(i); } + + // For compatibility with Take kernel. + TypeClass::DayMilliseconds GetView(int64_t i) const { return GetValue(i); } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + int32_t byte_width() const { return sizeof(TypeClass::DayMilliseconds); } + + const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); } +}; + +/// \brief Array of Month, Day and nanosecond values. 
+class ARROW_EXPORT MonthDayNanoIntervalArray : public PrimitiveArray { + public: + using TypeClass = MonthDayNanoIntervalType; + using IteratorType = stl::ArrayIterator; + + explicit MonthDayNanoIntervalArray(const std::shared_ptr& data); + + MonthDayNanoIntervalArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MonthDayNanoIntervalArray(int64_t length, const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + TypeClass::MonthDayNanos GetValue(int64_t i) const; + TypeClass::MonthDayNanos Value(int64_t i) const { return GetValue(i); } + + // For compatibility with Take kernel. + TypeClass::MonthDayNanos GetView(int64_t i) const { return GetValue(i); } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + int32_t byte_width() const { return sizeof(TypeClass::MonthDayNanos); } + + const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); } +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h new file mode 100644 index 0000000000000000000000000000000000000000..9770aa1fbbb1c2ae488f922bb9e245df1ebf9a90 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// Array accessor classes for run-end encoded arrays
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup run-end-encoded-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// RunEndEncoded
+
+/// \brief Array type for run-end encoded data
+class ARROW_EXPORT RunEndEncodedArray : public Array {
+ private:
+  std::shared_ptr<Array> run_ends_array_;
+  std::shared_ptr<Array> values_array_;
+
+ public:
+  using TypeClass = RunEndEncodedType;
+
+  explicit RunEndEncodedArray(const std::shared_ptr<ArrayData>& data);
+
+  /// \brief Construct a RunEndEncodedArray from all parameters
+  ///
+  /// The length and offset parameters refer to the dimensions of the logical
+  /// array, which is the array we would get after expanding all the runs into
+  /// repeated values. As such, length can be much greater than the length of
+  /// the child run_ends and values arrays.
+  RunEndEncodedArray(const std::shared_ptr<DataType>& type, int64_t length,
+                     const std::shared_ptr<Array>& run_ends,
+                     const std::shared_ptr<Array>& values, int64_t offset = 0);
+
+  /// \brief Construct a RunEndEncodedArray from all parameters
+  ///
+  /// The length and offset parameters refer to the dimensions of the logical
+  /// array, which is the array we would get after expanding all the runs into
+  /// repeated values. As such, length can be much greater than the length of
+  /// the child run_ends and values arrays.
+  static Result<std::shared_ptr<RunEndEncodedArray>> Make(
+      const std::shared_ptr<DataType>& type, int64_t logical_length,
+      const std::shared_ptr<Array>& run_ends, const std::shared_ptr<Array>& values,
+      int64_t logical_offset = 0);
+
+  /// \brief Construct a RunEndEncodedArray from values and run ends arrays
+  ///
+  /// The data type is automatically inferred from the arguments.
+  /// The run_ends and values arrays must have the same length.
+  static Result<std::shared_ptr<RunEndEncodedArray>> Make(
+      int64_t logical_length, const std::shared_ptr<Array>& run_ends,
+      const std::shared_ptr<Array>& values, int64_t logical_offset = 0);
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+
+ public:
+  /// \brief Returns an array holding the logical indexes of each run-end
+  ///
+  /// The physical offset to the array is applied.
+  const std::shared_ptr<Array>& run_ends() const { return run_ends_array_; }
+
+  /// \brief Returns an array holding the values of each run
+  ///
+  /// The physical offset to the array is applied.
+  const std::shared_ptr<Array>& values() const { return values_array_; }
+
+  /// \brief Returns an array holding the logical indexes of each run end
+  ///
+  /// If a non-zero logical offset is set, this function allocates a new
+  /// array and rewrites all the run end values to be relative to the logical
+  /// offset and cuts the end of the array to the logical length.
+  Result<std::shared_ptr<Array>> LogicalRunEnds(MemoryPool* pool) const;
+
+  /// \brief Returns an array holding the values of each run
+  ///
+  /// If a non-zero logical offset is set, this function allocates a new
+  /// array containing only the values within the logical range.
+  std::shared_ptr<Array> LogicalValues() const;
+
+  /// \brief Find the physical offset of this REE array
+  ///
+  /// This function uses binary-search, so it has an O(log N) cost.
+  int64_t FindPhysicalOffset() const;
+
+  /// \brief Find the physical length of this REE array
+  ///
+  /// The physical length of an REE is the number of physical values (and
+  /// run-ends) necessary to represent the logical range of values from offset
+  /// to length.
+  ///
+  /// Avoid calling this function if the physical length can be established in
+  /// some other way (e.g. when iterating over the runs sequentially until the
+  /// end). This function uses binary-search, so it has an O(log N) cost.
+  int64_t FindPhysicalLength() const;
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cea571be3e3244741f3df15f87c8958eedddf76
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h
@@ -0,0 +1,215 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
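+
+// Illustrative usage sketch (not part of the upstream header): the adaptive
+// builders below start with one-byte storage and widen it as larger values
+// arrive, so the finished array gets the narrowest sufficient integer type:
+//
+//   arrow::AdaptiveIntBuilder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append(1));        // fits in int8 storage
+//   ARROW_RETURN_NOT_OK(builder.Append(1 << 20));  // forces widening to int32
+//   std::shared_ptr<arrow::Array> out;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&out));     // out is an Int32Array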
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/builder_base.h" +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-builders +/// +/// @{ + +namespace internal { + +class ARROW_EXPORT AdaptiveIntBuilderBase : public ArrayBuilder { + public: + AdaptiveIntBuilderBase(uint8_t start_int_size, MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment); + + explicit AdaptiveIntBuilderBase(MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : AdaptiveIntBuilderBase(sizeof(uint8_t), pool, alignment) {} + + /// \brief Append multiple nulls + /// \param[in] length the number of nulls to append + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(CommitPendingData()); + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(Reserve(length)); + memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length); + UnsafeSetNull(length); + } + return Status::OK(); + } + + Status AppendNull() final { + pending_data_[pending_pos_] = 0; + pending_valid_[pending_pos_] = 0; + pending_has_nulls_ = true; + ++pending_pos_; + ++length_; + ++null_count_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(CommitPendingData()); + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(Reserve(length)); + memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length); + UnsafeSetNotNull(length); + } + return Status::OK(); + } + + Status AppendEmptyValue() final { + pending_data_[pending_pos_] = 0; + pending_valid_[pending_pos_] = 1; + ++pending_pos_; + ++length_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + void Reset() override; + Status Resize(int64_t capacity) override; + + protected: + Status AppendInternal(const uint64_t val) { + pending_data_[pending_pos_] = val; + pending_valid_[pending_pos_] = 1; + ++pending_pos_; + ++length_; + + if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) { + return CommitPendingData(); + } + return Status::OK(); + } + + virtual Status CommitPendingData() = 0; + + template + typename std::enable_if= sizeof(new_type), Status>::type + ExpandIntSizeInternal(); + template + typename std::enable_if<(sizeof(old_type) < sizeof(new_type)), Status>::type + ExpandIntSizeInternal(); + + std::shared_ptr data_; + uint8_t* raw_data_ = NULLPTR; + + const uint8_t start_int_size_; + uint8_t int_size_; + + static constexpr int32_t pending_size_ = 1024; + uint8_t pending_valid_[pending_size_]; + uint64_t pending_data_[pending_size_]; + int32_t pending_pos_ = 0; + bool pending_has_nulls_ = false; +}; + +} // namespace internal + +class ARROW_EXPORT AdaptiveUIntBuilder : public internal::AdaptiveIntBuilderBase { + public: + explicit AdaptiveUIntBuilder(uint8_t start_int_size, + MemoryPool* pool = default_memory_pool()); + + explicit AdaptiveUIntBuilder(MemoryPool* pool = default_memory_pool()) + : AdaptiveUIntBuilder(sizeof(uint8_t), pool) {} + + using internal::AdaptiveIntBuilderBase::Reset; + + /// Scalar append + Status Append(const uint64_t val) { return AppendInternal(val); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values 
to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override; + + protected: + Status CommitPendingData() override; + Status ExpandIntSize(uint8_t new_int_size); + + Status AppendValuesInternal(const uint64_t* values, int64_t length, + const uint8_t* valid_bytes); + + template + Status ExpandIntSizeN(); +}; + +class ARROW_EXPORT AdaptiveIntBuilder : public internal::AdaptiveIntBuilderBase { + public: + explicit AdaptiveIntBuilder(uint8_t start_int_size, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + explicit AdaptiveIntBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : AdaptiveIntBuilder(sizeof(uint8_t), pool, alignment) {} + + using internal::AdaptiveIntBuilderBase::Reset; + + /// Scalar append + Status Append(const int64_t val) { return AppendInternal(static_cast(val)); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override; + + protected: + Status CommitPendingData() override; + Status ExpandIntSize(uint8_t new_int_size); + + Status AppendValuesInternal(const int64_t* values, int64_t length, + const uint8_t* valid_bytes); + + template + Status ExpandIntSizeN(); +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h new file mode 100644 index 0000000000000000000000000000000000000000..05af850fd149c6fce368cf14bac1aac821153a83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h @@ -0,0 +1,370 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
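+
+// Illustrative usage sketch (not part of the upstream header): the generic
+// Reserve / Append / Finish workflow via MakeBuilder() declared below
+// (Int64Builder is assumed from builder_primitive.h):
+//
+//   std::unique_ptr<arrow::ArrayBuilder> builder;
+//   ARROW_RETURN_NOT_OK(
+//       arrow::MakeBuilder(arrow::default_memory_pool(), arrow::int64(), &builder));
+//   auto* int_builder = static_cast<arrow::Int64Builder*>(builder.get());
+//   ARROW_RETURN_NOT_OK(int_builder->Reserve(2));
+//   ARROW_RETURN_NOT_OK(int_builder->Append(42));
+//   ARROW_RETURN_NOT_OK(int_builder->AppendNull());
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> out, int_builder->Finish());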
+
+#pragma once
+
+#include <algorithm>  // IWYU pragma: keep
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/array_primitive.h"
+#include "arrow/buffer.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace internal {
+
+template <class Builder, class V>
+class ArrayBuilderExtraOps {
+ public:
+  /// \brief Append a value from an optional or null if it has no value.
+  Status AppendOrNull(const std::optional<V>& value) {
+    auto* self = static_cast<Builder*>(this);
+    return value.has_value() ? self->Append(*value) : self->AppendNull();
+  }
+
+  /// \brief Append a value from an optional or null if it has no value.
+  ///
+  /// Unsafe methods don't check existing size.
+  void UnsafeAppendOrNull(const std::optional<V>& value) {
+    auto* self = static_cast<Builder*>(this);
+    return value.has_value() ? self->UnsafeAppend(*value) : self->UnsafeAppendNull();
+  }
+};
+
+}  // namespace internal
+
+/// \defgroup numeric-builders Concrete builder subclasses for numeric types
+/// @{
+/// @}
+
+/// \defgroup temporal-builders Concrete builder subclasses for temporal types
+/// @{
+/// @}
+
+/// \defgroup binary-builders Concrete builder subclasses for binary types
+/// @{
+/// @}
+
+/// \defgroup nested-builders Concrete builder subclasses for nested types
+/// @{
+/// @}
+
+/// \defgroup dictionary-builders Concrete builder subclasses for dictionary types
+/// @{
+/// @}
+
+/// \defgroup run-end-encoded-builders Concrete builder subclasses for run-end encoded
+/// arrays
+/// @{
+/// @}
+
+constexpr int64_t kMinBuilderCapacity = 1 << 5;
+constexpr int64_t kListMaximumElements = std::numeric_limits<int32_t>::max() - 1;
+
+/// Base class for all data array builders.
+///
+/// This class provides facilities for incrementally building the null bitmap
+/// (see Append methods) and, as a side effect, the current number of slots and
+/// the null count.
+///
+/// \note Users are expected to use builders as one of the concrete types below.
+/// For example, an ArrayBuilder* pointing to a BinaryBuilder should be downcast
+/// before use.
+class ARROW_EXPORT ArrayBuilder {
+ public:
+  explicit ArrayBuilder(MemoryPool* pool, int64_t alignment = kDefaultBufferAlignment)
+      : pool_(pool), alignment_(alignment), null_bitmap_builder_(pool, alignment) {}
+
+  ARROW_DEFAULT_MOVE_AND_ASSIGN(ArrayBuilder);
+
+  virtual ~ArrayBuilder() = default;
+
+  /// For nested types. Since the objects are owned by this class instance, we
+  /// skip shared pointers and just return a raw pointer
+  ArrayBuilder* child(int i) { return children_[i].get(); }
+
+  const std::shared_ptr<ArrayBuilder>& child_builder(int i) const {
+    return children_[i];
+  }
+
+  int num_children() const { return static_cast<int>(children_.size()); }
+
+  virtual int64_t length() const { return length_; }
+  int64_t null_count() const { return null_count_; }
+  int64_t capacity() const { return capacity_; }
+
+  /// \brief Ensure that enough memory has been allocated to fit the indicated
+  /// number of total elements in the builder, including any that have already
+  /// been appended. Does not account for reallocations that may be due to
+  /// variable size data, like binary values. To make space for incremental
+  /// appends, use Reserve instead.
+  ///
+  /// \param[in] capacity the minimum number of total array values to
+  /// accommodate. Must be greater than the current capacity.
+ /// \return Status + virtual Status Resize(int64_t capacity); + + /// \brief Ensure that there is enough space allocated to append the indicated + /// number of elements without any further reallocation. Overallocation is + /// used in order to minimize the impact of incremental Reserve() calls. + /// Note that additional_capacity is relative to the current number of elements + /// rather than to the current capacity, so calls to Reserve() which are not + /// interspersed with addition of new elements may not increase the capacity. + /// + /// \param[in] additional_capacity the number of additional array values + /// \return Status + Status Reserve(int64_t additional_capacity) { + auto current_capacity = capacity(); + auto min_capacity = length() + additional_capacity; + if (min_capacity <= current_capacity) return Status::OK(); + + // leave growth factor up to BufferBuilder + auto new_capacity = BufferBuilder::GrowByFactor(current_capacity, min_capacity); + return Resize(new_capacity); + } + + /// Reset the builder. + virtual void Reset(); + + /// \brief Append a null value to builder + virtual Status AppendNull() = 0; + /// \brief Append a number of null values to builder + virtual Status AppendNulls(int64_t length) = 0; + + /// \brief Append a non-null value to builder + /// + /// The appended value is an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending a null value to a parent nested type. + virtual Status AppendEmptyValue() = 0; + + /// \brief Append a number of non-null values to builder + /// + /// The appended values are an implementation detail, but the corresponding + /// memory slot is guaranteed to be initialized. + /// This method is useful when appending null values to a parent nested type. + virtual Status AppendEmptyValues(int64_t length) = 0; + + /// \brief Append a value from a scalar + Status AppendScalar(const Scalar& scalar) { return AppendScalar(scalar, 1); } + virtual Status AppendScalar(const Scalar& scalar, int64_t n_repeats); + virtual Status AppendScalars(const ScalarVector& scalars); + + /// \brief Append a range of values from an array. + /// + /// The given array must be the same type as the builder. + virtual Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) { + return Status::NotImplemented("AppendArraySlice for builder for ", *type()); + } + + /// \brief Return result of builder as an internal generic ArrayData + /// object. Resets builder except for dictionary builder + /// + /// \param[out] out the finalized ArrayData object + /// \return Status + virtual Status FinishInternal(std::shared_ptr* out) = 0; + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. + /// + /// \param[out] out the finalized Array object + /// \return Status + Status Finish(std::shared_ptr* out); + + /// \brief Return result of builder as an Array object. + /// + /// The builder is reset except for DictionaryBuilder. + /// + /// \return The finalized Array object + Result> Finish(); + + /// \brief Return the type of the built Array + virtual std::shared_ptr type() const = 0; + + protected: + /// Append to null bitmap + Status AppendToBitmap(bool is_valid); + + /// Vector append. Treat each zero byte as a null. If valid_bytes is null + /// assume all of length bits are valid. + Status AppendToBitmap(const uint8_t* valid_bytes, int64_t length); + + /// Uniform append. 
Append N times the same validity bit.
+  Status AppendToBitmap(int64_t num_bits, bool value);
+
+  /// Set the next length bits to not null (i.e. valid).
+  Status SetNotNull(int64_t length);
+
+  // Unsafe operations (don't check capacity/don't resize)
+
+  void UnsafeAppendNull() { UnsafeAppendToBitmap(false); }
+
+  // Append to null bitmap, update the length
+  void UnsafeAppendToBitmap(bool is_valid) {
+    null_bitmap_builder_.UnsafeAppend(is_valid);
+    ++length_;
+    if (!is_valid) ++null_count_;
+  }
+
+  // Vector append. Treat each zero byte as a null. If valid_bytes is null
+  // assume all of length bits are valid.
+  void UnsafeAppendToBitmap(const uint8_t* valid_bytes, int64_t length) {
+    if (valid_bytes == NULLPTR) {
+      return UnsafeSetNotNull(length);
+    }
+    null_bitmap_builder_.UnsafeAppend(valid_bytes, length);
+    length_ += length;
+    null_count_ = null_bitmap_builder_.false_count();
+  }
+
+  // Vector append. Copy from a given bitmap. If bitmap is null assume
+  // all of length bits are valid.
+  void UnsafeAppendToBitmap(const uint8_t* bitmap, int64_t offset, int64_t length) {
+    if (bitmap == NULLPTR) {
+      return UnsafeSetNotNull(length);
+    }
+    null_bitmap_builder_.UnsafeAppend(bitmap, offset, length);
+    length_ += length;
+    null_count_ = null_bitmap_builder_.false_count();
+  }
+
+  // Append the same validity value a given number of times.
+  void UnsafeAppendToBitmap(const int64_t num_bits, bool value) {
+    if (value) {
+      UnsafeSetNotNull(num_bits);
+    } else {
+      UnsafeSetNull(num_bits);
+    }
+  }
+
+  void UnsafeAppendToBitmap(const std::vector<bool>& is_valid);
+
+  // Set the next validity bits to not null (i.e. valid).
+  void UnsafeSetNotNull(int64_t length);
+
+  // Set the next validity bits to null (i.e. invalid).
+  void UnsafeSetNull(int64_t length);
+
+  static Status TrimBuffer(const int64_t bytes_filled, ResizableBuffer* buffer);
+
+  /// \brief Finish to an array of the specified ArrayType
+  template <typename ArrayType>
+  Status FinishTyped(std::shared_ptr<ArrayType>* out) {
+    std::shared_ptr<Array> out_untyped;
+    ARROW_RETURN_NOT_OK(Finish(&out_untyped));
+    *out = std::static_pointer_cast<ArrayType>(std::move(out_untyped));
+    return Status::OK();
+  }
+
+  // Check the requested capacity for validity
+  Status CheckCapacity(int64_t new_capacity) {
+    if (ARROW_PREDICT_FALSE(new_capacity < 0)) {
+      return Status::Invalid(
+          "Resize capacity must be positive (requested: ", new_capacity, ")");
+    }
+
+    if (ARROW_PREDICT_FALSE(new_capacity < length_)) {
+      return Status::Invalid("Resize cannot downsize (requested: ", new_capacity,
+                             ", current length: ", length_, ")");
+    }
+
+    return Status::OK();
+  }
+
+  // Check for array type
+  Status CheckArrayType(const std::shared_ptr<DataType>& expected_type,
+                        const Array& array, const char* message);
+  Status CheckArrayType(Type::type expected_type, const Array& array,
+                        const char* message);
+
+  MemoryPool* pool_;
+  int64_t alignment_;
+
+  TypedBufferBuilder<bool> null_bitmap_builder_;
+  int64_t null_count_ = 0;
+
+  // Array length, so far. Also, the index of the next element to be added
+  int64_t length_ = 0;
+  int64_t capacity_ = 0;
+
+  // Child value array builders.
These are owned by this class + std::vector> children_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrayBuilder); +}; + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the data type to create the builder for +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilder( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilder(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type, where any top-level or nested dictionary builders return the +/// exact index type specified by the type. +ARROW_EXPORT +Status MakeBuilderExactIndex(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilderExactIndex( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilderExactIndex(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty DictionaryBuilder initialized optionally +/// with a pre-existing dictionary +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the dictionary type to create the builder for +/// \param[in] dictionary the initial dictionary, if any. May be nullptr +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeDictionaryBuilder(MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& dictionary, + std::unique_ptr* out); + +inline Result> MakeDictionaryBuilder( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, type, dictionary, &out)); + return std::move(out); +} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..d825f7d32520a309905871155a81b9adccff3c07 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h @@ -0,0 +1,971 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
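+
+// Illustrative usage sketch (not part of the upstream header): appending
+// variable-length values with the StringBuilder declared below.
+//
+//   arrow::StringBuilder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append("foo"));
+//   ARROW_RETURN_NOT_OK(builder.AppendNull());
+//   ARROW_RETURN_NOT_OK(builder.Append(std::string_view("bar")));
+//   std::shared_ptr<arrow::StringArray> out;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&out));  // offsets buffer: 0, 3, 3, 6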
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/buffer_builder.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/binary_view_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup binary-builders +/// +/// @{ + +// ---------------------------------------------------------------------- +// Binary and String + +template +class BaseBinaryBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, std::string_view> { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + + explicit BaseBinaryBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + offsets_builder_(pool, alignment), + value_data_builder_(pool, alignment) {} + + BaseBinaryBuilder(const std::shared_ptr& type, MemoryPool* pool) + : BaseBinaryBuilder(pool) {} + + Status Append(const uint8_t* value, offset_type length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status Append(const char* value, offset_type length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// Extend the last appended value by appending more data at the end + /// + /// Unlike Append, this does not create a new offset. + Status ExtendCurrent(const uint8_t* value, offset_type length) { + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + return Status::OK(); + } + + Status ExtendCurrent(std::string_view value) { + return ExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + Status AppendNulls(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, false); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, true); + return Status::OK(); + } + + /// \brief Append without checking capacity + /// + /// Offsets and data should have been presized using Reserve() and + /// ReserveData(), respectively. 
+ void UnsafeAppend(const uint8_t* value, offset_type length) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(value, length); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppend(const char* value, offset_type length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// Like ExtendCurrent, but do not check capacity + void UnsafeExtendCurrent(const uint8_t* value, offset_type length) { + value_data_builder_.UnsafeAppend(value, length); + } + + void UnsafeExtendCurrent(std::string_view value) { + UnsafeExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + void UnsafeAppendNull() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a sequence of strings in one shot. + /// + /// \param[in] values a vector of strings + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const std::vector& values, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = std::accumulate( + values.begin(), values.end(), 0ULL, + [](uint64_t sum, const std::string& str) { return sum + str.size(); }); + ARROW_RETURN_NOT_OK(Reserve(values.size())); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes != NULLPTR) { + for (std::size_t i = 0; i < values.size(); ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + value_data_builder_.UnsafeAppend( + reinterpret_cast(values[i].data()), values[i].size()); + } + } + } else { + for (const auto& value : values) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(value.data()), + value.size()); + } + } + + UnsafeAppendToBitmap(valid_bytes, values.size()); + return Status::OK(); + } + + /// \brief Append a sequence of nul-terminated strings in one shot. + /// If one of the values is NULL, it is processed as a null + /// value even if the corresponding valid_bytes entry is 1. 
+ /// + /// \param[in] values a contiguous C array of nul-terminated char * + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const char** values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = 0; + std::vector value_lengths(length); + bool have_null_value = false; + for (int64_t i = 0; i < length; ++i) { + if (values[i] != NULLPTR) { + auto value_length = strlen(values[i]); + value_lengths[i] = value_length; + total_length += value_length; + } else { + have_null_value = true; + } + } + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes) { + int64_t valid_bytes_offset = 0; + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } else { + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, + i - valid_bytes_offset); + UnsafeAppendToBitmap(false); + valid_bytes_offset = i + 1; + } + } + } + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, length - valid_bytes_offset); + } else { + if (have_null_value) { + std::vector valid_vector(length, 0); + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + valid_vector[i] = 1; + } + } + UnsafeAppendToBitmap(valid_vector.data(), length); + } else { + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } + UnsafeAppendToBitmap(NULLPTR, length); + } + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + auto bitmap = array.GetValues(0, 0); + auto offsets = array.GetValues(1); + auto data = array.GetValues(2, 0); + auto total_length = offsets[offset + length] - offsets[offset]; + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + for (int64_t i = 0; i < length; i++) { + if (!bitmap || bit_util::GetBit(bitmap, array.offset + offset + i)) { + const offset_type start = offsets[offset + i]; + const offset_type end = offsets[offset + i + 1]; + UnsafeAppend(data + start, end - start); + } else { + UnsafeAppendNull(); + } + } + return Status::OK(); + } + + void Reset() override { + ArrayBuilder::Reset(); + offsets_builder_.Reset(); + value_data_builder_.Reset(); + } + + Status ValidateOverflow(int64_t new_bytes) { + auto new_size = value_data_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + // One more than requested for offsets + ARROW_RETURN_NOT_OK(offsets_builder_.Resize(capacity + 1)); + return ArrayBuilder::Resize(capacity); + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return value_data_builder_.Reserve(elements); + } + + Status 
FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    // Write final offset (values length)
+    ARROW_RETURN_NOT_OK(AppendNextOffset());
+
+    // These buffers' padding zeroed by BufferBuilder
+    std::shared_ptr<Buffer> offsets, value_data, null_bitmap;
+    ARROW_RETURN_NOT_OK(offsets_builder_.Finish(&offsets));
+    ARROW_RETURN_NOT_OK(value_data_builder_.Finish(&value_data));
+    ARROW_RETURN_NOT_OK(null_bitmap_builder_.Finish(&null_bitmap));
+
+    *out = ArrayData::Make(type(), length_, {null_bitmap, offsets, value_data},
+                           null_count_, 0);
+    Reset();
+    return Status::OK();
+  }
+
+  /// \return data pointer of the value data builder
+  const uint8_t* value_data() const { return value_data_builder_.data(); }
+  /// \return size of values buffer so far
+  int64_t value_data_length() const { return value_data_builder_.length(); }
+  /// \return capacity of values buffer
+  int64_t value_data_capacity() const { return value_data_builder_.capacity(); }
+
+  /// \return data pointer of the offsets builder
+  const offset_type* offsets_data() const { return offsets_builder_.data(); }
+
+  /// Temporary access to a value.
+  ///
+  /// This pointer becomes invalid on the next modifying operation.
+  const uint8_t* GetValue(int64_t i, offset_type* out_length) const {
+    const offset_type* offsets = offsets_builder_.data();
+    const auto offset = offsets[i];
+    if (i == (length_ - 1)) {
+      *out_length = static_cast<offset_type>(value_data_builder_.length()) - offset;
+    } else {
+      *out_length = offsets[i + 1] - offset;
+    }
+    return value_data_builder_.data() + offset;
+  }
+
+  offset_type offset(int64_t i) const { return offsets_data()[i]; }
+
+  /// Temporary access to a value.
+  ///
+  /// This view becomes invalid on the next modifying operation.
+  std::string_view GetView(int64_t i) const {
+    offset_type value_length;
+    const uint8_t* value_data = GetValue(i, &value_length);
+    return std::string_view(reinterpret_cast<const char*>(value_data), value_length);
+  }
+
+  // Cannot make this a static attribute because of linking issues
+  static constexpr int64_t memory_limit() {
+    return std::numeric_limits<offset_type>::max() - 1;
+  }
+
+ protected:
+  TypedBufferBuilder<offset_type> offsets_builder_;
+  TypedBufferBuilder<uint8_t> value_data_builder_;
+
+  Status AppendNextOffset() {
+    const int64_t num_bytes = value_data_builder_.length();
+    return offsets_builder_.Append(static_cast<offset_type>(num_bytes));
+  }
+
+  void UnsafeAppendNextOffset() {
+    const int64_t num_bytes = value_data_builder_.length();
+    offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+  }
+};
+
+/// \class BinaryBuilder
+/// \brief Builder class for variable-length binary data
+class ARROW_EXPORT BinaryBuilder : public BaseBinaryBuilder<BinaryType> {
+ public:
+  using BaseBinaryBuilder::BaseBinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<BinaryArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return binary(); }
+};
+
+/// \class StringBuilder
+/// \brief Builder class for UTF8 strings
+class ARROW_EXPORT StringBuilder : public BinaryBuilder {
+ public:
+  using BinaryBuilder::BinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<StringArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return utf8(); }
+};
+
+/// \class LargeBinaryBuilder
+/// \brief Builder class for large variable-length binary data
+class ARROW_EXPORT LargeBinaryBuilder : public BaseBinaryBuilder<LargeBinaryType> {
+ public:
+  using BaseBinaryBuilder::BaseBinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  ///
\endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_binary(); } +}; + +/// \class LargeStringBuilder +/// \brief Builder class for large UTF8 strings +class ARROW_EXPORT LargeStringBuilder : public LargeBinaryBuilder { + public: + using LargeBinaryBuilder::LargeBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_utf8(); } +}; + +// ---------------------------------------------------------------------- +// BinaryViewBuilder, StringViewBuilder +// +// These builders do not support building raw pointer view arrays. + +namespace internal { + +// We allocate medium-sized memory chunks and accumulate data in those, which +// may result in some waste if there are many large-ish strings. If a string +// comes along that does not fit into a block, we allocate a new block and +// write into that. +// +// Later we can implement optimizations to continuing filling underfull blocks +// after encountering a large string that required allocating a new block. +class ARROW_EXPORT StringHeapBuilder { + public: + static constexpr int64_t kDefaultBlocksize = 32 << 10; // 32KB + + StringHeapBuilder(MemoryPool* pool, int64_t alignment) + : pool_(pool), alignment_(alignment) {} + + void SetBlockSize(int64_t blocksize) { blocksize_ = blocksize; } + + using c_type = BinaryViewType::c_type; + + template + std::conditional_t, c_type> Append(const uint8_t* value, + int64_t length) { + if (length <= BinaryViewType::kInlineSize) { + return util::ToInlineBinaryView(value, static_cast(length)); + } + + if constexpr (Safe) { + ARROW_RETURN_NOT_OK(Reserve(length)); + } + + auto v = + util::ToBinaryView(value, static_cast(length), + static_cast(blocks_.size() - 1), current_offset_); + + memcpy(current_out_buffer_, value, static_cast(length)); + current_out_buffer_ += length; + current_remaining_bytes_ -= length; + current_offset_ += static_cast(length); + return v; + } + + static constexpr int64_t ValueSizeLimit() { + return std::numeric_limits::max(); + } + + /// \brief Ensure that the indicated number of bytes can be appended via + /// UnsafeAppend operations without the need to allocate more memory + Status Reserve(int64_t num_bytes) { + if (ARROW_PREDICT_FALSE(num_bytes > ValueSizeLimit())) { + return Status::CapacityError( + "BinaryView or StringView elements cannot reference " + "strings larger than 2GB"); + } + if (num_bytes > current_remaining_bytes_) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + current_remaining_bytes_ = num_bytes > blocksize_ ? 
num_bytes : blocksize_; + ARROW_ASSIGN_OR_RAISE( + std::shared_ptr new_block, + AllocateResizableBuffer(current_remaining_bytes_, alignment_, pool_)); + current_offset_ = 0; + current_out_buffer_ = new_block->mutable_data(); + blocks_.emplace_back(std::move(new_block)); + } + return Status::OK(); + } + + void Reset() { + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + blocks_.clear(); + } + + int64_t current_remaining_bytes() const { return current_remaining_bytes_; } + + Result>> Finish() { + if (!blocks_.empty()) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + } + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + return std::move(blocks_); + } + + private: + Status FinishLastBlock() { + if (current_remaining_bytes_ > 0) { + // Avoid leaking uninitialized bytes from the allocator + ARROW_RETURN_NOT_OK( + blocks_.back()->Resize(blocks_.back()->size() - current_remaining_bytes_, + /*shrink_to_fit=*/true)); + blocks_.back()->ZeroPadding(); + } + return Status::OK(); + } + + MemoryPool* pool_; + int64_t alignment_; + int64_t blocksize_ = kDefaultBlocksize; + std::vector> blocks_; + + int32_t current_offset_ = 0; + uint8_t* current_out_buffer_ = NULLPTR; + int64_t current_remaining_bytes_ = 0; +}; + +} // namespace internal + +class ARROW_EXPORT BinaryViewBuilder : public ArrayBuilder { + public: + using TypeClass = BinaryViewType; + + // this constructor provided for MakeBuilder compatibility + BinaryViewBuilder(const std::shared_ptr&, MemoryPool* pool); + + explicit BinaryViewBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + data_builder_(pool, alignment), + data_heap_builder_(pool, alignment) {} + + /// Set the size for future preallocated data buffers. + /// + /// The default size is 32KB, so after each 32KB of string data appended to the builder + /// a new data buffer will be allocated. Adjust this to a larger value to decrease the + /// frequency of allocation, or to a smaller value to lower the overhead of each + /// allocation. + void SetBlockSize(int64_t blocksize) { data_heap_builder_.SetBlockSize(blocksize); } + + /// The number of bytes which can be appended to this builder without allocating another + /// data buffer. 
+  int64_t current_block_bytes_remaining() const {
+    return data_heap_builder_.current_remaining_bytes();
+  }
+
+  Status Append(const uint8_t* value, int64_t length) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendToBitmap(true);
+    ARROW_ASSIGN_OR_RAISE(auto v,
+                          data_heap_builder_.Append</*Safe=*/true>(value, length));
+    data_builder_.UnsafeAppend(v);
+    return Status::OK();
+  }
+
+  Status Append(const char* value, int64_t length) {
+    return Append(reinterpret_cast<const uint8_t*>(value), length);
+  }
+
+  Status Append(std::string_view value) {
+    return Append(value.data(), static_cast<int64_t>(value.size()));
+  }
+
+  /// \brief Append without checking capacity
+  ///
+  /// The builder should have been presized using Reserve() and ReserveData(),
+  /// and the value must not be larger than 2GB
+  void UnsafeAppend(const uint8_t* value, int64_t length) {
+    UnsafeAppendToBitmap(true);
+    auto v = data_heap_builder_.Append</*Safe=*/false>(value, length);
+    data_builder_.UnsafeAppend(v);
+  }
+
+  void UnsafeAppend(const char* value, int64_t length) {
+    UnsafeAppend(reinterpret_cast<const uint8_t*>(value), length);
+  }
+
+  void UnsafeAppend(const std::string& value) {
+    UnsafeAppend(value.c_str(), static_cast<int64_t>(value.size()));
+  }
+
+  void UnsafeAppend(std::string_view value) {
+    UnsafeAppend(value.data(), static_cast<int64_t>(value.size()));
+  }
+
+  /// \brief Ensures there is enough allocated available capacity in the
+  /// out-of-line data heap to append the indicated number of bytes without
+  /// additional allocations
+  Status ReserveData(int64_t length);
+
+  Status AppendNulls(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, BinaryViewType::c_type{});
+    UnsafeSetNull(length);
+    return Status::OK();
+  }
+
+  /// \brief Append a single null element
+  Status AppendNull() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    data_builder_.UnsafeAppend(BinaryViewType::c_type{});
+    UnsafeAppendToBitmap(false);
+    return Status::OK();
+  }
+
+  /// \brief Append an empty element (length-0 inline string)
+  Status AppendEmptyValue() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    data_builder_.UnsafeAppend(BinaryViewType::c_type{});
+    UnsafeAppendToBitmap(true);
+    return Status::OK();
+  }
+
+  /// \brief Append several empty elements
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, BinaryViewType::c_type{});
+    UnsafeSetNotNull(length);
+    return Status::OK();
+  }
+
+  void UnsafeAppendNull() {
+    data_builder_.UnsafeAppend(BinaryViewType::c_type{});
+    UnsafeAppendToBitmap(false);
+  }
+
+  void UnsafeAppendEmptyValue() {
+    data_builder_.UnsafeAppend(BinaryViewType::c_type{});
+    UnsafeAppendToBitmap(true);
+  }
+
+  /// \brief Append a slice of a BinaryViewArray passed as an ArraySpan.
Copies + /// the underlying out-of-line string memory to avoid memory lifetime issues + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + void Reset() override; + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override { return binary_view(); } + + protected: + TypedBufferBuilder data_builder_; + + // Accumulates out-of-line data in fixed-size chunks which are then attached + // to the resulting ArrayData + internal::StringHeapBuilder data_heap_builder_; +}; + +class ARROW_EXPORT StringViewBuilder : public BinaryViewBuilder { + public: + using BinaryViewBuilder::BinaryViewBuilder; + std::shared_ptr type() const override { return utf8_view(); } +}; + +// ---------------------------------------------------------------------- +// FixedSizeBinaryBuilder + +class ARROW_EXPORT FixedSizeBinaryBuilder : public ArrayBuilder { + public: + using TypeClass = FixedSizeBinaryType; + + explicit FixedSizeBinaryBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + Status Append(const uint8_t* value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const char* value) { + return Append(reinterpret_cast(value)); + } + + Status Append(std::string_view view) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(view); + return Status::OK(); + } + + Status Append(const std::string& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const Buffer& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const std::shared_ptr& s) { return Append(*s); } + + template + Status Append(const std::array& value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend( + std::string_view(reinterpret_cast(value.data()), value.size())); + return Status::OK(); + } + + Status AppendValues(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status AppendValues(const uint8_t* data, int64_t length, const uint8_t* validity, + int64_t bitmap_offset); + + Status AppendNull() final; + Status AppendNulls(int64_t length) final; + + Status AppendEmptyValue() final; + Status AppendEmptyValues(int64_t length) final; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues( + array.GetValues(1, 0) + ((array.offset + offset) * byte_width_), length, + array.GetValues(0, 0), array.offset + offset); + } + + void UnsafeAppend(const uint8_t* value) { + UnsafeAppendToBitmap(true); + if (ARROW_PREDICT_TRUE(byte_width_ > 0)) { + byte_builder_.UnsafeAppend(value, byte_width_); + } + } + + void UnsafeAppend(const char* value) { + UnsafeAppend(reinterpret_cast(value)); + } + + void UnsafeAppend(std::string_view value) { +#ifndef NDEBUG + CheckValueSize(static_cast(value.size())); +#endif + UnsafeAppend(reinterpret_cast(value.data())); + } + + void UnsafeAppend(const Buffer& s) { UnsafeAppend(std::string_view{s}); } + + void UnsafeAppend(const std::shared_ptr& s) { UnsafeAppend(*s); } + + void UnsafeAppendNull() { + UnsafeAppendToBitmap(false); + byte_builder_.UnsafeAppend(/*num_copies=*/byte_width_, 0); + 
}
+
+  Status ValidateOverflow(int64_t new_bytes) const {
+    auto new_size = byte_builder_.length() + new_bytes;
+    if (ARROW_PREDICT_FALSE(new_size > memory_limit())) {
+      return Status::CapacityError("array cannot contain more than ", memory_limit(),
+                                   " bytes, have ", new_size);
+    } else {
+      return Status::OK();
+    }
+  }
+
+  /// \brief Ensures there is enough allocated capacity to append the indicated
+  /// number of bytes to the value data buffer without additional allocations
+  Status ReserveData(int64_t elements) {
+    ARROW_RETURN_NOT_OK(ValidateOverflow(elements));
+    return byte_builder_.Reserve(elements);
+  }
+
+  void Reset() override;
+  Status Resize(int64_t capacity) override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<FixedSizeBinaryArray>* out) { return FinishTyped(out); }
+
+  /// \return size of values buffer so far
+  int64_t value_data_length() const { return byte_builder_.length(); }
+
+  int32_t byte_width() const { return byte_width_; }
+
+  /// Temporary access to a value.
+  ///
+  /// This pointer becomes invalid on the next modifying operation.
+  const uint8_t* GetValue(int64_t i) const;
+
+  /// Temporary access to a value.
+  ///
+  /// This view becomes invalid on the next modifying operation.
+  std::string_view GetView(int64_t i) const;
+
+  static constexpr int64_t memory_limit() {
+    return std::numeric_limits<int64_t>::max() - 1;
+  }
+
+  std::shared_ptr<DataType> type() const override {
+    return fixed_size_binary(byte_width_);
+  }
+
+ protected:
+  int32_t byte_width_;
+  BufferBuilder byte_builder_;
+
+  /// Temporary access to a value.
+  ///
+  /// This pointer becomes invalid on the next modifying operation.
+  uint8_t* GetMutableValue(int64_t i) {
+    uint8_t* data_ptr = byte_builder_.mutable_data();
+    return data_ptr + i * byte_width_;
+  }
+
+  void CheckValueSize(int64_t size);
+};
+
+/// @}
+
+// ----------------------------------------------------------------------
+// Chunked builders: build a sequence of BinaryArray or StringArray that are
+// limited to a particular size (to the upper limit of 2GB)
+
+namespace internal {
+
+class ARROW_EXPORT ChunkedBinaryBuilder {
+ public:
+  explicit ChunkedBinaryBuilder(int32_t max_chunk_value_length,
+                                MemoryPool* pool = default_memory_pool());
+
+  ChunkedBinaryBuilder(int32_t max_chunk_value_length, int32_t max_chunk_length,
+                       MemoryPool* pool = default_memory_pool());
+
+  virtual ~ChunkedBinaryBuilder() = default;
+
+  Status Append(const uint8_t* value, int32_t length) {
+    if (ARROW_PREDICT_FALSE(length + builder_->value_data_length() >
+                            max_chunk_value_length_)) {
+      if (builder_->value_data_length() == 0) {
+        // The current item is larger than max_chunk_value_length_;
+        // this chunk will be oversize and hold *only* this item
+        ARROW_RETURN_NOT_OK(builder_->Append(value, length));
+        return NextChunk();
+      }
+      // The current item would cause builder_->value_data_length() to exceed
+      // max_chunk_value_length_, so finish this chunk and append the current
+      // item to the next chunk
+      ARROW_RETURN_NOT_OK(NextChunk());
+      return Append(value, length);
+    }
+
+    if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) {
+      // The current item would cause builder_->length() to exceed
+      // max_chunk_length_, so finish this chunk and append the current item to
+      // the next chunk
+      ARROW_RETURN_NOT_OK(NextChunk());
+    }
+
+    return builder_->Append(value, length);
+  }
+
+  Status Append(std::string_view value) {
+    return Append(reinterpret_cast<const uint8_t*>(value.data()),
+                  static_cast<int32_t>(value.size()));
+ } + + Status AppendNull() { + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + ARROW_RETURN_NOT_OK(NextChunk()); + } + return builder_->AppendNull(); + } + + Status Reserve(int64_t values); + + virtual Status Finish(ArrayVector* out); + + protected: + Status NextChunk(); + + // maximum total character data size per chunk + int64_t max_chunk_value_length_; + + // maximum elements allowed per chunk + int64_t max_chunk_length_ = kListMaximumElements; + + // when Reserve() would cause builder_ to exceed its max_chunk_length_, + // add to extra_capacity_ instead and wait to reserve until the next chunk + int64_t extra_capacity_ = 0; + + std::unique_ptr builder_; + std::vector> chunks_; +}; + +class ARROW_EXPORT ChunkedStringBuilder : public ChunkedBinaryBuilder { + public: + using ChunkedBinaryBuilder::ChunkedBinaryBuilder; + + Status Finish(ArrayVector* out) override; +}; + +} // namespace internal + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..8094250aef8d4b5068d8d8a3ec5ab6488221775c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +#include "arrow/array/array_decimal.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/data.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup numeric-builders +/// +/// @{ + +class ARROW_EXPORT Decimal128Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal128Type; + using ValueType = Decimal128; + + explicit Decimal128Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(Decimal128 val); + void UnsafeAppend(Decimal128 val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +class ARROW_EXPORT Decimal256Builder : public FixedSizeBinaryBuilder { + public: + using TypeClass = Decimal256Type; + using ValueType = Decimal256; + + explicit Decimal256Builder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + using FixedSizeBinaryBuilder::Append; + using FixedSizeBinaryBuilder::AppendValues; + using FixedSizeBinaryBuilder::Reset; + + Status Append(const Decimal256& val); + void UnsafeAppend(const Decimal256& val); + void UnsafeAppend(std::string_view val); + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return decimal_type_; } + + protected: + std::shared_ptr decimal_type_; +}; + +using DecimalBuilder = Decimal128Builder; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0d711dc5bb588c3abcbb301902338cf60d32bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h @@ -0,0 +1,737 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_adaptive.h" // IWYU pragma: export +#include "arrow/array/builder_base.h" // IWYU pragma: export +#include "arrow/array/builder_primitive.h" // IWYU pragma: export +#include "arrow/array/data.h" +#include "arrow/array/util.h" +#include "arrow/scalar.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/decimal.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Dictionary builder + +namespace internal { + +template +struct DictionaryValue { + using type = typename T::c_type; + using PhysicalType = T; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = + typename std::conditional::value, + BinaryType, LargeBinaryType>::type; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryViewType; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryType; +}; + +class ARROW_EXPORT DictionaryMemoTable { + public: + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& type); + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& dictionary); + ~DictionaryMemoTable(); + + Status GetArrayData(int64_t start_offset, std::shared_ptr* out); + + /// \brief Insert new memo values + Status InsertValues(const Array& values); + + int32_t size() const; + + template + Status GetOrInsert(typename DictionaryValue::type value, int32_t* out) { + // We want to keep the DictionaryMemoTable implementation private, also we can't + // use extern template classes because of compiler issues (MinGW?). Instead, + // we expose explicit function overrides for each supported physical type. 
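+    // The null pointer below serves purely as an overload-selection tag: only
+    // its static type matters for picking the matching private GetOrInsert
+    // overload, and it is never dereferenced.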
const typename DictionaryValue<T>::PhysicalType* physical_type = NULLPTR;
+    return GetOrInsert(physical_type, value, out);
+  }
+
+ private:
+  Status GetOrInsert(const BooleanType*, bool value, int32_t* out);
+  Status GetOrInsert(const Int8Type*, int8_t value, int32_t* out);
+  Status GetOrInsert(const Int16Type*, int16_t value, int32_t* out);
+  Status GetOrInsert(const Int32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Int64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const UInt8Type*, uint8_t value, int32_t* out);
+  Status GetOrInsert(const UInt16Type*, uint16_t value, int32_t* out);
+  Status GetOrInsert(const UInt32Type*, uint32_t value, int32_t* out);
+  Status GetOrInsert(const UInt64Type*, uint64_t value, int32_t* out);
+  Status GetOrInsert(const DurationType*, int64_t value, int32_t* out);
+  Status GetOrInsert(const TimestampType*, int64_t value, int32_t* out);
+  Status GetOrInsert(const Date32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Date64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const Time32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Time64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const MonthDayNanoIntervalType*,
+                     MonthDayNanoIntervalType::MonthDayNanos value, int32_t* out);
+  Status GetOrInsert(const DayTimeIntervalType*,
+                     DayTimeIntervalType::DayMilliseconds value, int32_t* out);
+  Status GetOrInsert(const MonthIntervalType*, int32_t value, int32_t* out);
+  Status GetOrInsert(const FloatType*, float value, int32_t* out);
+  Status GetOrInsert(const DoubleType*, double value, int32_t* out);
+
+  Status GetOrInsert(const BinaryType*, std::string_view value, int32_t* out);
+  Status GetOrInsert(const LargeBinaryType*, std::string_view value, int32_t* out);
+  Status GetOrInsert(const BinaryViewType*, std::string_view value, int32_t* out);
+
+  class DictionaryMemoTableImpl;
+  std::unique_ptr<DictionaryMemoTableImpl> impl_;
+};
+
+}  // namespace internal
+
+/// \addtogroup dictionary-builders
+///
+/// @{
+
+namespace internal {
+
+/// \brief Array builder for creating a dictionary-encoded DictionaryArray
+/// from dense array data
+///
+/// Unlike other builders, the dictionary builder does not completely reset
+/// its state on Finish calls.
+template <typename BuilderType, typename T>
+class DictionaryBuilderBase : public ArrayBuilder {
+ public:
+  using TypeClass = DictionaryType;
+  using Value = typename DictionaryValue<T>::type;
+
+  // WARNING: the type given below is the value type, not the DictionaryType.
+  // The DictionaryType is instantiated on the Finish() call.
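+  //
+  // An illustrative usage sketch (not from the original header), assuming the
+  // StringDictionaryBuilder alias (DictionaryBuilder<StringType>) defined
+  // later in this file:
+  //
+  //   StringDictionaryBuilder builder;
+  //   ARROW_RETURN_NOT_OK(builder.Append("a"));
+  //   ARROW_RETURN_NOT_OK(builder.Append("b"));
+  //   ARROW_RETURN_NOT_OK(builder.Append("a"));  // memoized, reuses index 0
+  //   std::shared_ptr<Array> out;
+  //   // dictionary: ["a", "b"], indices: [0, 1, 0]
+  //   ARROW_RETURN_NOT_OK(builder.Finish(&out));
+  //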
+ template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + !is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_parameter_free pool = default_memory_pool()) + : DictionaryBuilderBase(TypeTraits::type_singleton(), pool) {} + + // This constructor doesn't check for errors. Use InsertMemoValues instead. 
+ explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, dictionary)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(dictionary->type()) {} + + ~DictionaryBuilderBase() override = default; + + /// \brief The current number of entries in the dictionary + int64_t dictionary_length() const { return memo_table_->size(); } + + /// \brief The value byte width (for FixedSizeBinaryType) + template + enable_if_fixed_size_binary byte_width() const { + return byte_width_; + } + + /// \brief Append a scalar value + Status Append(Value value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + + int32_t memo_index; + ARROW_RETURN_NOT_OK(memo_table_->GetOrInsert(value, &memo_index)); + ARROW_RETURN_NOT_OK(indices_builder_.Append(memo_index)); + length_ += 1; + + return Status::OK(); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const uint8_t* value) { + return Append(std::string_view(reinterpret_cast(value), byte_width_)); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const char* value) { + return Append(std::string_view(value, byte_width_)); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const uint8_t* value, int32_t length) { + return Append(reinterpret_cast(value), length); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a string (only for string types) + template + enable_if_string_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal128 Append(const Decimal128& value) { + uint8_t data[16]; + value.ToBytes(data); + return Append(data, 16); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal256 Append(const Decimal256& value) { + uint8_t data[32]; + value.ToBytes(data); + return Append(data, 32); + } + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override { + if (!scalar.is_valid) return AppendNulls(n_repeats); + + const auto& dict_ty = internal::checked_cast(*scalar.type); + const DictionaryScalar& dict_scalar = + internal::checked_cast(scalar); + const auto& dict = internal::checked_cast::ArrayType&>( + *dict_scalar.value.dictionary); + ARROW_RETURN_NOT_OK(Reserve(n_repeats)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, 
n_repeats); + case Type::UINT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + default: + return Status::TypeError("Invalid index type: ", dict_ty); + } + return Status::OK(); + } + + Status AppendScalars(const ScalarVector& scalars) override { + for (const auto& scalar : scalars) { + ARROW_RETURN_NOT_OK(AppendScalar(*scalar, /*n_repeats=*/1)); + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final { + // Visit the indices and insert the unpacked values. + const auto& dict_ty = internal::checked_cast(*array.type); + // See if possible to avoid using ToArrayData here + const typename TypeTraits::ArrayType dict(array.dictionary().ToArrayData()); + ARROW_RETURN_NOT_OK(Reserve(length)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT64: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT64: + return AppendArraySliceImpl(dict, array, offset, length); + default: + return Status::TypeError("Invalid index type: ", dict_ty); + } + return Status::OK(); + } + + /// \brief Insert values into the dictionary's memo, but do not append any + /// indices. Can be used to initialize a new builder with known dictionary + /// values + /// \param[in] values dictionary values to add to memo. Type must match + /// builder type + Status InsertMemoValues(const Array& values) { + return memo_table_->InsertValues(values); + } + + /// \brief Append a whole dense array to the builder + template + enable_if_t::value, Status> AppendArray( + const Array& array) { + using ArrayType = typename TypeTraits::ArrayType; + +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetView(i))); + } + } + return Status::OK(); + } + + template + enable_if_fixed_size_binary AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetValue(i))); + } + } + return Status::OK(); + } + + void Reset() override { + // Perform a partial reset. 
Call ResetFull to also reset the accumulated + // dictionary values + ArrayBuilder::Reset(); + indices_builder_.Reset(); + } + + /// \brief Reset and also clear accumulated dictionary values in memo table + void ResetFull() { + Reset(); + memo_table_.reset(new internal::DictionaryMemoTable(pool_, value_type_)); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + /// \brief Return dictionary indices and a delta dictionary since the last + /// time that Finish or FinishDelta were called, and reset state of builder + /// (except the memo table) + Status FinishDelta(std::shared_ptr* out_indices, + std::shared_ptr* out_delta) { + std::shared_ptr indices_data; + std::shared_ptr delta_data; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(delta_offset_, &indices_data, &delta_data)); + *out_indices = MakeArray(indices_data); + *out_delta = MakeArray(delta_data); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), value_type_); + } + + protected: + template + Status AppendArraySliceImpl(const typename TypeTraits::ArrayType& dict, + const ArraySpan& array, int64_t offset, int64_t length) { + const c_type* values = array.GetValues(1) + offset; + return VisitBitBlocks( + array.buffers[0].data, array.offset + offset, length, + [&](const int64_t position) { + const int64_t index = static_cast(values[position]); + if (dict.IsValid(index)) { + return Append(dict.GetView(index)); + } + return AppendNull(); + }, + [&]() { return AppendNull(); }); + } + + template + Status AppendScalarImpl(const typename TypeTraits::ArrayType& dict, + const Scalar& index_scalar, int64_t n_repeats) { + using ScalarType = typename TypeTraits::ScalarType; + const auto index = internal::checked_cast(index_scalar).value; + if (index_scalar.is_valid && dict.IsValid(index)) { + const auto& value = dict.GetView(index); + for (int64_t i = 0; i < n_repeats; i++) { + ARROW_RETURN_NOT_OK(Append(value)); + } + return Status::OK(); + } + return AppendNulls(n_repeats); + } + + Status FinishInternal(std::shared_ptr* out) override { + std::shared_ptr dictionary; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(/*offset=*/0, out, &dictionary)); + + // Set type of array data to the right dictionary type + (*out)->type = type(); + (*out)->dictionary = dictionary; + return Status::OK(); + } + + Status FinishWithDictOffset(int64_t dict_offset, + std::shared_ptr* out_indices, + std::shared_ptr* out_dictionary) { + // Finalize indices array + ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out_indices)); + + // Generate dictionary array from hash table contents + ARROW_RETURN_NOT_OK(memo_table_->GetArrayData(dict_offset, out_dictionary)); + delta_offset_ = memo_table_->size(); + + // Update internals for further uses of this DictionaryBuilder + ArrayBuilder::Reset(); + return Status::OK(); + } + + std::unique_ptr memo_table_; + + // The size of the dictionary memo at last invocation of Finish, to use in + // FinishDelta for computing dictionary deltas + int32_t delta_offset_; + + // Only used for FixedSizeBinaryType + int32_t byte_width_; + + BuilderType indices_builder_; + std::shared_ptr value_type_; +}; + +template +class 
DictionaryBuilderBase : public ArrayBuilder { + public: + template + DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& index_type, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(index_type, pool) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + /// \brief Append a whole dense array to the builder + Status AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + Type::NA, array, "Wrong value type of array to be appended")); +#endif + for (int64_t i = 0; i < array.length(); i++) { + ARROW_RETURN_NOT_OK(AppendNull()); + } + return Status::OK(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + Status FinishInternal(std::shared_ptr* out) override { + ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out)); + (*out)->type = dictionary((*out)->type, null()); + (*out)->dictionary = NullArray(0).data(); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), null()); + } + + protected: + BuilderType indices_builder_; +}; + +} // namespace internal + +/// \brief A DictionaryArray builder that uses AdaptiveIntBuilder to return the +/// smallest index size that can accommodate the dictionary indices +template +class DictionaryBuilder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + 
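+    // The snapshot above lets us advance this builder's null count by exactly
+    // the number of nulls the indices builder absorbs in the append below.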
ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +/// \brief A DictionaryArray builder that always returns int32 dictionary +/// indices so that data cast to dictionary form will have a consistent index +/// type, e.g. for creating a ChunkedArray +template +class Dictionary32Builder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int32_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// Binary / Unicode builders +// (compatibility aliases; those used to be derived classes with additional +// Append() overloads, but they have been folded into DictionaryBuilderBase) + +using BinaryDictionaryBuilder = DictionaryBuilder; +using StringDictionaryBuilder = DictionaryBuilder; +using BinaryDictionary32Builder = Dictionary32Builder; +using StringDictionary32Builder = Dictionary32Builder; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h new file mode 100644 index 0000000000000000000000000000000000000000..21c2d4b270eb102065f626569c62484c594be910 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h @@ -0,0 +1,839 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow/array/array_nested.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-builders
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// VarLengthListLikeBuilder
+
+template <typename TYPE>
+class ARROW_EXPORT VarLengthListLikeBuilder : public ArrayBuilder {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+
+  /// Use this constructor to incrementally build the value array along with
+  /// offsets and null bitmap.
+  VarLengthListLikeBuilder(MemoryPool* pool,
+                           std::shared_ptr<ArrayBuilder> const& value_builder,
+                           const std::shared_ptr<DataType>& type,
+                           int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment),
+        offsets_builder_(pool, alignment),
+        value_builder_(value_builder),
+        value_field_(type->field(0)->WithType(NULLPTR)) {}
+
+  VarLengthListLikeBuilder(MemoryPool* pool,
+                           std::shared_ptr<ArrayBuilder> const& value_builder,
+                           int64_t alignment = kDefaultBufferAlignment)
+      : VarLengthListLikeBuilder(pool, value_builder,
+                                 std::make_shared<TYPE>(value_builder->type()),
+                                 alignment) {}
+
+  ~VarLengthListLikeBuilder() override = default;
+
+  Status Resize(int64_t capacity) override {
+    if (ARROW_PREDICT_FALSE(capacity > maximum_elements())) {
+      return Status::CapacityError(type_name(),
+                                   " array cannot reserve space for more than ",
+                                   maximum_elements(), " got ", capacity);
+    }
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+
+    // One more than requested for list offsets
+    const int64_t offsets_capacity =
+        is_list_view(TYPE::type_id) ? capacity : capacity + 1;
+    ARROW_RETURN_NOT_OK(offsets_builder_.Resize(offsets_capacity));
+    return ArrayBuilder::Resize(capacity);
+  }
+
+  void Reset() override {
+    ArrayBuilder::Reset();
+    offsets_builder_.Reset();
+    value_builder_->Reset();
+  }
+
+  /// \brief Start a new variable-length list slot
+  ///
+  /// This function should be called before appending elements to the
+  /// value builder. Elements appended to the value builder before this
+  /// function is called for the first time will not be members of any
+  /// list value.
+  ///
+  /// After this function is called, list_length elements SHOULD be appended to
+  /// the values builder. If this contract is violated, the behavior is defined
+  /// by the concrete builder implementation and SHOULD NOT be relied upon
+  /// unless the caller is specifically building a [Large]List or
+  /// [Large]ListView array.
+  ///
+  /// For [Large]List arrays, the list slot length will be the number of
+  /// elements appended to the values builder before the next call to Append*
+  /// or Finish. For [Large]ListView arrays, the list slot length will be
+  /// exactly list_length, but if Append* is called before at least list_length
+  /// elements are appended to the values builder, the current list slot will
+  /// share elements with the next list slots or an invalid [Large]ListView
+  /// array will be generated because there aren't enough elements in the
+  /// values builder to fill the list slots.
+  ///
+  /// If you're building a [Large]List and don't need to be compatible
+  /// with [Large]ListView, then `BaseListBuilder::Append(bool is_valid)`
+  /// is a simpler API.
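+  ///
+  /// An illustrative list-view sketch (not from the original header), assuming
+  /// an Int32Builder for the child values:
+  ///
+  /// \code
+  /// auto values = std::make_shared<Int32Builder>();
+  /// ListViewBuilder builder(default_memory_pool(), values);
+  /// ARROW_RETURN_NOT_OK(builder.Append(/*is_valid=*/true, /*list_length=*/2));
+  /// ARROW_RETURN_NOT_OK(values->AppendValues({7, 8}));  // exactly list_length values
+  /// \endcode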
+ /// + /// \pre if is_valid is false, list_length MUST be 0 + /// \param is_valid Whether the new list slot is valid + /// \param list_length The number of elements in the list + Status Append(bool is_valid, int64_t list_length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + assert(is_valid || list_length == 0); + UnsafeAppendToBitmap(is_valid); + UnsafeAppendDimensions(/*offset=*/value_builder_->length(), /*size=*/list_length); + return Status::OK(); + } + + Status AppendNull() final { + // Append() a null list slot with list_length=0. + // + // When building [Large]List arrays, elements being appended to the values builder + // before the next call to Append* or Finish will extend the list slot length, but + // that is totally fine because list arrays admit non-empty null list slots. + // + // In the case of [Large]ListViews that's not a problem either because the + // list slot length remains zero. + return Append(false, 0); + } + + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + UnsafeAppendToBitmap(length, false); + UnsafeAppendEmptyDimensions(/*num_values=*/length); + return Status::OK(); + } + + /// \brief Append an empty list slot + /// + /// \post Another call to Append* or Finish should be made before appending to + /// the values builder to ensure list slot remains empty + Status AppendEmptyValue() final { return Append(true, 0); } + + /// \brief Append an empty list slot + /// + /// \post Another call to Append* or Finish should be made before appending to + /// the values builder to ensure the last list slot remains empty + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + UnsafeAppendToBitmap(length, true); + UnsafeAppendEmptyDimensions(/*num_values=*/length); + return Status::OK(); + } + + /// \brief Vector append + /// + /// For list-array builders, the sizes are inferred from the offsets. + /// BaseListBuilder provides an implementation that doesn't take sizes, but + /// this virtual function allows dispatching calls to both list-array and + /// list-view-array builders (which need the sizes) + /// + /// \param offsets The offsets of the variable-length lists + /// \param sizes The sizes of the variable-length lists + /// \param length The number of offsets, sizes, and validity bits to append + /// \param valid_bytes If passed, valid_bytes is of equal length to values, + /// and any zero byte will be considered as a null for that slot + virtual Status AppendValues(const offset_type* offsets, const offset_type* sizes, + int64_t length, const uint8_t* valid_bytes) = 0; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + const offset_type* offsets = array.GetValues(1); + [[maybe_unused]] const offset_type* sizes = NULLPTR; + if constexpr (is_list_view(TYPE::type_id)) { + sizes = array.GetValues(2); + } + const bool all_valid = !array.MayHaveLogicalNulls(); + const uint8_t* validity = array.HasValidityBitmap() ? 
array.buffers[0].data : NULLPTR; + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t row = offset; row < offset + length; row++) { + const bool is_valid = + all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) || + array.IsValid(row); + int64_t size = 0; + if (is_valid) { + if constexpr (is_list_view(TYPE::type_id)) { + size = sizes[row]; + } else { + size = offsets[row + 1] - offsets[row]; + } + } + UnsafeAppendToBitmap(is_valid); + UnsafeAppendDimensions(/*offset=*/value_builder_->length(), size); + if (is_valid) { + ARROW_RETURN_NOT_OK( + value_builder_->AppendArraySlice(array.child_data[0], offsets[row], size)); + } + } + return Status::OK(); + } + + Status ValidateOverflow(int64_t new_elements) const { + auto new_length = value_builder_->length() + new_elements; + if (ARROW_PREDICT_FALSE(new_length > maximum_elements())) { + return Status::CapacityError(type_name(), " array cannot contain more than ", + maximum_elements(), " elements, have ", new_elements); + } else { + return Status::OK(); + } + } + + ArrayBuilder* value_builder() const { return value_builder_.get(); } + + // Cannot make this a static attribute because of linking issues + static constexpr int64_t maximum_elements() { + return std::numeric_limits::max() - 1; + } + + std::shared_ptr type() const override { + return std::make_shared(value_field_->WithType(value_builder_->type())); + } + + private: + static constexpr const char* type_name() { + if constexpr (is_list_view(TYPE::type_id)) { + return "ListView"; + } else { + return "List"; + } + } + + protected: + /// \brief Append dimensions for num_values empty list slots. + /// + /// ListViewBuilder overrides this to also append the sizes. + virtual void UnsafeAppendEmptyDimensions(int64_t num_values) { + const int64_t offset = value_builder_->length(); + for (int64_t i = 0; i < num_values; ++i) { + offsets_builder_.UnsafeAppend(static_cast(offset)); + } + } + + /// \brief Append dimensions for a single list slot. + /// + /// ListViewBuilder overrides this to also append the size. + virtual void UnsafeAppendDimensions(int64_t offset, int64_t size) { + offsets_builder_.UnsafeAppend(static_cast(offset)); + } + + TypedBufferBuilder offsets_builder_; + std::shared_ptr value_builder_; + std::shared_ptr value_field_; +}; + +// ---------------------------------------------------------------------- +// ListBuilder / LargeListBuilder + +template +class ARROW_EXPORT BaseListBuilder : public VarLengthListLikeBuilder { + private: + using BASE = VarLengthListLikeBuilder; + + public: + using TypeClass = TYPE; + using offset_type = typename BASE::offset_type; + + using BASE::BASE; + + using BASE::Append; + + ~BaseListBuilder() override = default; + + /// \brief Start a new variable-length list slot + /// + /// This function should be called before beginning to append elements to the + /// value builder + Status Append(bool is_valid = true) { + // The value_length parameter to BASE::Append(bool, int64_t) is ignored when + // building a list array, so we can pass 0 here. 
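+    // The effective slot length is inferred later, from the number of elements
+    // appended to the value builder before the next Append* or Finish call.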
+    return BASE::Append(is_valid, 0);
+  }
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes is of equal length to values, and any zero byte
+  /// will be considered as a null for that slot
+  Status AppendValues(const offset_type* offsets, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR) {
+    ARROW_RETURN_NOT_OK(this->Reserve(length));
+    this->UnsafeAppendToBitmap(valid_bytes, length);
+    this->offsets_builder_.UnsafeAppend(offsets, length);
+    return Status::OK();
+  }
+
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length, const uint8_t* valid_bytes) final {
+    // Offsets are assumed to be valid, but the first length-1 sizes have to be
+    // consistent with the offsets. This partially rules out the possibility
+    // that the caller is passing sizes that could work when building a
+    // list-view, but don't work when building a list, which requires offsets
+    // to be non-decreasing.
+    //
+    // CAUTION: the last size element (`sizes[length - 1]`) is not
+    // validated and could be inconsistent with the offsets given in a
+    // subsequent call to AppendValues.
+#ifndef NDEBUG
+    if (sizes) {
+      for (int64_t i = 0; i < length - 1; ++i) {
+        if (ARROW_PREDICT_FALSE(offsets[i] != offsets[i + 1] - sizes[i])) {
+          if (!valid_bytes || valid_bytes[i]) {
+            return Status::Invalid(
+                "BaseListBuilder: sizes are inconsistent with offsets provided");
+          }
+        }
+      }
+    }
+#endif
+    return AppendValues(offsets, length, valid_bytes);
+  }
+
+  Status AppendValues(const offset_type* offsets, const offset_type* sizes,
+                      int64_t length) {
+    return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR);
+  }
+
+  Status AppendNextOffset() {
+    ARROW_RETURN_NOT_OK(this->ValidateOverflow(0));
+    const int64_t num_values = this->value_builder_->length();
+    return this->offsets_builder_.Append(static_cast<offset_type>(num_values));
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    ARROW_RETURN_NOT_OK(AppendNextOffset());
+
+    // Offset padding zeroed by BufferBuilder
+    std::shared_ptr<Buffer> offsets;
+    std::shared_ptr<Buffer> null_bitmap;
+    ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets));
+    ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap));
+
+    if (this->value_builder_->length() == 0) {
+      // Try to make sure we get a non-null values buffer (ARROW-2744)
+      ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0));
+    }
+
+    std::shared_ptr<ArrayData> items;
+    ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items));
+
+    *out = ArrayData::Make(this->type(), this->length_,
+                           {std::move(null_bitmap), std::move(offsets)},
+                           {std::move(items)}, this->null_count_);
+    this->Reset();
+    return Status::OK();
+  }
+};
+
+/// \class ListBuilder
+/// \brief Builder class for variable-length list array value types
+///
+/// To use this class, you must append values to the child array builder and use
+/// the Append function to delimit each distinct list value (once the values
+/// have been appended to the child array) or use the bulk API to append
+/// a sequence of offsets and null values.
+///
+/// A note on types. Per arrow/type.h all types in the c++ implementation are
+/// logical, so even though this class always builds list arrays, it can
+/// represent multiple different logical types. If no logical type is provided
+/// at construction time, the class defaults to List<T> where T is taken from
+/// the value_builder/values that the object is constructed with.
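+///
+/// An illustrative sketch (not from the original header), assuming an
+/// Int32Builder for the child values:
+///
+/// \code
+/// auto values = std::make_shared<Int32Builder>();
+/// ListBuilder builder(default_memory_pool(), values);
+/// ARROW_RETURN_NOT_OK(builder.Append());              // start the slot [1, 2]
+/// ARROW_RETURN_NOT_OK(values->AppendValues({1, 2}));
+/// ARROW_RETURN_NOT_OK(builder.AppendNull());          // null list slot
+/// std::shared_ptr<Array> array;
+/// ARROW_RETURN_NOT_OK(builder.Finish(&array));        // [[1, 2], null]
+/// \endcode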
+class ARROW_EXPORT ListBuilder : public BaseListBuilder { + public: + using BaseListBuilder::BaseListBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +/// \class LargeListBuilder +/// \brief Builder class for large variable-length list array value types +/// +/// Like ListBuilder, but to create large list arrays (with 64-bit offsets). +class ARROW_EXPORT LargeListBuilder : public BaseListBuilder { + public: + using BaseListBuilder::BaseListBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +// ---------------------------------------------------------------------- +// ListViewBuilder / LargeListViewBuilder + +template +class ARROW_EXPORT BaseListViewBuilder : public VarLengthListLikeBuilder { + private: + using BASE = VarLengthListLikeBuilder; + + public: + using TypeClass = TYPE; + using offset_type = typename BASE::offset_type; + + using BASE::BASE; + + ~BaseListViewBuilder() override = default; + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(BASE::Resize(capacity)); + return sizes_builder_.Resize(capacity); + } + + void Reset() override { + BASE::Reset(); + sizes_builder_.Reset(); + } + + /// \brief Vector append + /// + /// If passed, valid_bytes is of equal length to values, and any zero byte + /// will be considered as a null for that slot + Status AppendValues(const offset_type* offsets, const offset_type* sizes, + int64_t length, const uint8_t* valid_bytes) final { + ARROW_RETURN_NOT_OK(this->Reserve(length)); + this->UnsafeAppendToBitmap(valid_bytes, length); + this->offsets_builder_.UnsafeAppend(offsets, length); + this->sizes_builder_.UnsafeAppend(sizes, length); + return Status::OK(); + } + + Status AppendValues(const offset_type* offsets, const offset_type* sizes, + int64_t length) { + return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR); + } + + Status FinishInternal(std::shared_ptr* out) override { + // Offset and sizes padding zeroed by BufferBuilder + std::shared_ptr null_bitmap; + std::shared_ptr offsets; + std::shared_ptr sizes; + ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap)); + ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets)); + ARROW_RETURN_NOT_OK(this->sizes_builder_.Finish(&sizes)); + + if (this->value_builder_->length() == 0) { + // Try to make sure we get a non-null values buffer (ARROW-2744) + ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0)); + } + + std::shared_ptr items; + ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items)); + + *out = ArrayData::Make(this->type(), this->length_, + {std::move(null_bitmap), std::move(offsets), std::move(sizes)}, + {std::move(items)}, this->null_count_); + this->Reset(); + return Status::OK(); + } + + protected: + void UnsafeAppendEmptyDimensions(int64_t num_values) override { + for (int64_t i = 0; i < num_values; ++i) { + this->offsets_builder_.UnsafeAppend(0); + } + for (int64_t i = 0; i < num_values; ++i) { + this->sizes_builder_.UnsafeAppend(0); + } + } + + void UnsafeAppendDimensions(int64_t offset, int64_t size) override { + this->offsets_builder_.UnsafeAppend(static_cast(offset)); + this->sizes_builder_.UnsafeAppend(static_cast(size)); + } + + private: + TypedBufferBuilder sizes_builder_; +}; + +class ARROW_EXPORT ListViewBuilder final : public BaseListViewBuilder { + public: + using BaseListViewBuilder::BaseListViewBuilder; + + /// \cond 
FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +class ARROW_EXPORT LargeListViewBuilder final + : public BaseListViewBuilder { + public: + using BaseListViewBuilder::BaseListViewBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +// ---------------------------------------------------------------------- +// Map builder + +/// \class MapBuilder +/// \brief Builder class for arrays of variable-size maps +/// +/// To use this class, you must append values to the key and item array builders +/// and use the Append function to delimit each distinct map (once the keys and items +/// have been appended) or use the bulk API to append a sequence of offsets and null +/// maps. +/// +/// Key uniqueness and ordering are not validated. +class ARROW_EXPORT MapBuilder : public ArrayBuilder { + public: + /// Use this constructor to define the built array's type explicitly. If key_builder + /// or item_builder has indeterminate type, this builder will also. + MapBuilder(MemoryPool* pool, const std::shared_ptr& key_builder, + const std::shared_ptr& item_builder, + const std::shared_ptr& type); + + /// Use this constructor to infer the built array's type. If key_builder or + /// item_builder has indeterminate type, this builder will also. + MapBuilder(MemoryPool* pool, const std::shared_ptr& key_builder, + const std::shared_ptr& item_builder, bool keys_sorted = false); + + MapBuilder(MemoryPool* pool, const std::shared_ptr& item_builder, + const std::shared_ptr& type); + + Status Resize(int64_t capacity) override; + void Reset() override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Vector append + /// + /// If passed, valid_bytes is of equal length to values, and any zero byte + /// will be considered as a null for that slot + Status AppendValues(const int32_t* offsets, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + /// \brief Start a new variable-length map slot + /// + /// This function should be called before beginning to append elements to the + /// key and item builders + Status Append(); + + Status AppendNull() final; + + Status AppendNulls(int64_t length) final; + + Status AppendEmptyValue() final; + + Status AppendEmptyValues(int64_t length) final; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + const int32_t* offsets = array.GetValues(1); + const bool all_valid = !array.MayHaveLogicalNulls(); + const uint8_t* validity = array.HasValidityBitmap() ? 
+    const uint8_t* validity =
+        array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+    for (int64_t row = offset; row < offset + length; row++) {
+      const bool is_valid =
+          all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) ||
+          array.IsValid(row);
+      if (is_valid) {
+        ARROW_RETURN_NOT_OK(Append());
+        const int64_t slot_length = offsets[row + 1] - offsets[row];
+        // Add together the inner StructArray offset to the Map/List offset
+        int64_t key_value_offset = array.child_data[0].offset + offsets[row];
+        ARROW_RETURN_NOT_OK(key_builder_->AppendArraySlice(
+            array.child_data[0].child_data[0], key_value_offset, slot_length));
+        ARROW_RETURN_NOT_OK(item_builder_->AppendArraySlice(
+            array.child_data[0].child_data[1], key_value_offset, slot_length));
+      } else {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      }
+    }
+    return Status::OK();
+  }
+
+  /// \brief Get builder to append keys.
+  ///
+  /// Appending a key with this builder should be followed by appending
+  /// an item or null value with item_builder().
+  ArrayBuilder* key_builder() const { return key_builder_.get(); }
+
+  /// \brief Get builder to append items
+  ///
+  /// Appending an item with this builder should have been preceded
+  /// by appending a key with key_builder().
+  ArrayBuilder* item_builder() const { return item_builder_.get(); }
+
+  /// \brief Get builder to add Map entries as struct values.
+  ///
+  /// This is used instead of key_builder()/item_builder() and allows
+  /// the Map to be built as a list of struct values.
+  ArrayBuilder* value_builder() const { return list_builder_->value_builder(); }
+
+  std::shared_ptr<DataType> type() const override {
+    // The key and item builders may update their types, but they don't contain
+    // the field names, so we need to reconstruct the type. (See ARROW-13735.)
+    return std::make_shared<MapType>(
+        field(entries_name_,
+              struct_({field(key_name_, key_builder_->type(), false),
+                       field(item_name_, item_builder_->type(), item_nullable_)}),
+              false),
+        keys_sorted_);
+  }
+
+  Status ValidateOverflow(int64_t new_elements) {
+    return list_builder_->ValidateOverflow(new_elements);
+  }
+
+ protected:
+  inline Status AdjustStructBuilderLength();
+
+ protected:
+  bool keys_sorted_ = false;
+  bool item_nullable_ = false;
+  std::string entries_name_;
+  std::string key_name_;
+  std::string item_name_;
+  std::shared_ptr<ListBuilder> list_builder_;
+  std::shared_ptr<ArrayBuilder> key_builder_;
+  std::shared_ptr<ArrayBuilder> item_builder_;
+};
+
+// ----------------------------------------------------------------------
+// FixedSizeList builder
+
+/// \class FixedSizeListBuilder
+/// \brief Builder class for fixed-length list array value types
+class ARROW_EXPORT FixedSizeListBuilder : public ArrayBuilder {
+ public:
+  /// Use this constructor to infer the built array's type. If value_builder
+  /// has indeterminate type, this builder will also.
+  FixedSizeListBuilder(MemoryPool* pool,
+                       std::shared_ptr<ArrayBuilder> const& value_builder,
+                       int32_t list_size);
+
+  /// Use this constructor to define the built array's type explicitly. If
+  /// value_builder has indeterminate type, this builder will also.
+  FixedSizeListBuilder(MemoryPool* pool,
+                       std::shared_ptr<ArrayBuilder> const& value_builder,
+                       const std::shared_ptr<DataType>& type);
+
+  Status Resize(int64_t capacity) override;
+  void Reset() override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<FixedSizeListArray>* out) { return FinishTyped(out); }
+
+  /// \brief Append a valid fixed length list.
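+  ///
+  /// A minimal usage sketch (the Int32Builder child and list size are
+  /// illustrative, not part of this API; error handling elided):
+  ///
+  ///     FixedSizeListBuilder builder(default_memory_pool(),
+  ///                                  std::make_shared<Int32Builder>(),
+  ///                                  /*list_size=*/2);
+  ///     auto* values = static_cast<Int32Builder*>(builder.value_builder());
+  ///     ARROW_RETURN_NOT_OK(builder.Append());   // open one list slot...
+  ///     ARROW_RETURN_NOT_OK(values->Append(1));  // ...then append its values
+  ///     ARROW_RETURN_NOT_OK(values->Append(2));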
+  ///
+  /// This function affects only the validity bitmap; the child values must be appended
+  /// using the child array builder.
+  Status Append();
+
+  /// \brief Vector append
+  ///
+  /// If passed, valid_bytes will be read and any zero byte
+  /// will cause the corresponding slot to be null
+  ///
+  /// This function affects only the validity bitmap; the child values must be appended
+  /// using the child array builder. This includes appending nulls for null lists.
+  /// XXX this restriction is confusing, should this method be omitted?
+  Status AppendValues(int64_t length, const uint8_t* valid_bytes = NULLPTR);
+
+  /// \brief Append a null fixed length list.
+  ///
+  /// The child array builder will have the appropriate number of nulls appended
+  /// automatically.
+  Status AppendNull() final;
+
+  /// \brief Append length null fixed length lists.
+  ///
+  /// The child array builder will have the appropriate number of nulls appended
+  /// automatically.
+  Status AppendNulls(int64_t length) final;
+
+  Status ValidateOverflow(int64_t new_elements);
+
+  Status AppendEmptyValue() final;
+
+  Status AppendEmptyValues(int64_t length) final;
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final {
+    const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+    for (int64_t row = offset; row < offset + length; row++) {
+      if (!validity || bit_util::GetBit(validity, array.offset + row)) {
+        ARROW_RETURN_NOT_OK(value_builder_->AppendArraySlice(
+            array.child_data[0], list_size_ * (array.offset + row), list_size_));
+        ARROW_RETURN_NOT_OK(Append());
+      } else {
+        ARROW_RETURN_NOT_OK(AppendNull());
+      }
+    }
+    return Status::OK();
+  }
+
+  ArrayBuilder* value_builder() const { return value_builder_.get(); }
+
+  std::shared_ptr<DataType> type() const override {
+    return fixed_size_list(value_field_->WithType(value_builder_->type()), list_size_);
+  }
+
+  // Cannot make this a static attribute because of linking issues
+  static constexpr int64_t maximum_elements() {
+    return std::numeric_limits<int32_t>::max() - 1;
+  }
+
+ protected:
+  std::shared_ptr<Field> value_field_;
+  const int32_t list_size_;
+  std::shared_ptr<ArrayBuilder> value_builder_;
+};
+
+// ----------------------------------------------------------------------
+// Struct
+
+// ---------------------------------------------------------------------------------
+// StructArray builder
+/// Append, Resize and Reserve methods act on StructBuilder. Please make sure
+/// these methods are called consistently on all child builders to maintain
+/// data-structure consistency.
+class ARROW_EXPORT StructBuilder : public ArrayBuilder {
+ public:
+  /// If any of field_builders has indeterminate type, this builder will also
+  StructBuilder(const std::shared_ptr<DataType>& type, MemoryPool* pool,
+                std::vector<std::shared_ptr<ArrayBuilder>> field_builders);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<StructArray>* out) { return FinishTyped(out); }
+
+  /// The null bitmap is of equal length to every child field, and any zero byte
+  /// will be considered as a null for that field. However, users must use the
+  /// append or advance methods of the child builders independently to insert
+  /// data.
+  Status AppendValues(int64_t length, const uint8_t* valid_bytes) {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(valid_bytes, length);
+    return Status::OK();
+  }
+
+  /// Append an element to the Struct. Every child builder's Append method must
+  /// be called independently to maintain data-structure consistency.
+  Status Append(bool is_valid = true) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendToBitmap(is_valid);
+    return Status::OK();
+  }
+
+  /// \brief Append a null value. Automatically appends an empty value to each child
+  /// builder.
+  Status AppendNull() final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
+    }
+    return Append(false);
+  }
+
+  /// \brief Append multiple null values. Automatically appends empty values to each
+  /// child builder.
+  Status AppendNulls(int64_t length) final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
+    }
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, false);
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
+    }
+    return Append(true);
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    for (const auto& field : children_) {
+      ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
+    }
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(length, true);
+    return Status::OK();
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    for (int i = 0; static_cast<size_t>(i) < children_.size(); i++) {
+      ARROW_RETURN_NOT_OK(children_[i]->AppendArraySlice(array.child_data[i],
+                                                         array.offset + offset, length));
+    }
+    const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    UnsafeAppendToBitmap(validity, array.offset + offset, length);
+    return Status::OK();
+  }
+
+  void Reset() override;
+
+  ArrayBuilder* field_builder(int i) const { return children_[i].get(); }
+
+  int num_fields() const { return static_cast<int>(children_.size()); }
+
+  std::shared_ptr<DataType> type() const override;
+
+ private:
+  std::shared_ptr<DataType> type_;
+};
+
+/// @}
+
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e01d55edeb110c6b93e12722521e81ca445436
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h
@@ -0,0 +1,555 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+
+namespace arrow {
+
+class ARROW_EXPORT NullBuilder : public ArrayBuilder {
+ public:
+  explicit NullBuilder(MemoryPool* pool = default_memory_pool(),
+                       int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool) {}
+  explicit NullBuilder(const std::shared_ptr<DataType>& type,
+                       MemoryPool* pool = default_memory_pool(),
+                       int64_t alignment = kDefaultBufferAlignment)
+      : NullBuilder(pool, alignment) {}
+
+  /// \brief Append the specified number of null elements
+  Status AppendNulls(int64_t length) final {
+    if (length < 0) return Status::Invalid("length must be non-negative");
+    null_count_ += length;
+    length_ += length;
+    return Status::OK();
+  }
+
+  /// \brief Append a single null element
+  Status AppendNull() final { return AppendNulls(1); }
+
+  Status AppendEmptyValues(int64_t length) final { return AppendNulls(length); }
+
+  Status AppendEmptyValue() final { return AppendEmptyValues(1); }
+
+  Status Append(std::nullptr_t) { return AppendNull(); }
+
+  Status AppendArraySlice(const ArraySpan&, int64_t, int64_t length) override {
+    return AppendNulls(length);
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  std::shared_ptr<DataType> type() const override { return null(); }
+
+  Status Finish(std::shared_ptr<NullArray>* out) { return FinishTyped(out); }
+};
+
+/// \addtogroup numeric-builders
+///
+/// @{
+
+/// Base class for all Builders that emit an Array of a scalar numerical type.
+template <typename T>
+class NumericBuilder
+    : public ArrayBuilder,
+      public internal::ArrayBuilderExtraOps<NumericBuilder<T>, typename T::c_type> {
+ public:
+  using TypeClass = T;
+  using value_type = typename T::c_type;
+  using ArrayType = typename TypeTraits<T>::ArrayType;
+
+  template <typename T1 = T>
+  explicit NumericBuilder(
+      enable_if_parameter_free<T1, MemoryPool*> pool = default_memory_pool(),
+      int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment),
+        type_(TypeTraits<T>::type_singleton()),
+        data_builder_(pool, alignment) {}
+
+  NumericBuilder(const std::shared_ptr<DataType>& type, MemoryPool* pool,
+                 int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment), type_(type), data_builder_(pool, alignment) {}
+
+  /// Append a single scalar and increase the size if necessary.
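+  ///
+  /// A minimal usage sketch (Int64Builder is the NumericBuilder<Int64Type>
+  /// alias declared later in this header; error handling elided):
+  ///
+  ///     Int64Builder builder;
+  ///     ARROW_RETURN_NOT_OK(builder.Append(42));
+  ///     ARROW_RETURN_NOT_OK(builder.AppendNull());
+  ///     std::shared_ptr<Array> out;
+  ///     ARROW_RETURN_NOT_OK(builder.Finish(&out));  // length 2, one null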
+  Status Append(const value_type val) {
+    ARROW_RETURN_NOT_OK(ArrayBuilder::Reserve(1));
+    UnsafeAppend(val);
+    return Status::OK();
+  }
+
+  /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory
+  /// The memory at the corresponding data slot is set to 0 to prevent
+  /// uninitialized memory access
+  Status AppendNulls(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, value_type{});  // zero
+    UnsafeSetNull(length);
+    return Status::OK();
+  }
+
+  /// \brief Append a single null element
+  Status AppendNull() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    data_builder_.UnsafeAppend(value_type{});  // zero
+    UnsafeAppendToBitmap(false);
+    return Status::OK();
+  }
+
+  /// \brief Append an empty element
+  Status AppendEmptyValue() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    data_builder_.UnsafeAppend(value_type{});  // zero
+    UnsafeAppendToBitmap(true);
+    return Status::OK();
+  }
+
+  /// \brief Append several empty elements
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, value_type{});  // zero
+    UnsafeSetNotNull(length);
+    return Status::OK();
+  }
+
+  value_type GetValue(int64_t index) const { return data_builder_.data()[index]; }
+
+  void Reset() override {
+    data_builder_.Reset();
+    ArrayBuilder::Reset();
+  }
+
+  Status Resize(int64_t capacity) override {
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+    capacity = std::max(capacity, kMinBuilderCapacity);
+    ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity));
+    return ArrayBuilder::Resize(capacity);
+  }
+
+  value_type operator[](int64_t index) const { return GetValue(index); }
+
+  value_type& operator[](int64_t index) {
+    return reinterpret_cast<value_type*>(data_builder_.mutable_data())[index];
+  }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const value_type* values, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR) {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values, length);
+    // length_ is updated by these
+    ArrayBuilder::UnsafeAppendToBitmap(valid_bytes, length);
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] bitmap a validity bitmap to copy (may be null)
+  /// \param[in] bitmap_offset an offset into the validity bitmap
+  /// \return Status
+  Status AppendValues(const value_type* values, int64_t length, const uint8_t* bitmap,
+                      int64_t bitmap_offset) {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values, length);
+    // length_ is updated by these
+    ArrayBuilder::UnsafeAppendToBitmap(bitmap, bitmap_offset, length);
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] is_valid a std::vector<bool> indicating valid (1) or null
+  /// (0). Equal in length to values
+  /// \return Status
+  Status AppendValues(const value_type* values, int64_t length,
+                      const std::vector<bool>& is_valid) {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values, length);
+    // length_ is updated by these
+    ArrayBuilder::UnsafeAppendToBitmap(is_valid);
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector of values
+  /// \param[in] is_valid a std::vector<bool> indicating valid (1) or null
+  /// (0). Equal in length to values
+  /// \return Status
+  Status AppendValues(const std::vector<value_type>& values,
+                      const std::vector<bool>& is_valid) {
+    return AppendValues(values.data(), static_cast<int64_t>(values.size()), is_valid);
+  }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector of values
+  /// \return Status
+  Status AppendValues(const std::vector<value_type>& values) {
+    return AppendValues(values.data(), static_cast<int64_t>(values.size()));
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    ARROW_ASSIGN_OR_RAISE(auto null_bitmap,
+                          null_bitmap_builder_.FinishWithLength(length_));
+    ARROW_ASSIGN_OR_RAISE(auto data, data_builder_.FinishWithLength(length_));
+    *out = ArrayData::Make(type(), length_, {null_bitmap, data}, null_count_);
+    capacity_ = length_ = null_count_ = 0;
+    return Status::OK();
+  }
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<ArrayType>* out) { return FinishTyped(out); }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values_begin InputIterator to the beginning of the values
+  /// \param[in] values_end InputIterator pointing to the end of the values
+  /// \return Status
+  template <typename ValuesIter>
+  Status AppendValues(ValuesIter values_begin, ValuesIter values_end) {
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values_begin, values_end);
+    // this updates the length_
+    UnsafeSetNotNull(length);
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of elements in one shot, with a specified nullmap
+  /// \param[in] values_begin InputIterator to the beginning of the values
+  /// \param[in] values_end InputIterator pointing to the end of the values
+  /// \param[in] valid_begin InputIterator with elements indicating valid (1)
+  /// or null (0) values.
+  /// \return Status
+  template <typename ValuesIter, typename ValidIter>
+  enable_if_t<!std::is_pointer<ValidIter>::value, Status> AppendValues(
+      ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) {
+    static_assert(!internal::is_null_pointer<ValidIter>::value,
+                  "Don't pass a NULLPTR directly as valid_begin, use the 2-argument "
+                  "version instead");
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values_begin, values_end);
+    null_bitmap_builder_.UnsafeAppend(
+        length, [&valid_begin]() -> bool { return *valid_begin++; });
+    length_ = null_bitmap_builder_.length();
+    null_count_ = null_bitmap_builder_.false_count();
+    return Status::OK();
+  }
+
+  // Same as above, with a pointer type ValidIter
+  template <typename ValuesIter, typename ValidIter>
+  enable_if_t<std::is_pointer<ValidIter>::value, Status> AppendValues(
+      ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) {
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(values_begin, values_end);
+    // this updates the length_
+    if (valid_begin == NULLPTR) {
+      UnsafeSetNotNull(length);
+    } else {
+      null_bitmap_builder_.UnsafeAppend(
+          length, [&valid_begin]() -> bool { return *valid_begin++; });
+      length_ = null_bitmap_builder_.length();
+      null_count_ = null_bitmap_builder_.false_count();
+    }
+
+    return Status::OK();
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    return AppendValues(array.GetValues<value_type>(1) + offset, length,
+                        array.GetValues<uint8_t>(0, 0), array.offset + offset);
+  }
+
+  /// Append a single scalar under the assumption that the underlying Buffer is
+  /// large enough.
+  ///
+  /// This method does not capacity-check; make sure to call Reserve
+  /// beforehand.
+  void UnsafeAppend(const value_type val) {
+    ArrayBuilder::UnsafeAppendToBitmap(true);
+    data_builder_.UnsafeAppend(val);
+  }
+
+  void UnsafeAppendNull() {
+    ArrayBuilder::UnsafeAppendToBitmap(false);
+    data_builder_.UnsafeAppend(value_type{});  // zero
+  }
+
+  std::shared_ptr<DataType> type() const override { return type_; }
+
+ protected:
+  std::shared_ptr<DataType> type_;
+  TypedBufferBuilder<value_type> data_builder_;
+};
+
+// Builders
+
+using UInt8Builder = NumericBuilder<UInt8Type>;
+using UInt16Builder = NumericBuilder<UInt16Type>;
+using UInt32Builder = NumericBuilder<UInt32Type>;
+using UInt64Builder = NumericBuilder<UInt64Type>;
+
+using Int8Builder = NumericBuilder<Int8Type>;
+using Int16Builder = NumericBuilder<Int16Type>;
+using Int32Builder = NumericBuilder<Int32Type>;
+using Int64Builder = NumericBuilder<Int64Type>;
+
+using HalfFloatBuilder = NumericBuilder<HalfFloatType>;
+using FloatBuilder = NumericBuilder<FloatType>;
+using DoubleBuilder = NumericBuilder<DoubleType>;
+
+/// @}
+
+/// \addtogroup temporal-builders
+///
+/// @{
+
+using Date32Builder = NumericBuilder<Date32Type>;
+using Date64Builder = NumericBuilder<Date64Type>;
+using Time32Builder = NumericBuilder<Time32Type>;
+using Time64Builder = NumericBuilder<Time64Type>;
+using TimestampBuilder = NumericBuilder<TimestampType>;
+using MonthIntervalBuilder = NumericBuilder<MonthIntervalType>;
+using DurationBuilder = NumericBuilder<DurationType>;
+
+/// @}
+
+class ARROW_EXPORT BooleanBuilder
+    : public ArrayBuilder,
+      public internal::ArrayBuilderExtraOps<BooleanBuilder, bool> {
+ public:
+  using TypeClass = BooleanType;
+  using value_type = bool;
+
+  explicit BooleanBuilder(MemoryPool* pool = default_memory_pool(),
+                          int64_t alignment = kDefaultBufferAlignment);
+
+  BooleanBuilder(const std::shared_ptr<DataType>& type,
+                 MemoryPool* pool = default_memory_pool(),
+                 int64_t alignment = kDefaultBufferAlignment);
+
+  /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory
+  Status AppendNulls(int64_t length) final {
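+    // Reserve once up front so the unchecked appends below are safe: the value
+    // bits are zeroed and the corresponding validity bits are marked null.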
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, false);
+    UnsafeSetNull(length);
+    return Status::OK();
+  }
+
+  Status AppendNull() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendNull();
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    data_builder_.UnsafeAppend(false);
+    UnsafeSetNotNull(1);
+    return Status::OK();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(length, false);
+    UnsafeSetNotNull(length);
+    return Status::OK();
+  }
+
+  /// Scalar append
+  Status Append(const bool val) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppend(val);
+    return Status::OK();
+  }
+
+  Status Append(const uint8_t val) { return Append(val != 0); }
+
+  /// Scalar append, without checking for capacity
+  void UnsafeAppend(const bool val) {
+    data_builder_.UnsafeAppend(val);
+    UnsafeAppendToBitmap(true);
+  }
+
+  void UnsafeAppendNull() {
+    data_builder_.UnsafeAppend(false);
+    UnsafeAppendToBitmap(false);
+  }
+
+  void UnsafeAppend(const uint8_t val) { UnsafeAppend(val != 0); }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous array of bytes (non-zero is 1)
+  /// \param[in] length the number of values to append
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const uint8_t* values, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a bitmap of values
+  /// \param[in] length the number of values to append
+  /// \param[in] validity a validity bitmap to copy (may be null)
+  /// \param[in] offset an offset into the values and validity bitmaps
+  /// \return Status
+  Status AppendValues(const uint8_t* values, int64_t length, const uint8_t* validity,
+                      int64_t offset);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] is_valid a std::vector<bool> indicating valid (1) or null
+  /// (0). Equal in length to values
+  /// \return Status
+  Status AppendValues(const uint8_t* values, int64_t length,
+                      const std::vector<bool>& is_valid);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector of bytes
+  /// \param[in] is_valid a std::vector<bool> indicating valid (1) or null
+  /// (0). Equal in length to values
+  /// \return Status
+  Status AppendValues(const std::vector<uint8_t>& values,
+                      const std::vector<bool>& is_valid);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector of bytes
+  /// \return Status
+  Status AppendValues(const std::vector<uint8_t>& values);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector<bool> indicating true (1) or false
+  /// \param[in] is_valid a std::vector<bool> indicating valid (1) or null
+  /// (0). Equal in length to values
+  /// \return Status
+  Status AppendValues(const std::vector<bool>& values, const std::vector<bool>& is_valid);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a std::vector<bool> indicating true (1) or false
+  /// \return Status
+  Status AppendValues(const std::vector<bool>& values);
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values_begin InputIterator to the beginning of the values
+  /// \param[in] values_end InputIterator pointing to the end of the values
+  /// \return Status
+  template <typename ValuesIter>
+  Status AppendValues(ValuesIter values_begin, ValuesIter values_end) {
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(
+        length, [&values_begin]() -> bool { return *values_begin++; });
+    // this updates length_
+    UnsafeSetNotNull(length);
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of elements in one shot, with a specified nullmap
+  /// \param[in] values_begin InputIterator to the beginning of the values
+  /// \param[in] values_end InputIterator pointing to the end of the values
+  /// \param[in] valid_begin InputIterator with elements indicating valid (1)
+  /// or null (0) values
+  /// \return Status
+  template <typename ValuesIter, typename ValidIter>
+  enable_if_t<!std::is_pointer<ValidIter>::value, Status> AppendValues(
+      ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) {
+    static_assert(!internal::is_null_pointer<ValidIter>::value,
+                  "Don't pass a NULLPTR directly as valid_begin, use the 2-argument "
+                  "version instead");
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+
+    data_builder_.UnsafeAppend(
+        length, [&values_begin]() -> bool { return *values_begin++; });
+    null_bitmap_builder_.UnsafeAppend(
+        length, [&valid_begin]() -> bool { return *valid_begin++; });
+    length_ = null_bitmap_builder_.length();
+    null_count_ = null_bitmap_builder_.false_count();
+    return Status::OK();
+  }
+
+  // Same as above, for a pointer type ValidIter
+  template <typename ValuesIter, typename ValidIter>
+  enable_if_t<std::is_pointer<ValidIter>::value, Status> AppendValues(
+      ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) {
+    int64_t length = static_cast<int64_t>(std::distance(values_begin, values_end));
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    data_builder_.UnsafeAppend(
+        length, [&values_begin]() -> bool { return *values_begin++; });
+
+    if (valid_begin == NULLPTR) {
+      UnsafeSetNotNull(length);
+    } else {
+      null_bitmap_builder_.UnsafeAppend(
+          length, [&valid_begin]() -> bool { return *valid_begin++; });
+    }
+    length_ = null_bitmap_builder_.length();
+    null_count_ = null_bitmap_builder_.false_count();
+    return Status::OK();
+  }
+
+  Status AppendValues(int64_t length, bool value);
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    return AppendValues(array.GetValues<uint8_t>(1, 0), length,
+                        array.GetValues<uint8_t>(0, 0), array.offset + offset);
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<BooleanArray>* out) { return FinishTyped(out); }
+
+  void Reset() override;
+  Status Resize(int64_t capacity) override;
+
+  std::shared_ptr<DataType> type() const override { return boolean(); }
+
+ protected:
+  TypedBufferBuilder<bool> data_builder_;
+};
+
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h
new file mode 100644 index 0000000000000000000000000000000000000000..ac92efbd0dbe6b470b8275219e75b41aa3f7ab3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h @@ -0,0 +1,303 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_base.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-builders +/// +/// @{ + +namespace internal { + +/// \brief An ArrayBuilder that deduplicates repeated values as they are +/// appended to the inner-ArrayBuilder and reports the length of the current run +/// of identical values. +/// +/// The following sequence of calls +/// +/// Append(2) +/// Append(2) +/// Append(2) +/// Append(7) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// will cause the inner-builder to receive only 3 Append calls +/// +/// Append(2) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// Note that values returned by length(), null_count() and capacity() are +/// related to the compressed array built by the inner-ArrayBuilder. +class RunCompressorBuilder : public ArrayBuilder { + public: + RunCompressorBuilder(MemoryPool* pool, std::shared_ptr inner_builder, + std::shared_ptr type); + + ~RunCompressorBuilder() override; + + ARROW_DISALLOW_COPY_AND_ASSIGN(RunCompressorBuilder); + + /// \brief Called right before a run is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run is closed (i.e. run-length is known and value is appended to the + /// inner builder). + /// + /// \param value can be NULLPTR if closing a run of NULLs + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRun(const std::shared_ptr& value, + int64_t length) { + return Status::OK(); + } + + /// \brief Called right before a run of empty values is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run of empty values is appended (i.e. run-length is known and a single + /// empty value is appended to the inner builder). + /// + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRunOfEmptyValues(int64_t length) { return Status::OK(); } + + /// \brief Allocate enough memory for a given number of array elements. + /// + /// NOTE: Conservatively resizing a run-length compressed array for a given + /// number of logical elements is not possible, since the physical length will + /// vary depending on the values to be appended in the future. But we can + /// pessimistically assume that each run will contain a single value and + /// allocate that number of runs. 
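+  ///
+  /// For example, resizing for 6 logical elements reserves room for up to 6
+  /// runs, even though a sequence such as 2, 2, 2, 7, 7, 2 compresses to only
+  /// 3 runs (a worst-case sketch, not a description of the exact allocation).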
+  Status Resize(int64_t capacity) override { return ResizePhysical(capacity); }
+
+  /// \brief Allocate enough memory for a given number of runs.
+  ///
+  /// Like Resize on non-encoded builders, it does not account for variable size
+  /// data.
+  Status ResizePhysical(int64_t capacity);
+
+  Status ReservePhysical(int64_t additional_capacity) {
+    return Reserve(additional_capacity);
+  }
+
+  void Reset() override;
+
+  Status AppendNull() final { return AppendNulls(1); }
+  Status AppendNulls(int64_t length) override;
+
+  Status AppendEmptyValue() final { return AppendEmptyValues(1); }
+  Status AppendEmptyValues(int64_t length) override;
+
+  Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override;
+  Status AppendScalars(const ScalarVector& scalars) override;
+
+  // AppendArraySlice() is not implemented.
+
+  /// \brief Append a slice of an array containing values from already
+  /// compressed runs.
+  ///
+  /// NOTE: WillCloseRun() is not called as the length of each run cannot be
+  /// determined at this point. Caller should ensure that !has_open_run() by
+  /// calling FinishCurrentRun() before calling this.
+  ///
+  /// Pre-condition: !has_open_run()
+  Status AppendRunCompressedArraySlice(const ArraySpan& array, int64_t offset,
+                                       int64_t length);
+
+  /// \brief Forces the closing of the current run if one is currently open.
+  ///
+  /// This can be called when one wants to ensure the current run will not be
+  /// extended. This may cause identical values to appear close to each other in
+  /// the underlying array (i.e. two runs that could be a single run) if more
+  /// values are appended after this is called.
+  ///
+  /// Finish() and FinishInternal() call this automatically.
+  virtual Status FinishCurrentRun();
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  ArrayBuilder& inner_builder() const { return *inner_builder_; }
+
+  std::shared_ptr<DataType> type() const override { return inner_builder_->type(); }
+
+  bool has_open_run() const { return current_run_length_ > 0; }
+  int64_t open_run_length() const { return current_run_length_; }
+
+ private:
+  inline void UpdateDimensions() {
+    capacity_ = inner_builder_->capacity();
+    length_ = inner_builder_->length();
+    null_count_ = inner_builder_->null_count();
+  }
+
+ private:
+  std::shared_ptr<ArrayBuilder> inner_builder_;
+  std::shared_ptr<Scalar> current_value_ = NULLPTR;
+  int64_t current_run_length_ = 0;
+};
+
+} // namespace internal
+
+// ----------------------------------------------------------------------
+// RunEndEncoded builder
+
+/// \brief Run-end encoded array builder.
+///
+/// NOTE: the value returned by capacity() is related to the compressed
+/// array (physical) and not the decoded array (logical) that is run-end
+/// encoded. null_count() always returns 0. length(), on the other hand,
+/// returns the logical length of the run-end encoded array.
+class ARROW_EXPORT RunEndEncodedBuilder : public ArrayBuilder {
+ private:
+  // An internal::RunCompressorBuilder that produces a run-end in the
+  // RunEndEncodedBuilder every time a value-run is closed.
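+  // For the sequence in the RunCompressorBuilder comment above (2, 2, 2, 7,
+  // 7, 2), the closed runs would yield run ends 3, 5 and 6 paired with the
+  // values 2, 7 and 2.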
+ class ValueRunBuilder : public internal::RunCompressorBuilder { + public: + ValueRunBuilder(MemoryPool* pool, const std::shared_ptr& value_builder, + const std::shared_ptr& value_type, + RunEndEncodedBuilder& ree_builder); + + ~ValueRunBuilder() override = default; + + Status WillCloseRun(const std::shared_ptr&, int64_t length) override { + return ree_builder_.CloseRun(length); + } + + Status WillCloseRunOfEmptyValues(int64_t length) override { + return ree_builder_.CloseRun(length); + } + + private: + RunEndEncodedBuilder& ree_builder_; + }; + + public: + RunEndEncodedBuilder(MemoryPool* pool, + const std::shared_ptr& run_end_builder, + const std::shared_ptr& value_builder, + std::shared_ptr type); + + /// \brief Allocate enough memory for a given number of array elements. + /// + /// NOTE: Conservatively resizing an REE for a given number of logical + /// elements is not possible, since the physical length will vary depending on + /// the values to be appended in the future. But we can pessimistically assume + /// that each run will contain a single value and allocate that number of + /// runs. + Status Resize(int64_t capacity) override { return ResizePhysical(capacity); } + + /// \brief Allocate enough memory for a given number of runs. + Status ResizePhysical(int64_t capacity); + + /// \brief Ensure that there is enough space allocated to append the indicated + /// number of run without any further reallocation. Overallocation is + /// used in order to minimize the impact of incremental ReservePhysical() calls. + /// Note that additional_capacity is relative to the current number of elements + /// rather than to the current capacity, so calls to Reserve() which are not + /// interspersed with addition of new elements may not increase the capacity. + /// + /// \param[in] additional_capacity the number of additional runs + /// \return Status + Status ReservePhysical(int64_t additional_capacity) { + return Reserve(additional_capacity); + } + + void Reset() override; + + Status AppendNull() final { return AppendNulls(1); } + Status AppendNulls(int64_t length) override; + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + Status AppendEmptyValues(int64_t length) override; + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override; + Status AppendScalars(const ScalarVector& scalars) override; + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Forces the closing of the current run if one is currently open. + /// + /// This can be called when one wants to ensure the current run will not be + /// extended. This may cause identical values to appear close to each other in + /// the values array (i.e. two runs that could be a single run) if more + /// values are appended after this is called. 
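+  ///
+  /// For example, appending the value 5, calling FinishCurrentRun(), and then
+  /// appending 5 again may produce two adjacent runs of 5 instead of a single
+  /// run of length 2.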
+ Status FinishCurrentRun(); + + std::shared_ptr type() const override; + + private: + /// \brief Update physical capacity and logical length + /// + /// \param committed_logical_length number of logical values that have been + /// committed to the values array + /// \param open_run_length number of logical values in the currently open run if any + inline void UpdateDimensions(int64_t committed_logical_length, + int64_t open_run_length) { + capacity_ = run_end_builder().capacity(); + length_ = committed_logical_length + open_run_length; + committed_logical_length_ = committed_logical_length; + } + + // Pre-condition: !value_run_builder_.has_open_run() + template + Status DoAppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length); + + template + Status DoAppendRunEnd(int64_t run_end); + + /// \brief Cast run_end to the appropriate type and appends it to the run_ends + /// array. + Status AppendRunEnd(int64_t run_end); + + /// \brief Close a run by appending a value to the run_ends array and updating + /// length_ to reflect the new run. + /// + /// Pre-condition: run_length > 0. + [[nodiscard]] Status CloseRun(int64_t run_length); + + ArrayBuilder& run_end_builder(); + ArrayBuilder& value_builder(); + + private: + std::shared_ptr type_; + ValueRunBuilder* value_run_builder_; + // The length not counting the current open run in the value_run_builder_ + int64_t committed_logical_length_ = 0; +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h new file mode 100644 index 0000000000000000000000000000000000000000..da29ae3124b5d3da32605503b29edf6920cdf6d6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Contains declarations of time related Arrow builder types. 
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_primitive.h"
+
+namespace arrow {
+
+/// \addtogroup temporal-builders
+///
+/// @{
+
+// TODO(ARROW-7938): this class is untested
+
+class ARROW_EXPORT DayTimeIntervalBuilder : public NumericBuilder<DayTimeIntervalType> {
+ public:
+  using DayMilliseconds = DayTimeIntervalType::DayMilliseconds;
+
+  explicit DayTimeIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : DayTimeIntervalBuilder(day_time_interval(), pool, alignment) {}
+
+  explicit DayTimeIntervalBuilder(std::shared_ptr<DataType> type,
+                                  MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<DayTimeIntervalType>(type, pool, alignment) {}
+};
+
+class ARROW_EXPORT MonthDayNanoIntervalBuilder
+    : public NumericBuilder<MonthDayNanoIntervalType> {
+ public:
+  using MonthDayNanos = MonthDayNanoIntervalType::MonthDayNanos;
+
+  explicit MonthDayNanoIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : MonthDayNanoIntervalBuilder(month_day_nano_interval(), pool, alignment) {}
+
+  explicit MonthDayNanoIntervalBuilder(std::shared_ptr<DataType> type,
+                                       MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<MonthDayNanoIntervalType>(type, pool, alignment) {}
+};
+
+/// @}
+
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
new file mode 100644
index 0000000000000000000000000000000000000000..718ef4c32cebef1d30e4f7c036a7ab8f4b333e4a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
@@ -0,0 +1,254 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/array/array_nested.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/memory_pool.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-builders
+///
+/// @{
+
+/// \brief Base class for union array builders.
+///
+/// Note that while we subclass ArrayBuilder, as union types do not have a
+/// validity bitmap, the bitmap builder member of ArrayBuilder is not used.
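+///
+/// A minimal usage sketch with the DenseUnionBuilder subclass declared below
+/// (the Int64Builder child and field name are illustrative; error handling
+/// elided):
+///
+///     DenseUnionBuilder builder(default_memory_pool());
+///     auto ints = std::make_shared<Int64Builder>();
+///     const int8_t ints_id = builder.AppendChild(ints, "ints");
+///     ARROW_RETURN_NOT_OK(builder.Append(ints_id));  // open a union slot...
+///     ARROW_RETURN_NOT_OK(ints->Append(7));          // ...then fill the child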
+class ARROW_EXPORT BasicUnionBuilder : public ArrayBuilder {
+ public:
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<UnionArray>* out) { return FinishTyped(out); }
+
+  /// \brief Make a new child builder available to the UnionArray
+  ///
+  /// \param[in] new_child the child builder
+  /// \param[in] field_name the name of the field in the union array type
+  /// if type inference is used
+  /// \return child index, which is the "type" argument that needs
+  /// to be passed to the "Append" method to add a new element to
+  /// the union array.
+  int8_t AppendChild(const std::shared_ptr<ArrayBuilder>& new_child,
+                     const std::string& field_name = "");
+
+  std::shared_ptr<DataType> type() const override;
+
+  int64_t length() const override { return types_builder_.length(); }
+
+ protected:
+  BasicUnionBuilder(MemoryPool* pool, int64_t alignment,
+                    const std::vector<std::shared_ptr<ArrayBuilder>>& children,
+                    const std::shared_ptr<DataType>& type);
+
+  int8_t NextTypeId();
+
+  std::vector<std::shared_ptr<Field>> child_fields_;
+  std::vector<int8_t> type_codes_;
+  UnionMode::type mode_;
+
+  std::vector<ArrayBuilder*> type_id_to_children_;
+  std::vector<int> type_id_to_child_id_;
+  // for all type_id < dense_type_id_, type_id_to_children_[type_id] != nullptr
+  int8_t dense_type_id_ = 0;
+  TypedBufferBuilder<int8_t> types_builder_;
+};
+
+/// \class DenseUnionBuilder
+///
+/// This API is EXPERIMENTAL.
+class ARROW_EXPORT DenseUnionBuilder : public BasicUnionBuilder {
+ public:
+  /// Use this constructor to initialize the UnionBuilder with no child builders,
+  /// allowing type to be inferred. You will need to call AppendChild for each of the
+  /// children builders you want to use.
+  explicit DenseUnionBuilder(MemoryPool* pool,
+                             int64_t alignment = kDefaultBufferAlignment)
+      : BasicUnionBuilder(pool, alignment, {}, dense_union(FieldVector{})),
+        offsets_builder_(pool, alignment) {}
+
+  /// Use this constructor to specify the type explicitly.
+ /// You can still add child builders to the union after using this constructor + DenseUnionBuilder(MemoryPool* pool, + const std::vector>& children, + const std::shared_ptr& type, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, children, type), + offsets_builder_(pool, alignment) {} + + Status AppendNull() final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(static_cast(child_builder->length()))); + // Append a null arbitrarily to the first child + return child_builder->AppendNull(); + } + + Status AppendNulls(int64_t length) final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(length, static_cast(child_builder->length()))); + // Append just a single null to the first child + return child_builder->AppendNull(); + } + + Status AppendEmptyValue() final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(static_cast(child_builder->length()))); + // Append an empty value arbitrarily to the first child + return child_builder->AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(length, static_cast(child_builder->length()))); + // Append just a single empty value to the first child + return child_builder->AppendEmptyValue(); + } + + /// \brief Append an element to the UnionArray. This must be followed + /// by an append to the appropriate child builder. + /// + /// \param[in] next_type type_id of the child to which the next value will be appended. + /// + /// The corresponding child builder must be appended to independently after this method + /// is called. + Status Append(int8_t next_type) { + ARROW_RETURN_NOT_OK(types_builder_.Append(next_type)); + if (type_id_to_children_[next_type]->length() == kListMaximumElements) { + return Status::CapacityError( + "a dense UnionArray cannot contain more than 2^31 - 1 elements from a single " + "child"); + } + auto offset = static_cast(type_id_to_children_[next_type]->length()); + return offsets_builder_.Append(offset); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + Status FinishInternal(std::shared_ptr* out) override; + + private: + TypedBufferBuilder offsets_builder_; +}; + +/// \class SparseUnionBuilder +/// +/// This API is EXPERIMENTAL. +class ARROW_EXPORT SparseUnionBuilder : public BasicUnionBuilder { + public: + /// Use this constructor to initialize the UnionBuilder with no child builders, + /// allowing type to be inferred. You will need to call AppendChild for each of the + /// children builders you want to use. 
+ explicit SparseUnionBuilder(MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, {}, sparse_union(FieldVector{})) {} + + /// Use this constructor to specify the type explicitly. + /// You can still add child builders to the union after using this constructor + SparseUnionBuilder(MemoryPool* pool, + const std::vector>& children, + const std::shared_ptr& type, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, children, type) {} + + /// \brief Append a null value. + /// + /// A null is appended to the first child, empty values to the other children. + Status AppendNull() final { + const auto first_child_code = type_codes_[0]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNull()); + for (int i = 1; i < static_cast(type_codes_.size()); ++i) { + ARROW_RETURN_NOT_OK(type_id_to_children_[type_codes_[i]]->AppendEmptyValue()); + } + return Status::OK(); + } + + /// \brief Append multiple null values. + /// + /// Nulls are appended to the first child, empty values to the other children. + Status AppendNulls(int64_t length) final { + const auto first_child_code = type_codes_[0]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNulls(length)); + for (int i = 1; i < static_cast(type_codes_.size()); ++i) { + ARROW_RETURN_NOT_OK( + type_id_to_children_[type_codes_[i]]->AppendEmptyValues(length)); + } + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(types_builder_.Append(type_codes_[0])); + for (int8_t code : type_codes_) { + ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValue()); + } + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(types_builder_.Append(length, type_codes_[0])); + for (int8_t code : type_codes_) { + ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValues(length)); + } + return Status::OK(); + } + + /// \brief Append an element to the UnionArray. This must be followed + /// by an append to the appropriate child builder. + /// + /// \param[in] next_type type_id of the child to which the next value will be appended. + /// + /// The corresponding child builder must be appended to independently after this method + /// is called, and all other child builders must have null or empty value appended. + Status Append(int8_t next_type) { return types_builder_.Append(next_type); } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; +}; + +/// @} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h new file mode 100644 index 0000000000000000000000000000000000000000..e7597aad812c4ca20a9335afa1bc44129b2ad727 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief Concatenate arrays +/// +/// \param[in] arrays a vector of arrays to be concatenated +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return the concatenated array +ARROW_EXPORT +Result> Concatenate(const ArrayVector& arrays, + MemoryPool* pool = default_memory_pool()); + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h new file mode 100644 index 0000000000000000000000000000000000000000..edd443adc43c4759e4c814f545166d30dd41c3f0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h @@ -0,0 +1,621 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include // IWYU pragma: export +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/span.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +struct ArrayData; + +namespace internal { +// ---------------------------------------------------------------------- +// Null handling for types without a validity bitmap and the dictionary type + +ARROW_EXPORT bool IsNullSparseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullDenseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullRunEndEncoded(const ArrayData& data, int64_t i); + +ARROW_EXPORT bool UnionMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool RunEndEncodedMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool DictionaryMayHaveLogicalNulls(const ArrayData& data); +} // namespace internal + +// When slicing, we do not know the null count of the sliced range without +// doing some computation. To avoid doing this eagerly, we set the null count +// to -1 (any negative number will do). When Array::null_count is called the +// first time, the null count will be computed. 
See ARROW-33 +constexpr int64_t kUnknownNullCount = -1; + +// ---------------------------------------------------------------------- +// Generic array data container + +/// \class ArrayData +/// \brief Mutable container for generic Arrow array data +/// +/// This data structure is a self-contained representation of the memory and +/// metadata inside an Arrow array data structure (called vectors in Java). The +/// classes arrow::Array and its subclasses provide strongly-typed accessors +/// with support for the visitor pattern and other affordances. +/// +/// This class is designed for easy internal data manipulation, analytical data +/// processing, and data transport to and from IPC messages. For example, we +/// could cast from int64 to float64 like so: +/// +/// Int64Array arr = GetMyData(); +/// auto new_data = arr.data()->Copy(); +/// new_data->type = arrow::float64(); +/// DoubleArray double_arr(new_data); +/// +/// This object is also useful in an analytics setting where memory may be +/// reused. For example, if we had a group of operations all returning doubles, +/// say: +/// +/// Log(Sqrt(Expr(arr))) +/// +/// Then the low-level implementations of each of these functions could have +/// the signatures +/// +/// void Log(const ArrayData& values, ArrayData* out); +/// +/// As another example a function may consume one or more memory buffers in an +/// input array and replace them with newly-allocated data, changing the output +/// data type as well. +struct ARROW_EXPORT ArrayData { + ArrayData() = default; + + ArrayData(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : type(std::move(type)), length(length), null_count(null_count), offset(offset) {} + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + } + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + this->child_data = std::move(child_data); + } + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + std::shared_ptr dictionary, int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + // Move constructor + ArrayData(ArrayData&& other) noexcept + : type(std::move(other.type)), + length(other.length), + offset(other.offset), + buffers(std::move(other.buffers)), + child_data(std::move(other.child_data)), + dictionary(std::move(other.dictionary)) { + SetNullCount(other.null_count); + } + + // Copy constructor + ArrayData(const ArrayData& other) noexcept + : type(other.type), + length(other.length), + offset(other.offset), + buffers(other.buffers), + child_data(other.child_data), + dictionary(other.dictionary) { + 
SetNullCount(other.null_count); + } + + // Move assignment + ArrayData& operator=(ArrayData&& other) { + type = std::move(other.type); + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = std::move(other.buffers); + child_data = std::move(other.child_data); + dictionary = std::move(other.dictionary); + return *this; + } + + // Copy assignment + ArrayData& operator=(const ArrayData& other) { + type = other.type; + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = other.buffers; + child_data = other.child_data; + dictionary = other.dictionary; + return *this; + } + + std::shared_ptr Copy() const { return std::make_shared(*this); } + + bool IsNull(int64_t i) const { return !IsValid(i); } + + bool IsValid(int64_t i) const { + if (buffers[0] != NULLPTR) { + return bit_util::GetBit(buffers[0]->data(), i + offset); + } + const auto type = this->type->id(); + if (type == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*this, i); + } + if (type == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*this, i); + } + if (type == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*this, i); + } + return null_count.load() != length; + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, offset); + } + + // Like GetValues, but returns NULLPTR instead of aborting if the underlying + // buffer is not a CPU buffer. + template + inline const T* GetValuesSafe(int i, int64_t absolute_offset) const { + if (buffers[i] && buffers[i]->is_cpu()) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValuesSafe(int i) const { + return GetValuesSafe(i, offset); + } + + // Access a buffer's data as a typed C pointer + template + inline T* GetMutableValues(int i, int64_t absolute_offset) { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->mutable_data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline T* GetMutableValues(int i) { + return GetMutableValues(i, offset); + } + + /// \brief Construct a zero-copy slice of the data with the given offset and length + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Input-checking variant of Slice + /// + /// An Invalid Status is returned if the requested slice falls out of bounds. + /// Note that unlike Slice, `length` isn't clamped to the available buffer size. + Result> SliceSafe(int64_t offset, int64_t length) const; + + void SetNullCount(int64_t v) { null_count.store(v); } + + /// \brief Return physical null count, or compute and set it if it's not known + int64_t GetNullCount() const; + + /// \brief Return true if the data has a validity bitmap and the physical null + /// count is known to be non-zero or not yet known. + /// + /// Note that this is not the same as MayHaveLogicalNulls, which also checks + /// for the presence of nulls in child data for types like unions and run-end + /// encoded types. 
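+  // A minimal access sketch, assuming an ArrayData `data` holding int32
+  // values; `Consume` is a hypothetical callback, and buffer 1 is the values
+  // buffer for primitive types:
+  //
+  //   const int32_t* values = data.GetValues<int32_t>(1);
+  //   for (int64_t i = 0; i < data.length; ++i) {
+  //     if (data.IsValid(i)) {
+  //       Consume(values[i]);
+  //     }
+  //   }
+  //   // Drop the first element without copying any buffers:
+  //   std::shared_ptr<ArrayData> rest = data.Slice(1, data.length - 1);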
+  ///
+  /// \see HasValidityBitmap
+  /// \see MayHaveLogicalNulls
+  bool MayHaveNulls() const {
+    // If an ArrayData is slightly malformed it may have kUnknownNullCount set
+    // but no buffer
+    return null_count.load() != 0 && buffers[0] != NULLPTR;
+  }
+
+  /// \brief Return true if the data has a validity bitmap
+  bool HasValidityBitmap() const { return buffers[0] != NULLPTR; }
+
+  /// \brief Return true if the validity bitmap may have 0's in it, or if the
+  /// child arrays (in the case of types without a validity bitmap) may have
+  /// nulls, or if the dictionary of a dictionary array may have nulls.
+  ///
+  /// This is not a drop-in replacement for MayHaveNulls, as historically
+  /// MayHaveNulls() has been used to check for the presence of a validity
+  /// bitmap that needs to be checked.
+  ///
+  /// Code that previously used MayHaveNulls() and then dealt with the validity
+  /// bitmap directly can be fixed to handle all types correctly without
+  /// performance degradation when handling most types by adopting
+  /// HasValidityBitmap and MayHaveLogicalNulls.
+  ///
+  /// Before:
+  ///
+  ///     uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+  ///     for (int64_t i = 0; i < array.length; ++i) {
+  ///       if (validity && !bit_util::GetBit(validity, i)) {
+  ///         continue;  // skip a NULL
+  ///       }
+  ///       ...
+  ///     }
+  ///
+  /// After:
+  ///
+  ///     bool all_valid = !array.MayHaveLogicalNulls();
+  ///     uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+  ///     for (int64_t i = 0; i < array.length; ++i) {
+  ///       bool is_valid = all_valid ||
+  ///                       (validity && bit_util::GetBit(validity, i)) ||
+  ///                       array.IsValid(i);
+  ///       if (!is_valid) {
+  ///         continue;  // skip a NULL
+  ///       }
+  ///       ...
+  ///     }
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0] != NULLPTR) {
+      return null_count.load() != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return internal::UnionMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return internal::RunEndEncodedMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::DICTIONARY) {
+      return internal::DictionaryMayHaveLogicalNulls(*this);
+    }
+    return null_count.load() != 0;
+  }
+
+  /// \brief Computes the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  std::shared_ptr<DataType> type;
+  int64_t length = 0;
+  mutable std::atomic<int64_t> null_count{0};
+  // The logical start point into the physical buffers (in values, not bytes).
+  // Note that, for child data, this must be *added* to the child data's own offset.
+  int64_t offset = 0;
+  std::vector<std::shared_ptr<Buffer>> buffers;
+  std::vector<std::shared_ptr<ArrayData>> child_data;
+
+  // The dictionary for this Array, if any. Only used for dictionary type
+  std::shared_ptr<ArrayData> dictionary;
+};
+
+/// \brief A non-owning Buffer reference
+struct ARROW_EXPORT BufferSpan {
+  // It is the user of this class's responsibility to ensure that
+  // buffers that were const originally are not written to
+  // accidentally.
+ uint8_t* data = NULLPTR; + int64_t size = 0; + // Pointer back to buffer that owns this memory + const std::shared_ptr* owner = NULLPTR; + + template + const T* data_as() const { + return reinterpret_cast(data); + } + template + T* mutable_data_as() { + return reinterpret_cast(data); + } +}; + +/// \brief EXPERIMENTAL: A non-owning ArrayData reference that is cheaply +/// copyable and does not contain any shared_ptr objects. Do not use in public +/// APIs aside from compute kernels for now +struct ARROW_EXPORT ArraySpan { + const DataType* type = NULLPTR; + int64_t length = 0; + mutable int64_t null_count = kUnknownNullCount; + int64_t offset = 0; + BufferSpan buffers[3]; + + ArraySpan() = default; + + explicit ArraySpan(const DataType* type, int64_t length) : type(type), length(length) {} + + ArraySpan(const ArrayData& data) { // NOLINT implicit conversion + SetMembers(data); + } + explicit ArraySpan(const Scalar& data) { FillFromScalar(data); } + + /// If dictionary-encoded, put dictionary in the first entry + std::vector child_data; + + /// \brief Populate ArraySpan to look like an array of length 1 pointing at + /// the data members of a Scalar value + void FillFromScalar(const Scalar& value); + + void SetMembers(const ArrayData& data); + + void SetBuffer(int index, const std::shared_ptr& buffer) { + this->buffers[index].data = const_cast(buffer->data()); + this->buffers[index].size = buffer->size(); + this->buffers[index].owner = &buffer; + } + + const ArraySpan& dictionary() const { return child_data[0]; } + + /// \brief Return the number of buffers (out of 3) that are used to + /// constitute this array + int num_buffers() const; + + // Access a buffer's data as a typed C pointer + template + inline T* GetValues(int i, int64_t absolute_offset) { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline T* GetValues(int i) { + return GetValues(i, this->offset); + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, this->offset); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for + /// this array type + /// \return A span of the requested length + template + util::span GetSpan(int i, int64_t length) const { + const int64_t buffer_length = buffers[i].size / static_cast(sizeof(T)); + assert(i > 0 && length + offset <= buffer_length); + ARROW_UNUSED(buffer_length); + return util::span(buffers[i].data_as() + this->offset, length); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for + /// this array type + /// \return A span of the requested length + template + util::span GetSpan(int i, int64_t length) { + const int64_t buffer_length = buffers[i].size / static_cast(sizeof(T)); + assert(i > 0 && length + offset <= buffer_length); + ARROW_UNUSED(buffer_length); + return util::span(buffers[i].mutable_data_as() + this->offset, length); + } + + inline bool IsNull(int64_t i) const { 
return !IsValid(i); }
+
+  inline bool IsValid(int64_t i) const {
+    if (this->buffers[0].data != NULLPTR) {
+      return bit_util::GetBit(this->buffers[0].data, i + this->offset);
+    } else {
+      const auto type = this->type->id();
+      if (type == Type::SPARSE_UNION) {
+        return !IsNullSparseUnion(i);
+      }
+      if (type == Type::DENSE_UNION) {
+        return !IsNullDenseUnion(i);
+      }
+      if (type == Type::RUN_END_ENCODED) {
+        return !IsNullRunEndEncoded(i);
+      }
+      return this->null_count != this->length;
+    }
+  }
+
+  std::shared_ptr<ArrayData> ToArrayData() const;
+
+  std::shared_ptr<Array> ToArray() const;
+
+  std::shared_ptr<Buffer> GetBuffer(int index) const {
+    const BufferSpan& buf = this->buffers[index];
+    if (buf.owner) {
+      return *buf.owner;
+    } else if (buf.data != NULLPTR) {
+      // Buffer points to some memory without an owning buffer
+      return std::make_shared<Buffer>(buf.data, buf.size);
+    } else {
+      return NULLPTR;
+    }
+  }
+
+  void SetSlice(int64_t offset, int64_t length) {
+    this->offset = offset;
+    this->length = length;
+    if (this->type->id() == Type::NA) {
+      this->null_count = this->length;
+    } else if (this->MayHaveNulls()) {
+      this->null_count = kUnknownNullCount;
+    } else {
+      this->null_count = 0;
+    }
+  }
+
+  /// \brief Return physical null count, or compute and set it if it's not known
+  int64_t GetNullCount() const;
+
+  /// \brief Return true if the array has a validity bitmap and the physical null
+  /// count is known to be non-zero or not yet known
+  ///
+  /// Note that this is not the same as MayHaveLogicalNulls, which also checks
+  /// for the presence of nulls in child data for types like unions and run-end
+  /// encoded types.
+  ///
+  /// \see HasValidityBitmap
+  /// \see MayHaveLogicalNulls
+  bool MayHaveNulls() const {
+    // If an ArrayData is slightly malformed it may have kUnknownNullCount set
+    // but no buffer
+    return null_count != 0 && buffers[0].data != NULLPTR;
+  }
+
+  /// \brief Return true if the array has a validity bitmap
+  bool HasValidityBitmap() const { return buffers[0].data != NULLPTR; }
+
+  /// \brief Return true if the validity bitmap may have 0's in it, or if the
+  /// child arrays (in the case of types without a validity bitmap) may have
+  /// nulls, or if the dictionary of a dictionary array may have nulls.
+  ///
+  /// \see ArrayData::MayHaveLogicalNulls
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0].data != NULLPTR) {
+      return null_count != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return UnionMayHaveLogicalNulls();
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return RunEndEncodedMayHaveLogicalNulls();
+    }
+    if (t == Type::DICTIONARY) {
+      return DictionaryMayHaveLogicalNulls();
+    }
+    return null_count != 0;
+  }
+
+  /// \brief Compute the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the logical null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  /// Some DataTypes (StringView, BinaryView) may have an arbitrary number of variadic
+  /// buffers. Since ArraySpan only has 3 buffers, we pack the variadic buffers into
+  /// buffers[2]; i.e. buffers[2].data points to the first shared_ptr<Buffer> of the
+  /// variadic set and buffers[2].size is the number of variadic buffers times
+  /// sizeof(shared_ptr<Buffer>).
+  ///
+  /// \see HasVariadicBuffers
+  util::span<const std::shared_ptr<Buffer>> GetVariadicBuffers() const;
+  bool HasVariadicBuffers() const;
+
+ private:
+  ARROW_FRIEND_EXPORT friend bool internal::IsNullRunEndEncoded(const ArrayData& span,
+                                                                int64_t i);
+
+  bool IsNullSparseUnion(int64_t i) const;
+  bool IsNullDenseUnion(int64_t i) const;
+
+  /// \brief Return true if the value at logical index i is null
+  ///
+  /// This function uses binary-search, so it has a O(log N) cost.
+  /// Iterating over the whole array and calling IsNull is O(N log N), so
+  /// for better performance it is recommended to use a
+  /// ree_util::RunEndEncodedArraySpan to iterate run by run instead.
+  bool IsNullRunEndEncoded(int64_t i) const;
+
+  bool UnionMayHaveLogicalNulls() const;
+  bool RunEndEncodedMayHaveLogicalNulls() const;
+  bool DictionaryMayHaveLogicalNulls() const;
+};
+
+namespace internal {
+
+void FillZeroLengthArray(const DataType* type, ArraySpan* span);
+
+/// Construct a zero-copy view of this ArrayData with the given type.
+///
+/// This method checks if the types are layout-compatible.
+/// Nested types are traversed in depth-first order. Data buffers must have
+/// the same item sizes, even though the logical types may be different.
+/// An error is returned if the types are not layout-compatible.
+ARROW_EXPORT
+Result<std::shared_ptr<ArrayData>> GetArrayView(const std::shared_ptr<ArrayData>& data,
+                                                const std::shared_ptr<DataType>& type);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h
new file mode 100644
index 0000000000000000000000000000000000000000..a405164b333f3b21a17e8414ef59a8a628c28579
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h
@@ -0,0 +1,76 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <iosfwd>
+#include <memory>
+#include <utility>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/array_nested.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief Compare two arrays, returning an edit script which expresses the difference
+/// between them
+///
+/// An edit script is an array of struct(insert: bool, run_length: int64_t).
+/// Each element of "insert" determines whether an element was inserted into (true)
+/// or deleted from (false) base. Each insertion or deletion is followed by a run of
+/// elements which are unchanged from base to target; the length of this run is stored
+/// in "run_length". (Note that the edit script begins and ends with a run of shared
+/// elements but both fields of the struct must have the same length. To accommodate this
+/// the first element of "insert" should be ignored.)
+///
+/// For example for base "hlloo" and target "hello", the edit script would be
+/// [
+///   {"insert": false, "run_length": 1}, // leading run of length 1 ("h")
+///   {"insert": true, "run_length": 3}, // insert("e") then a run of length 3 ("llo")
+///   {"insert": false, "run_length": 0} // delete("o") then an empty run
+/// ]
+///
+/// Diffing arrays containing nulls is not currently supported.
+///
+/// \param[in] base baseline for comparison
+/// \param[in] target an array of identical type to base whose elements differ from base's
+/// \param[in] pool memory to store the result will be allocated from this memory pool
+/// \return an edit script array which can be applied to base to produce target
+ARROW_EXPORT
+Result<std::shared_ptr<StructArray>> Diff(const Array& base, const Array& target,
+                                          MemoryPool* pool = default_memory_pool());
+
+/// \brief visitor interface for easy traversal of an edit script
+///
+/// visitor will be called for each hunk of insertions and deletions.
+ARROW_EXPORT Status VisitEditScript(
+    const Array& edits,
+    const std::function<Status(int64_t delete_begin, int64_t delete_end,
+                               int64_t insert_begin, int64_t insert_end)>& visitor);
+
+/// \brief return a function which will format an edit script in unified
+/// diff format to os, given base and target arrays of type
+ARROW_EXPORT Result<
+    std::function<Status(const Array& edits, const Array& base, const Array& target)>>
+MakeUnifiedDiffFormatter(const DataType& type, std::ostream* os);
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f34af0525d96449a76d7d7ea8be8439dca588a4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
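+// A minimal usage sketch for the Diff/VisitEditScript APIs above, assuming
+// two pre-built arrays `base` and `target` of the same type:
+//
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<StructArray> edits,
+//                         Diff(*base, *target));
+//   ARROW_RETURN_NOT_OK(VisitEditScript(
+//       *edits,
+//       [](int64_t delete_begin, int64_t delete_end, int64_t insert_begin,
+//          int64_t insert_end) {
+//         // Inspect one hunk of deletions/insertions here.
+//         return Status::OK();
+//       }));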
+ +#pragma once + +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief Create a strongly-typed Array instance from generic ArrayData +/// \param[in] data the array contents +/// \return the resulting Array instance +ARROW_EXPORT +std::shared_ptr MakeArray(const std::shared_ptr& data); + +/// \brief Create a strongly-typed Array instance with all elements null +/// \param[in] type the array type +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayOfNull(const std::shared_ptr& type, + int64_t length, + MemoryPool* pool = default_memory_pool()); + +/// \brief Create an Array instance whose slots are the given scalar +/// \param[in] scalar the value with which to fill the array +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayFromScalar( + const Scalar& scalar, int64_t length, MemoryPool* pool = default_memory_pool()); + +/// \brief Create an empty Array of a given type +/// +/// The output Array will be of the given type. +/// +/// \param[in] type the data type of the empty Array +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting Array +ARROW_EXPORT +Result> MakeEmptyArray(std::shared_ptr type, + MemoryPool* pool = default_memory_pool()); + +namespace internal { + +/// \brief Swap endian of each element in a generic ArrayData +/// +/// As dictionaries are often shared between different arrays, dictionaries +/// are not swapped by this function and should be handled separately. +/// +/// \param[in] data the array contents +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting ArrayData whose elements were swapped +ARROW_EXPORT +Result> SwapEndianArrayData( + const std::shared_ptr& data, MemoryPool* pool = default_memory_pool()); + +/// Given a number of ArrayVectors, treat each ArrayVector as the +/// chunks of a chunked array. Then rechunk each ArrayVector such that +/// all ArrayVectors are chunked identically. It is mandatory that +/// all ArrayVectors contain the same total number of elements. +ARROW_EXPORT +std::vector RechunkArraysConsistently(const std::vector&); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h new file mode 100644 index 0000000000000000000000000000000000000000..3ebfa0a51edce21ca585862b1dbb074b6cf8d9c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// Internal functions implementing Array::Validate() and friends. + +// O(1) array metadata validation + +ARROW_EXPORT +Status ValidateArray(const Array& array); + +ARROW_EXPORT +Status ValidateArray(const ArrayData& data); + +// O(N) array data validation. +// Note that, starting from 7.0.0, "full" routines also validate metadata. +// Before, ValidateArray() needed to be called before ValidateArrayFull() +// to ensure metadata correctness, otherwise invalid memory accesses +// may occur. + +ARROW_EXPORT +Status ValidateArrayFull(const Array& array); + +ARROW_EXPORT +Status ValidateArrayFull(const ArrayData& data); + +ARROW_EXPORT +Status ValidateUTF8(const Array& array); + +ARROW_EXPORT +Status ValidateUTF8(const ArrayData& data); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h new file mode 100644 index 0000000000000000000000000000000000000000..ffcffe12e3c78bb92452f33644a8293c03aa8175 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h @@ -0,0 +1,328 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces to use for defining Flight RPC servers. 
API should be considered +// experimental for now + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/server_auth.h" +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/types.h" // IWYU pragma: keep +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/ipc/dictionary.h" +#include "arrow/ipc/options.h" +#include "arrow/record_batch.h" + +namespace arrow { + +class Schema; +class Status; + +namespace flight { + +/// \brief Interface that produces a sequence of IPC payloads to be sent in +/// FlightData protobuf messages +class ARROW_FLIGHT_EXPORT FlightDataStream { + public: + virtual ~FlightDataStream(); + + virtual std::shared_ptr schema() = 0; + + /// \brief Compute FlightPayload containing serialized RecordBatch schema + virtual arrow::Result GetSchemaPayload() = 0; + + // When the stream is completed, the last payload written will have null + // metadata + virtual arrow::Result Next() = 0; + + virtual Status Close(); +}; + +/// \brief A basic implementation of FlightDataStream that will provide +/// a sequence of FlightData messages to be written to a stream +class ARROW_FLIGHT_EXPORT RecordBatchStream : public FlightDataStream { + public: + /// \param[in] reader produces a sequence of record batches + /// \param[in] options IPC options for writing + explicit RecordBatchStream( + const std::shared_ptr& reader, + const ipc::IpcWriteOptions& options = ipc::IpcWriteOptions::Defaults()); + ~RecordBatchStream() override; + + // inherit deprecated API + using FlightDataStream::GetSchemaPayload; + using FlightDataStream::Next; + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + + arrow::Result Next() override; + Status Close() override; + + private: + class RecordBatchStreamImpl; + std::unique_ptr impl_; +}; + +/// \brief A reader for IPC payloads uploaded by a client. Also allows +/// reading application-defined metadata via the Flight protocol. +class ARROW_FLIGHT_EXPORT FlightMessageReader : public MetadataRecordBatchReader { + public: + /// \brief Get the descriptor for this upload. + virtual const FlightDescriptor& descriptor() const = 0; +}; + +/// \brief A writer for application-specific metadata sent back to the +/// client during an upload. +class ARROW_FLIGHT_EXPORT FlightMetadataWriter { + public: + virtual ~FlightMetadataWriter(); + /// \brief Send a message to the client. + virtual Status WriteMetadata(const Buffer& app_metadata) = 0; +}; + +/// \brief A writer for IPC payloads to a client. Also allows sending +/// application-defined metadata via the Flight protocol. +/// +/// This class offers more control compared to FlightDataStream, +/// including the option to write metadata without data and the +/// ability to interleave reading and writing. +class ARROW_FLIGHT_EXPORT FlightMessageWriter : public MetadataRecordBatchWriter { + public: + virtual ~FlightMessageWriter() = default; +}; + +/// \brief Call state/contextual data. +class ARROW_FLIGHT_EXPORT ServerCallContext { + public: + virtual ~ServerCallContext() = default; + /// \brief The name of the authenticated peer (may be the empty string) + virtual const std::string& peer_identity() const = 0; + /// \brief The peer address (not validated) + virtual const std::string& peer() const = 0; + /// \brief Add a response header. 
This is only valid before the server + /// starts sending the response; generally this isn't an issue unless you + /// are implementing FlightDataStream, ResultStream, or similar interfaces + /// yourself, or during a DoExchange or DoPut. + virtual void AddHeader(const std::string& key, const std::string& value) const = 0; + /// \brief Add a response trailer. This is only valid before the server + /// sends the final status; generally this isn't an issue unless your RPC + /// handler launches a thread or similar. + virtual void AddTrailer(const std::string& key, const std::string& value) const = 0; + /// \brief Look up a middleware by key. Do not maintain a reference + /// to the object beyond the request body. + /// \return The middleware, or nullptr if not found. + virtual ServerMiddleware* GetMiddleware(const std::string& key) const = 0; + /// \brief Check if the current RPC has been cancelled (by the client, by + /// a network error, etc.). + virtual bool is_cancelled() const = 0; + /// \brief The headers sent by the client for this call. + virtual const CallHeaders& incoming_headers() const = 0; +}; + +class ARROW_FLIGHT_EXPORT FlightServerOptions { + public: + explicit FlightServerOptions(const Location& location_); + + ~FlightServerOptions(); + + /// \brief The host & port (or domain socket path) to listen on. + /// Use port 0 to bind to an available port. + Location location; + /// \brief The authentication handler to use. + std::shared_ptr auth_handler; + /// \brief A list of TLS certificate+key pairs to use. + std::vector tls_certificates; + /// \brief Enable mTLS and require that the client present a certificate. + bool verify_client; + /// \brief If using mTLS, the PEM-encoded root certificate to use. + std::string root_certificates; + /// \brief A list of server middleware to apply, along with a key to + /// identify them by. + /// + /// Middleware are always applied in the order provided. Duplicate + /// keys are an error. + std::vector>> + middleware; + + /// \brief An optional memory manager to control where to allocate incoming data. + std::shared_ptr memory_manager; + + /// \brief A Flight implementation-specific callback to customize + /// transport-specific options. + /// + /// Not guaranteed to be called. The type of the parameter is + /// specific to the Flight implementation. Users should take care to + /// link to the same transport implementation as Flight to avoid + /// runtime problems. See "Using Arrow C++ in your own project" in + /// the documentation for more details. + std::function builder_hook; +}; + +/// \brief Skeleton RPC server implementation which can be used to create +/// custom servers by implementing its abstract methods +class ARROW_FLIGHT_EXPORT FlightServerBase { + public: + FlightServerBase(); + virtual ~FlightServerBase(); + + // Lifecycle methods. + + /// \brief Initialize a Flight server listening at the given location. + /// This method must be called before any other method. + /// \param[in] options The configuration for this server. + Status Init(const FlightServerOptions& options); + + /// \brief Get the port that the Flight server is listening on. + /// This method must only be called after Init(). Will return a + /// non-positive value if no port exists (e.g. when listening on a + /// domain socket). + int port() const; + + /// \brief Get the address that the Flight server is listening on. + /// This method must only be called after Init(). 
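+  // A minimal startup sketch, assuming a FlightServerBase subclass named
+  // MyFlightServer (hypothetical); port 0 binds to any free port, and
+  // Serve() below blocks until shutdown:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(auto location, Location::ForGrpcTcp("0.0.0.0", 0));
+  //   FlightServerOptions options(location);
+  //   MyFlightServer server;
+  //   ARROW_RETURN_NOT_OK(server.Init(options));
+  //   // After Init(), port() reports the port actually bound.
+  //   ARROW_RETURN_NOT_OK(server.Serve());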
+ Location location() const; + + /// \brief Set the server to stop when receiving any of the given signal + /// numbers. + /// This method must be called before Serve(). + Status SetShutdownOnSignals(const std::vector sigs); + + /// \brief Start serving. + /// This method blocks until the server shuts down. + /// + /// The server will start to shut down when either Shutdown() is called + /// or one of the signals registered in SetShutdownOnSignals() is received. + Status Serve(); + + /// \brief Query whether Serve() was interrupted by a signal. + /// This method must be called after Serve() has returned. + /// + /// \return int the signal number that interrupted Serve(), if any, otherwise 0 + int GotSignal() const; + + /// \brief Shut down the server, blocking until current requests finish. + /// + /// Can be called from a signal handler or another thread while Serve() + /// blocks. Optionally a deadline can be set. Once the deadline expires + /// server will wait until remaining running calls complete. + /// + /// Should only be called once. + Status Shutdown(const std::chrono::system_clock::time_point* deadline = NULLPTR); + + /// \brief Block until server shuts down with Shutdown. + /// + /// Does not respond to signals like Serve(). + Status Wait(); + + // Implement these methods to create your own server. The default + // implementations will return a not-implemented result to the client + + /// \brief Retrieve a list of available fields given an optional opaque + /// criteria + /// \param[in] context The call context. + /// \param[in] criteria may be null + /// \param[out] listings the returned listings iterator + /// \return Status + virtual Status ListFlights(const ServerCallContext& context, const Criteria* criteria, + std::unique_ptr* listings); + + /// \brief Retrieve the schema and an access plan for the indicated + /// descriptor + /// \param[in] context The call context. + /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] info the returned flight info provider + /// \return Status + virtual Status GetFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the current status of the target query + /// \param[in] context The call context. + /// \param[in] request the dataset request or a descriptor returned by a + /// prior PollFlightInfo call + /// \param[out] info the returned retry info provider + /// \return Status + virtual Status PollFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the schema for the indicated descriptor + /// \param[in] context The call context. + /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] schema the returned flight schema provider + /// \return Status + virtual Status GetSchema(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* schema); + + /// \brief Get a stream of IPC payloads to put on the wire + /// \param[in] context The call context. + /// \param[in] request an opaque ticket + /// \param[out] stream the returned stream provider + /// \return Status + virtual Status DoGet(const ServerCallContext& context, const Ticket& request, + std::unique_ptr* stream); + + /// \brief Process a stream of IPC payloads sent from a client + /// \param[in] context The call context. 
+ /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send metadata back to the client + /// \return Status + virtual Status DoPut(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Process a bidirectional stream of IPC payloads + /// \param[in] context The call context. + /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send data back to the client + /// \return Status + virtual Status DoExchange(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Execute an action, return stream of zero or more results + /// \param[in] context The call context. + /// \param[in] action the action to execute, with type and body + /// \param[out] result the result iterator + /// \return Status + virtual Status DoAction(const ServerCallContext& context, const Action& action, + std::unique_ptr* result); + + /// \brief Retrieve the list of available actions + /// \param[in] context The call context. + /// \param[out] actions a vector of available action types + /// \return Status + virtual Status ListActions(const ServerCallContext& context, + std::vector* actions); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace flight +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..c0b42d9b90c5a1f4c211c78d3685a31000d76f57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h @@ -0,0 +1,264 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
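+// A minimal handler sketch: a FlightServerBase subclass that serves DoGet by
+// wrapping a RecordBatchReader in the RecordBatchStream declared in server.h.
+// `MakeMyReader` is a hypothetical factory keyed by the opaque ticket:
+//
+//   class SimpleServer : public FlightServerBase {
+//    public:
+//     Status DoGet(const ServerCallContext& context, const Ticket& request,
+//                  std::unique_ptr<FlightDataStream>* stream) override {
+//       std::shared_ptr<RecordBatchReader> reader = MakeMyReader(request.ticket);
+//       *stream = std::make_unique<RecordBatchStream>(reader);
+//       return Status::OK();
+//     }
+//   };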
+ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" + +#include "arrow/flight/client.h" +#include "arrow/flight/client_auth.h" +#include "arrow/flight/server.h" +#include "arrow/flight/server_auth.h" +#include "arrow/flight/types.h" +#include "arrow/flight/visibility.h" + +namespace boost { +namespace process { + +class child; + +} // namespace process +} // namespace boost + +namespace arrow { +namespace flight { + +// ---------------------------------------------------------------------- +// Helpers to compare values for equality + +inline void AssertEqual(const FlightInfo& expected, const FlightInfo& actual) { + ipc::DictionaryMemo expected_memo; + ipc::DictionaryMemo actual_memo; + ASSERT_OK_AND_ASSIGN(auto ex_schema, expected.GetSchema(&expected_memo)); + ASSERT_OK_AND_ASSIGN(auto actual_schema, actual.GetSchema(&actual_memo)); + + AssertSchemaEqual(*ex_schema, *actual_schema); + ASSERT_EQ(expected.total_records(), actual.total_records()); + ASSERT_EQ(expected.total_bytes(), actual.total_bytes()); + + ASSERT_EQ(expected.descriptor(), actual.descriptor()); + ASSERT_THAT(actual.endpoints(), ::testing::ContainerEq(expected.endpoints())); +} + +// ---------------------------------------------------------------------- +// Fixture to use for running test servers + +class ARROW_FLIGHT_EXPORT TestServer { + public: + explicit TestServer(const std::string& executable_name) + : executable_name_(executable_name), port_(::arrow::GetListenPort()) {} + TestServer(const std::string& executable_name, int port) + : executable_name_(executable_name), port_(port) {} + TestServer(const std::string& executable_name, const std::string& unix_sock) + : executable_name_(executable_name), unix_sock_(unix_sock) {} + + void Start(const std::vector& extra_args); + void Start() { Start({}); } + + int Stop(); + + bool IsRunning(); + + int port() const; + const std::string& unix_sock() const; + + private: + std::string executable_name_; + int port_; + std::string unix_sock_; + std::shared_ptr<::boost::process::child> server_process_; +}; + +/// \brief Create a simple Flight server for testing +ARROW_FLIGHT_EXPORT +std::unique_ptr ExampleTestServer(); + +// Helper to initialize a server and matching client with callbacks to +// populate options. +template +Status MakeServer(const Location& location, std::unique_ptr* server, + std::unique_ptr* client, + std::function make_server_options, + std::function make_client_options, + Args&&... server_args) { + *server = std::make_unique(std::forward(server_args)...); + FlightServerOptions server_options(location); + RETURN_NOT_OK(make_server_options(&server_options)); + RETURN_NOT_OK((*server)->Init(server_options)); + std::string uri = + location.scheme() + "://127.0.0.1:" + std::to_string((*server)->port()); + ARROW_ASSIGN_OR_RAISE(auto real_location, Location::Parse(uri)); + FlightClientOptions client_options = FlightClientOptions::Defaults(); + RETURN_NOT_OK(make_client_options(&client_options)); + return FlightClient::Connect(real_location, client_options).Value(client); +} + +// Helper to initialize a server and matching client with callbacks to +// populate options. +template +Status MakeServer(std::unique_ptr* server, + std::unique_ptr* client, + std::function make_server_options, + std::function make_client_options, + Args&&... 
server_args) { + ARROW_ASSIGN_OR_RAISE(auto location, Location::ForGrpcTcp("localhost", 0)); + return MakeServer(location, server, client, std::move(make_server_options), + std::move(make_client_options), + std::forward(server_args)...); +} + +// ---------------------------------------------------------------------- +// A FlightDataStream that numbers the record batches +/// \brief A basic implementation of FlightDataStream that will provide +/// a sequence of FlightData messages to be written to a stream +class ARROW_FLIGHT_EXPORT NumberingStream : public FlightDataStream { + public: + explicit NumberingStream(std::unique_ptr stream); + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + arrow::Result Next() override; + + private: + int counter_; + std::shared_ptr stream_; +}; + +// ---------------------------------------------------------------------- +// Example data for test-server and unit tests + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleIntSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleStringSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleDictSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleLargeSchema(); + +ARROW_FLIGHT_EXPORT +Status ExampleIntBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleFloatBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleDictBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleNestedBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleLargeBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +arrow::Result> VeryLargeBatch(); + +ARROW_FLIGHT_EXPORT +std::vector ExampleFlightInfo(); + +ARROW_FLIGHT_EXPORT +std::vector ExampleActionTypes(); + +ARROW_FLIGHT_EXPORT +FlightInfo MakeFlightInfo(const Schema& schema, const FlightDescriptor& descriptor, + const std::vector& endpoints, + int64_t total_records, int64_t total_bytes, bool ordered, + std::string app_metadata); + +// ---------------------------------------------------------------------- +// A pair of authentication handlers that check for a predefined password +// and set the peer identity to a predefined username. 
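+// A usage sketch for the MakeServer helper above, assuming a server type
+// MyFlightServer (hypothetical); both option callbacks are pass-throughs:
+//
+//   std::unique_ptr<FlightServerBase> server;
+//   std::unique_ptr<FlightClient> client;
+//   ASSERT_OK(MakeServer<MyFlightServer>(
+//       &server, &client,
+//       [](FlightServerOptions* options) { return Status::OK(); },
+//       [](FlightClientOptions* options) { return Status::OK(); }));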
+ +class ARROW_FLIGHT_EXPORT TestServerAuthHandler : public ServerAuthHandler { + public: + explicit TestServerAuthHandler(const std::string& username, + const std::string& password); + ~TestServerAuthHandler() override; + Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing, + ServerAuthReader* incoming) override; + Status IsValid(const ServerCallContext& context, const std::string& token, + std::string* peer_identity) override; + + private: + std::string username_; + std::string password_; +}; + +class ARROW_FLIGHT_EXPORT TestServerBasicAuthHandler : public ServerAuthHandler { + public: + explicit TestServerBasicAuthHandler(const std::string& username, + const std::string& password); + ~TestServerBasicAuthHandler() override; + Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing, + ServerAuthReader* incoming) override; + Status IsValid(const ServerCallContext& context, const std::string& token, + std::string* peer_identity) override; + + private: + BasicAuth basic_auth_; +}; + +class ARROW_FLIGHT_EXPORT TestClientAuthHandler : public ClientAuthHandler { + public: + explicit TestClientAuthHandler(const std::string& username, + const std::string& password); + ~TestClientAuthHandler() override; + Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) override; + Status GetToken(std::string* token) override; + + private: + std::string username_; + std::string password_; +}; + +class ARROW_FLIGHT_EXPORT TestClientBasicAuthHandler : public ClientAuthHandler { + public: + explicit TestClientBasicAuthHandler(const std::string& username, + const std::string& password); + ~TestClientBasicAuthHandler() override; + Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) override; + Status GetToken(std::string* token) override; + + private: + BasicAuth basic_auth_; + std::string token_; +}; + +ARROW_FLIGHT_EXPORT +Status ExampleTlsCertificates(std::vector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleTlsCertificateRoot(CertKeyPair* out); + +} // namespace flight +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h new file mode 100644 index 0000000000000000000000000000000000000000..ee7bd01720730331f3584c98a4098dfd4198d5b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h @@ -0,0 +1,302 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +/// \file +/// Internal (but not private) interface for implementing +/// alternate network transports in Flight. +/// +/// \warning EXPERIMENTAL. Subject to change. 
+/// +/// To implement a transport, implement ServerTransport and +/// ClientTransport, and register the desired URI schemes with +/// TransportRegistry. Flight takes care of most of the per-RPC +/// details; transports only handle connections and providing a I/O +/// stream implementation (TransportDataStream). +/// +/// On the server side: +/// +/// 1. Applications subclass FlightServerBase and override RPC handlers. +/// 2. FlightServerBase::Init will look up and create a ServerTransport +/// based on the scheme of the Location given to it. +/// 3. The ServerTransport will start the actual server. (For instance, +/// for gRPC, it creates a gRPC server and registers a gRPC service.) +/// That server will handle connections. +/// 4. The transport should forward incoming calls to the server to the RPC +/// handlers defined on ServerTransport, which implements the actual +/// RPC handler using the interfaces here. Any I/O the RPC handler needs +/// to do is managed by transport-specific implementations of +/// TransportDataStream. +/// 5. ServerTransport calls FlightServerBase for the actual application +/// logic. +/// +/// On the client side: +/// +/// 1. Applications create a FlightClient with a Location. +/// 2. FlightClient will look up and create a ClientTransport based on +/// the scheme of the Location given to it. +/// 3. When calling a method on FlightClient, FlightClient will delegate to +/// the ClientTransport. There is some indirection, e.g. for DoGet, +/// FlightClient only requests that the ClientTransport start the +/// call and provide it with an I/O stream. The "Flight implementation" +/// itself still lives in FlightClient. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/types.h" +#include "arrow/flight/visibility.h" +#include "arrow/ipc/options.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace ipc { +class Message; +} +namespace flight { +class FlightStatusDetail; +namespace internal { + +/// Internal, not user-visible type used for memory-efficient reads +struct FlightData { + /// Used only for puts, may be null + std::unique_ptr descriptor; + + /// Non-length-prefixed Message header as described in format/Message.fbs + std::shared_ptr metadata; + + /// Application-defined metadata + std::shared_ptr app_metadata; + + /// Message body + std::shared_ptr body; + + /// Open IPC message from the metadata and body + ::arrow::Result> OpenMessage(); +}; + +/// \brief A transport-specific interface for reading/writing Arrow data. +/// +/// New transports will implement this to read/write IPC payloads to +/// the underlying stream. +class ARROW_FLIGHT_EXPORT TransportDataStream { + public: + virtual ~TransportDataStream() = default; + /// \brief Attempt to read the next FlightData message. + /// + /// \return success true if data was populated, false if there was + /// an error. For clients, the error can be retrieved from + /// Finish(Status). + virtual bool ReadData(FlightData* data); + /// \brief Attempt to write a FlightPayload. + /// + /// \param[in] payload The data to write. + /// \return true if the message was accepted by the transport, false + /// if not (e.g. due to client/server disconnect), Status if there + /// was an error (e.g. with the payload itself). + virtual arrow::Result WriteData(const FlightPayload& payload); + /// \brief Indicate that there are no more writes on this stream. 
+ /// + /// This is only a hint for the underlying transport and may not + /// actually do anything. + virtual Status WritesDone(); +}; + +/// \brief A transport-specific interface for reading/writing Arrow +/// data for a client. +class ARROW_FLIGHT_EXPORT ClientDataStream : public TransportDataStream { + public: + /// \brief Attempt to read a non-data message. + /// + /// Only implemented for DoPut; mutually exclusive with + /// ReadData(FlightData*). + virtual bool ReadPutMetadata(std::shared_ptr* out); + /// \brief Attempt to cancel the call. + /// + /// This is only a hint and may not take effect immediately. The + /// client should still finish the call with Finish(Status) as usual. + virtual void TryCancel() {} + /// \brief Finish the call, reporting the server-sent status and/or + /// any client-side errors as appropriate. + /// + /// Implies WritesDone() and DoFinish(). + /// + /// \param[in] st A client-side status to combine with the + /// server-side error. That is, if an error occurs on the + /// client-side, call Finish(Status) to finish the server-side + /// call, get the server-side status, and merge the statuses + /// together so context is not lost. + Status Finish(Status st); + + protected: + /// \brief End the call, returning the final server status. + /// + /// For implementors: should imply WritesDone() (even if it does not + /// directly call it). + /// + /// Implies WritesDone(). + virtual Status DoFinish() = 0; +}; + +/// An implementation of a Flight client for a particular transport. +/// +/// Transports should override the methods they are capable of +/// supporting. The default method implementations return an error. +class ARROW_FLIGHT_EXPORT ClientTransport { + public: + virtual ~ClientTransport() = default; + + /// Initialize the client. + virtual Status Init(const FlightClientOptions& options, const Location& location, + const arrow::internal::Uri& uri) = 0; + /// Close the client. Once this returns, the client is no longer usable. 
+ virtual Status Close() = 0; + + virtual Status Authenticate(const FlightCallOptions& options, + std::unique_ptr auth_handler); + virtual arrow::Result> AuthenticateBasicToken( + const FlightCallOptions& options, const std::string& username, + const std::string& password); + virtual Status DoAction(const FlightCallOptions& options, const Action& action, + std::unique_ptr* results); + virtual Status ListActions(const FlightCallOptions& options, + std::vector* actions); + virtual Status GetFlightInfo(const FlightCallOptions& options, + const FlightDescriptor& descriptor, + std::unique_ptr* info); + virtual void GetFlightInfoAsync(const FlightCallOptions& options, + const FlightDescriptor& descriptor, + std::shared_ptr> listener); + virtual Status PollFlightInfo(const FlightCallOptions& options, + const FlightDescriptor& descriptor, + std::unique_ptr* info); + virtual arrow::Result> GetSchema( + const FlightCallOptions& options, const FlightDescriptor& descriptor); + virtual Status ListFlights(const FlightCallOptions& options, const Criteria& criteria, + std::unique_ptr* listing); + virtual Status DoGet(const FlightCallOptions& options, const Ticket& ticket, + std::unique_ptr* stream); + virtual Status DoPut(const FlightCallOptions& options, + std::unique_ptr* stream); + virtual Status DoExchange(const FlightCallOptions& options, + std::unique_ptr* stream); + + bool supports_async() const { return CheckAsyncSupport().ok(); } + virtual Status CheckAsyncSupport() const { + return Status::NotImplemented( + "this Flight transport does not support async operations"); + } + + static void SetAsyncRpc(AsyncListenerBase* listener, std::unique_ptr&& rpc); + static AsyncRpc* GetAsyncRpc(AsyncListenerBase* listener); + static std::unique_ptr ReleaseAsyncRpc(AsyncListenerBase* listener); +}; + +/// A registry of transport implementations. +class ARROW_FLIGHT_EXPORT TransportRegistry { + public: + using ClientFactory = std::function>()>; + using ServerFactory = std::function>( + FlightServerBase*, std::shared_ptr memory_manager)>; + + TransportRegistry(); + ~TransportRegistry(); + + arrow::Result> MakeClient( + const std::string& scheme) const; + arrow::Result> MakeServer( + const std::string& scheme, FlightServerBase* base, + std::shared_ptr memory_manager) const; + + Status RegisterClient(const std::string& scheme, ClientFactory factory); + Status RegisterServer(const std::string& scheme, ServerFactory factory); + + private: + class Impl; + std::unique_ptr impl_; +}; + +/// \brief Get the registry of transport implementations. +ARROW_FLIGHT_EXPORT +TransportRegistry* GetDefaultTransportRegistry(); + +//------------------------------------------------------------ +// Async APIs + +/// \brief Transport-specific state for an async RPC. +/// +/// Transport implementations may subclass this to store their own +/// state, and stash an instance in a user-supplied AsyncListener via +/// ClientTransport::GetAsyncRpc and ClientTransport::SetAsyncRpc. +/// +/// This API is EXPERIMENTAL. +class ARROW_FLIGHT_EXPORT AsyncRpc { + public: + virtual ~AsyncRpc() = default; + /// \brief Request cancellation of the RPC. 
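+  // A registration sketch for the TransportRegistry above, assuming a
+  // ClientTransport subclass MyClientTransport (hypothetical) serving a new
+  // "mytransport" URI scheme:
+  //
+  //   auto* registry = GetDefaultTransportRegistry();
+  //   ARROW_RETURN_NOT_OK(registry->RegisterClient(
+  //       "mytransport",
+  //       []() -> arrow::Result<std::unique_ptr<ClientTransport>> {
+  //         return std::make_unique<MyClientTransport>();
+  //       }));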
+ virtual void TryCancel() {} + + /// Only needed for DoPut/DoExchange + virtual void Begin(const FlightDescriptor& descriptor, std::shared_ptr schema) { + } + /// Only needed for DoPut/DoExchange + virtual void Write(arrow::flight::FlightStreamChunk chunk) {} + /// Only needed for DoPut/DoExchange + virtual void DoneWriting() {} +}; + +//------------------------------------------------------------ +// Error propagation helpers + +/// \brief Abstract error status. +/// +/// Transport implementations may use side channels (e.g. HTTP +/// trailers) to convey additional information to reconstruct the +/// original C++ status for implementations that can use it. +struct ARROW_FLIGHT_EXPORT TransportStatus { + TransportStatusCode code; + std::string message; + + /// \brief Convert a C++ status to an abstract transport status. + static TransportStatus FromStatus(const Status& arrow_status); + + /// \brief Reconstruct a string-encoded TransportStatus. + static TransportStatus FromCodeStringAndMessage(const std::string& code_str, + std::string message); + + /// \brief Convert an abstract transport status to a C++ status. + Status ToStatus() const; +}; + +/// \brief Convert the string representation of an Arrow status code +/// back to an Arrow status. +ARROW_FLIGHT_EXPORT +Status ReconstructStatus(const std::string& code_str, const Status& current_status, + std::optional message, + std::optional detail_message, + std::optional detail_bin, + std::shared_ptr detail); + +} // namespace internal +} // namespace flight +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h new file mode 100644 index 0000000000000000000000000000000000000000..28a00f12a7a616136beb328d20120d6458294eab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "parquet/exception.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..7e746e8c5bbf551e84431552f688a493e2d62bc4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +// Column reader API +#include "parquet/column_reader.h" +#include "parquet/column_scanner.h" +#include "parquet/exception.h" +#include "parquet/file_reader.h" +#include "parquet/metadata.h" +#include "parquet/platform.h" +#include "parquet/printer.h" +#include "parquet/properties.h" +#include "parquet/statistics.h" + +// Schemas +#include "parquet/api/schema.h" + +// IO +#include "parquet/api/io.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..7ca714f47b5448974c460e424ab3821d10f7a384 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +// Schemas +#include "parquet/schema.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..b072dcf74dea7233723ae55599d95be47c674716 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include "parquet/api/io.h"
+#include "parquet/api/schema.h"
+#include "parquet/column_writer.h"
+#include "parquet/exception.h"
+#include "parquet/file_writer.h"
+#include "parquet/statistics.h"
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e46ca43f7b18ce0021cdd8064efde70f39f8eaa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h
@@ -0,0 +1,379 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+// N.B. we don't include async_generator.h as it's relatively heavy
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "parquet/file_reader.h"
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+
+namespace arrow {
+
+class ChunkedArray;
+class KeyValueMetadata;
+class RecordBatchReader;
+struct Scalar;
+class Schema;
+class Table;
+class RecordBatch;
+
+} // namespace arrow
+
+namespace parquet {
+
+class FileMetaData;
+class SchemaDescriptor;
+
+namespace arrow {
+
+class ColumnChunkReader;
+class ColumnReader;
+struct SchemaManifest;
+class RowGroupReader;
+
+/// \brief Arrow read adapter class for deserializing Parquet files as Arrow row batches.
+///
+/// This interface caters for different use cases and thus provides different
+/// interfaces. In its most simplistic form, we cater for a user that wants to
+/// read the whole Parquet file at once with the `FileReader::ReadTable` method.
+///
+/// More advanced users that also want to implement parallelism on top of each
+/// single Parquet file should do this on the RowGroup level. For this, they can
+/// call `FileReader::RowGroup(i)->ReadTable` to receive only the specified
+/// RowGroup as a table.
+///
+/// In the most advanced situation, where a consumer wants to independently read
+/// RowGroups in parallel and consume each column individually, they can call
+/// `FileReader::RowGroup(i)->Column(j)->Read` and receive an
+/// `arrow::ChunkedArray` instance.
+///
+/// Finally, one can also get a stream of record batches using
+/// `FileReader::GetRecordBatchReader()`. This can internally decode columns
+/// in parallel if use_threads was enabled in the ArrowReaderProperties.
+///
+/// The Parquet format supports an optional integer field_id which can be assigned
+/// to a field. Arrow will convert these field IDs to a metadata key named
+/// PARQUET:field_id on the appropriate field.
+// TODO(wesm): nested data does not always make sense with this user
+// interface unless you are only reading a single leaf node from a branch of
+// a table. For example:
+//
+// repeated group data {
+//   optional group record {
+//     optional int32 val1;
+//     optional byte_array val2;
+//     optional bool val3;
+//   }
+//   optional int32 val4;
+// }
+//
+// In the Parquet file, there are 4 leaf nodes:
+//
+// * data.record.val1
+// * data.record.val2
+// * data.record.val3
+// * data.val4
+//
+// When materializing this data in an Arrow array, we would have:
+//
+//  data: list<struct<
+//    record: struct<
+//     val1: int32,
+//     val2: string (= list<uint8>),
+//     val3: bool,
+//    >,
+//    val4: int32
+//  >>
+//
+// However, in the Parquet format, each leaf node has its own repetition and
+// definition levels describing the structure of the intermediate nodes in
+// this array structure. Thus, we will need to scan the leaf data for a group
+// of leaf nodes part of the same type tree to create a single result Arrow
+// nested array structure.
+//
+// This is additionally complicated by "chunky" repeated fields or very large
+// byte arrays
+class PARQUET_EXPORT FileReader {
+ public:
+  /// Factory function to create a FileReader from a ParquetFileReader and properties
+  static ::arrow::Status Make(::arrow::MemoryPool* pool,
+                              std::unique_ptr<ParquetFileReader> reader,
+                              const ArrowReaderProperties& properties,
+                              std::unique_ptr<FileReader>* out);
+
+  /// Factory function to create a FileReader from a ParquetFileReader
+  static ::arrow::Status Make(::arrow::MemoryPool* pool,
+                              std::unique_ptr<ParquetFileReader> reader,
+                              std::unique_ptr<FileReader>* out);
+
+  // Since the distribution of columns amongst a Parquet file's row groups may
+  // be uneven (the number of values in each column chunk can be different), we
+  // provide a column-oriented read interface. The ColumnReader hides the
+  // details of paging through the file's row groups and yielding
+  // fully-materialized arrow::Array instances
+  //
+  // Returns error status if the column of interest is not flat.
+  // The indicated column index is relative to the schema
+  virtual ::arrow::Status GetColumn(int i, std::unique_ptr<ColumnReader>* out) = 0;
+
+  /// \brief Return arrow schema for all the columns.
+  virtual ::arrow::Status GetSchema(std::shared_ptr<::arrow::Schema>* out) = 0;
+
+  /// \brief Read column as a whole into a chunked array.
+  ///
+  /// The index i refers to the index of the top level schema field, which may
+  /// be nested or flat - e.g.
+  ///
+  /// 0 foo.bar
+  ///   foo.bar.baz
+  ///   foo.qux
+  /// 1 foo2
+  /// 2 foo3
+  ///
+  /// i=0 will read the entire foo struct, i=1 the foo2 primitive column etc.
+  virtual ::arrow::Status ReadColumn(int i,
+                                     std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
+
+  /// \brief Return a RecordBatchReader of all row groups and columns.
+  virtual ::arrow::Status GetRecordBatchReader(
+      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
+
+  /// \brief Return a RecordBatchReader of row groups selected from row_group_indices.
+  ///
+  /// Note that the ordering in row_group_indices matters. FileReaders must outlive
+  /// their RecordBatchReaders.
+  ///
+  /// \returns error Status if row_group_indices contains an invalid index
+  virtual ::arrow::Status GetRecordBatchReader(
+      const std::vector<int>& row_group_indices,
+      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
+
+  /// \brief Return a RecordBatchReader of row groups selected from
+  /// row_group_indices, whose columns are selected by column_indices.
+  ///
+  /// Note that the ordering in row_group_indices and column_indices
+  /// matters. FileReaders must outlive their RecordBatchReaders.
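+  ///
+  /// A minimal usage sketch, assuming an open FileReader `reader` (the
+  /// indices are illustrative; error handling elided):
+  /// \code{.cpp}
+  /// std::unique_ptr<::arrow::RecordBatchReader> rb_reader;
+  /// PARQUET_THROW_NOT_OK(reader->GetRecordBatchReader({0}, {0, 2}, &rb_reader));
+  /// std::shared_ptr<::arrow::RecordBatch> batch;
+  /// while (rb_reader->ReadNext(&batch).ok() && batch != nullptr) {
+  ///   // consume batch
+  /// }
+  /// \endcode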
+  ///
+  /// \returns error Status if either row_group_indices or column_indices
+  /// contains an invalid index
+  virtual ::arrow::Status GetRecordBatchReader(
+      const std::vector<int>& row_group_indices,
+      const std::vector<int>& column_indices,
+      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
+
+  /// \brief Return a RecordBatchReader of row groups selected from
+  /// row_group_indices, whose columns are selected by column_indices.
+  ///
+  /// Note that the ordering in row_group_indices and column_indices
+  /// matters. FileReaders must outlive their RecordBatchReaders.
+  ///
+  /// \param row_group_indices which row groups to read (order determines read order).
+  /// \param column_indices which columns to read (order determines output schema).
+  /// \param[out] out record batch stream from parquet data.
+  ///
+  /// \returns error Status if either row_group_indices or column_indices
+  /// contains an invalid index
+  ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
+                                       const std::vector<int>& column_indices,
+                                       std::shared_ptr<::arrow::RecordBatchReader>* out);
+  ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
+                                       std::shared_ptr<::arrow::RecordBatchReader>* out);
+  ::arrow::Status GetRecordBatchReader(std::shared_ptr<::arrow::RecordBatchReader>* out);
+
+  /// \brief Return a generator of record batches.
+  ///
+  /// The FileReader must outlive the generator, so this requires that you pass in a
+  /// shared_ptr.
+  ///
+  /// \returns error Result if either row_group_indices or column_indices contains an
+  /// invalid index
+  virtual ::arrow::Result<
+      std::function<::arrow::Future<std::shared_ptr<::arrow::RecordBatch>>()>>
+  GetRecordBatchGenerator(std::shared_ptr<FileReader> reader,
+                          const std::vector<int> row_group_indices,
+                          const std::vector<int> column_indices,
+                          ::arrow::internal::Executor* cpu_executor = NULLPTR,
+                          int64_t rows_to_readahead = 0) = 0;
+
+  /// Read all columns into a Table
+  virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0;
+
+  /// \brief Read the given columns into a Table
+  ///
+  /// The indicated column indices are relative to the internal representation
+  /// of the parquet table. For instance:
+  /// 0 foo.bar
+  ///   foo.bar.baz 0
+  ///   foo.bar.baz2 1
+  ///   foo.qux 2
+  /// 1 foo2 3
+  /// 2 foo3 4
+  ///
+  /// i=0 will read foo.bar.baz, i=1 will read only foo.bar.baz2 and so on.
+  /// Only leaf fields have indices; foo itself doesn't have an index.
+  /// To get the index for a particular leaf field, one can use
+  /// manifest().schema_fields to get the top level fields, and then walk the
+  /// tree to identify the relevant leaf fields and access its column_index.
+  /// To get the total number of leaf fields, use FileMetadata.num_columns().
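+  ///
+  /// A short sketch, assuming an open FileReader `reader` (the leaf indices
+  /// refer to the example above):
+  /// \code{.cpp}
+  /// std::shared_ptr<::arrow::Table> table;
+  /// // Read only foo.bar.baz and foo.bar.baz2 into a Table.
+  /// PARQUET_THROW_NOT_OK(reader->ReadTable({0, 1}, &table));
+  /// \endcode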
+ virtual ::arrow::Status ReadTable(const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroup(int i, const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroup(int i, std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroups(const std::vector& row_groups, + const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroups(const std::vector& row_groups, + std::shared_ptr<::arrow::Table>* out) = 0; + + /// \brief Scan file contents with one thread, return number of rows + virtual ::arrow::Status ScanContents(std::vector columns, + const int32_t column_batch_size, + int64_t* num_rows) = 0; + + /// \brief Return a reader for the RowGroup, this object must not outlive the + /// FileReader. + virtual std::shared_ptr RowGroup(int row_group_index) = 0; + + /// \brief The number of row groups in the file + virtual int num_row_groups() const = 0; + + virtual ParquetFileReader* parquet_reader() const = 0; + + /// Set whether to use multiple threads during reads of multiple columns. + /// By default only one thread is used. + virtual void set_use_threads(bool use_threads) = 0; + + /// Set number of records to read per batch for the RecordBatchReader. + virtual void set_batch_size(int64_t batch_size) = 0; + + virtual const ArrowReaderProperties& properties() const = 0; + + virtual const SchemaManifest& manifest() const = 0; + + virtual ~FileReader() = default; +}; + +class RowGroupReader { + public: + virtual ~RowGroupReader() = default; + virtual std::shared_ptr Column(int column_index) = 0; + virtual ::arrow::Status ReadTable(const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0; + + private: + struct Iterator; +}; + +class ColumnChunkReader { + public: + virtual ~ColumnChunkReader() = default; + virtual ::arrow::Status Read(std::shared_ptr<::arrow::ChunkedArray>* out) = 0; +}; + +// At this point, the column reader is a stream iterator. It only knows how to +// read the next batch of values for a particular column from the file until it +// runs out. +// +// We also do not expose any internal Parquet details, such as row groups. This +// might change in the future. +class PARQUET_EXPORT ColumnReader { + public: + virtual ~ColumnReader() = default; + + // Scan the next array of the indicated size. The actual size of the + // returned array may be less than the passed size depending how much data is + // available in the file. + // + // When all the data in the file has been exhausted, the result is set to + // nullptr. + // + // Returns Status::OK on a successful read, including if you have exhausted + // the data available in the file. 
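+  //
+  // A typical drain loop, assuming `col_reader` was obtained from
+  // FileReader::GetColumn (sketch only; the batch size is arbitrary):
+  //
+  //   std::shared_ptr<::arrow::ChunkedArray> batch;
+  //   do {
+  //     PARQUET_THROW_NOT_OK(col_reader->NextBatch(1024, &batch));
+  //   } while (batch != nullptr);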
+ virtual ::arrow::Status NextBatch(int64_t batch_size, + std::shared_ptr<::arrow::ChunkedArray>* out) = 0; +}; + +/// \brief Experimental helper class for bindings (like Python) that struggle +/// either with std::move or C++ exceptions +class PARQUET_EXPORT FileReaderBuilder { + public: + FileReaderBuilder(); + + /// Create FileReaderBuilder from Arrow file and optional properties / metadata + ::arrow::Status Open(std::shared_ptr<::arrow::io::RandomAccessFile> file, + const ReaderProperties& properties = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + /// Create FileReaderBuilder from file path and optional properties / metadata + ::arrow::Status OpenFile(const std::string& path, bool memory_map = false, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + ParquetFileReader* raw_reader() { return raw_reader_.get(); } + + /// Set Arrow MemoryPool for memory allocation + FileReaderBuilder* memory_pool(::arrow::MemoryPool* pool); + /// Set Arrow reader properties + FileReaderBuilder* properties(const ArrowReaderProperties& arg_properties); + /// Build FileReader instance + ::arrow::Status Build(std::unique_ptr* out); + ::arrow::Result> Build(); + + private: + ::arrow::MemoryPool* pool_; + ArrowReaderProperties properties_; + std::unique_ptr raw_reader_; +}; + +/// \defgroup parquet-arrow-reader-factories Factory functions for Parquet Arrow readers +/// +/// @{ + +/// \brief Build FileReader from Arrow file and MemoryPool +/// +/// Advanced settings are supported through the FileReaderBuilder class. +PARQUET_EXPORT +::arrow::Status OpenFile(std::shared_ptr<::arrow::io::RandomAccessFile>, + ::arrow::MemoryPool* allocator, + std::unique_ptr* reader); + +/// @} + +PARQUET_EXPORT +::arrow::Status StatisticsAsScalars(const Statistics& Statistics, + std::shared_ptr<::arrow::Scalar>* min, + std::shared_ptr<::arrow::Scalar>* max); + +namespace internal { + +PARQUET_EXPORT +::arrow::Status FuzzReader(const uint8_t* data, int64_t size); + +} // namespace internal +} // namespace arrow +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..dd60fde43422889c53ebd7cf86fbac99c8c6f282 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+
+#include "parquet/level_conversion.h"
+#include "parquet/platform.h"
+#include "parquet/schema.h"
+
+namespace parquet {
+
+class ArrowReaderProperties;
+class ArrowWriterProperties;
+class WriterProperties;
+
+namespace arrow {
+
+/// \defgroup arrow-to-parquet-schema-conversion Functions to convert an Arrow
+/// schema into a Parquet schema.
+///
+/// @{
+
+PARQUET_EXPORT
+::arrow::Status FieldToNode(const std::shared_ptr<::arrow::Field>& field,
+                            const WriterProperties& properties,
+                            const ArrowWriterProperties& arrow_properties,
+                            schema::NodePtr* out);
+
+PARQUET_EXPORT
+::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
+                                const WriterProperties& properties,
+                                const ArrowWriterProperties& arrow_properties,
+                                std::shared_ptr<SchemaDescriptor>* out);
+
+PARQUET_EXPORT
+::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
+                                const WriterProperties& properties,
+                                std::shared_ptr<SchemaDescriptor>* out);
+
+/// @}
+
+/// \defgroup parquet-to-arrow-schema-conversion Functions to convert a Parquet
+/// schema into an Arrow schema.
+///
+/// @{
+
+PARQUET_EXPORT
+::arrow::Status FromParquetSchema(
+    const SchemaDescriptor* parquet_schema, const ArrowReaderProperties& properties,
+    const std::shared_ptr<const ::arrow::KeyValueMetadata>& key_value_metadata,
+    std::shared_ptr<::arrow::Schema>* out);
+
+PARQUET_EXPORT
+::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
+                                  const ArrowReaderProperties& properties,
+                                  std::shared_ptr<::arrow::Schema>* out);
+
+PARQUET_EXPORT
+::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
+                                  std::shared_ptr<::arrow::Schema>* out);
+
+/// @}
+
+/// \brief Bridge between an arrow::Field and parquet column indices.
+struct PARQUET_EXPORT SchemaField {
+  std::shared_ptr<::arrow::Field> field;
+  std::vector<SchemaField> children;
+
+  // Only set for leaf nodes
+  int column_index = -1;
+
+  parquet::internal::LevelInfo level_info;
+
+  bool is_leaf() const { return column_index != -1; }
+};
+
+/// \brief Bridge between a parquet Schema and an arrow Schema.
+///
+/// Expose parquet columns as a tree structure. Useful to traverse and link
+/// between arrow's Schema and parquet's Schema.
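+///
+/// A minimal construction sketch, assuming `descr` and `metadata` come from
+/// an open file's FileMetaData (error handling elided):
+/// \code{.cpp}
+/// SchemaManifest manifest;
+/// PARQUET_THROW_NOT_OK(SchemaManifest::Make(descr, metadata,
+///                                           ArrowReaderProperties(), &manifest));
+/// const SchemaField* field = NULLPTR;
+/// PARQUET_THROW_NOT_OK(manifest.GetColumnField(/*column_index=*/0, &field));
+/// \endcode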
+struct PARQUET_EXPORT SchemaManifest { + static ::arrow::Status Make( + const SchemaDescriptor* schema, + const std::shared_ptr& metadata, + const ArrowReaderProperties& properties, SchemaManifest* manifest); + + const SchemaDescriptor* descr; + std::shared_ptr<::arrow::Schema> origin_schema; + std::shared_ptr schema_metadata; + std::vector schema_fields; + + std::unordered_map column_index_to_field; + std::unordered_map child_to_parent; + + ::arrow::Status GetColumnField(int column_index, const SchemaField** out) const { + auto it = column_index_to_field.find(column_index); + if (it == column_index_to_field.end()) { + return ::arrow::Status::KeyError("Column index ", column_index, + " not found in schema manifest, may be malformed"); + } + *out = it->second; + return ::arrow::Status::OK(); + } + + const SchemaField* GetParent(const SchemaField* field) const { + // Returns nullptr also if not found + auto it = child_to_parent.find(field); + if (it == child_to_parent.end()) { + return NULLPTR; + } + return it->second; + } + + /// Coalesce a list of field indices (relative to the equivalent arrow::Schema) which + /// correspond to the column root (first node below the parquet schema's root group) of + /// each leaf referenced in column_indices. + /// + /// For example, for leaves `a.b.c`, `a.b.d.e`, and `i.j.k` (column_indices=[0,1,3]) + /// the roots are `a` and `i` (return=[0,2]). + /// + /// root + /// -- a <------ + /// -- -- b | | + /// -- -- -- c | + /// -- -- -- d | + /// -- -- -- -- e + /// -- f + /// -- -- g + /// -- -- -- h + /// -- i <--- + /// -- -- j | + /// -- -- -- k + ::arrow::Result> GetFieldIndices( + const std::vector& column_indices) const { + const schema::GroupNode* group = descr->group_node(); + std::unordered_set already_added; + + std::vector out; + for (int column_idx : column_indices) { + if (column_idx < 0 || column_idx >= descr->num_columns()) { + return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid"); + } + + auto field_node = descr->GetColumnRoot(column_idx); + auto field_idx = group->FieldIndex(*field_node); + if (field_idx == -1) { + return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid"); + } + + if (already_added.insert(field_idx).second) { + out.push_back(field_idx); + } + } + return out; + } +}; + +} // namespace arrow +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..b2be1b3c5354d7a28c6fad23dd745a9d32bbb7d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h @@ -0,0 +1,524 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_decimal.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/random.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/decimal.h" +#include "arrow/util/float16.h" +#include "parquet/column_reader.h" +#include "parquet/test_util.h" + +namespace parquet { + +using internal::RecordReader; + +namespace arrow { + +using ::arrow::Array; +using ::arrow::ChunkedArray; +using ::arrow::Status; + +template +struct DecimalWithPrecisionAndScale { + static_assert(PRECISION >= 1 && PRECISION <= 38, "Invalid precision value"); + + using type = ::arrow::Decimal128Type; + static constexpr ::arrow::Type::type type_id = ::arrow::Decimal128Type::type_id; + static constexpr int32_t precision = PRECISION; + static constexpr int32_t scale = PRECISION - 1; +}; + +template +struct Decimal256WithPrecisionAndScale { + static_assert(PRECISION >= 1 && PRECISION <= 76, "Invalid precision value"); + + using type = ::arrow::Decimal256Type; + static constexpr ::arrow::Type::type type_id = ::arrow::Decimal256Type::type_id; + static constexpr int32_t precision = PRECISION; + static constexpr int32_t scale = PRECISION - 1; +}; + +template +::arrow::enable_if_floating_point NonNullArray( + size_t size, std::shared_ptr* out) { + using c_type = typename ArrowType::c_type; + std::vector values; + if constexpr (::arrow::is_half_float_type::value) { + values.resize(size); + test::random_float16_numbers(static_cast(size), 0, ::arrow::util::Float16(0.0f), + ::arrow::util::Float16(1.0f), values.data()); + } else { + ::arrow::random_real(size, 0, static_cast(0), static_cast(1), + &values); + } + ::arrow::NumericBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_integer NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 64, &values); + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_date NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 24, &values); + for (size_t i = 0; i < size; i++) { + values[i] *= 86400000; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_base_binary NonNullArray( + size_t size, std::shared_ptr* out) { + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + BuilderType builder; + for (size_t i = 0; i < size; i++) { + RETURN_NOT_OK(builder.Append("test-string")); + } + return builder.Finish(out); +} + +template +::arrow::enable_if_fixed_size_binary NonNullArray( + size_t size, std::shared_ptr* out) { + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + // set byte_width to the length of "fixed": 5 + // todo: find a way to generate test data with 
more diversity. + BuilderType builder(::arrow::fixed_size_binary(5)); + for (size_t i = 0; i < size; i++) { + RETURN_NOT_OK(builder.Append("fixed")); + } + return builder.Finish(out); +} + +template +static void random_decimals(int64_t n, uint32_t seed, int32_t precision, uint8_t* out) { + auto gen = ::arrow::random::RandomArrayGenerator(seed); + std::shared_ptr decimals; + if constexpr (byte_width == 16) { + decimals = gen.Decimal128(::arrow::decimal128(precision, 0), n); + } else { + decimals = gen.Decimal256(::arrow::decimal256(precision, 0), n); + } + std::memcpy(out, decimals->data()->GetValues(1, 0), byte_width * n); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NonNullArray(size_t size, std::shared_ptr* out) { + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale::scale; + + const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale); + ::arrow::Decimal128Builder builder(type); + const int32_t byte_width = + static_cast(*type).byte_width(); + + constexpr int32_t seed = 0; + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, kDecimalPrecision, + out_buf->mutable_data()); + + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size)); + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NonNullArray(size_t size, std::shared_ptr* out) { + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale::scale; + + const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale); + ::arrow::Decimal256Builder builder(type); + const int32_t byte_width = + static_cast(*type).byte_width(); + + constexpr int32_t seed = 0; + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, kDecimalPrecision, + out_buf->mutable_data()); + + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size)); + return builder.Finish(out); +} + +template +::arrow::enable_if_boolean NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 1, &values); + ::arrow::BooleanBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls. +template +::arrow::enable_if_floating_point NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr* out) { + using c_type = typename ArrowType::c_type; + std::vector values; + if constexpr (::arrow::is_half_float_type::value) { + values.resize(size); + test::random_float16_numbers(static_cast(size), 0, ::arrow::util::Float16(-1e4f), + ::arrow::util::Float16(1e4f), values.data()); + } else { + ::arrow::random_real(size, seed, static_cast(-1e10), + static_cast(1e10), &values); + } + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + ::arrow::NumericBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls. 
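+// (Nulls are placed at even indices: valid_bytes[2 * i] is cleared for
+// i < num_nulls, so num_nulls must not exceed size / 2.)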
+template +::arrow::enable_if_integer NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + ::arrow::randint(size, 0, 64, &values); + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +template +::arrow::enable_if_date NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + ::arrow::randint(size, 0, 24, &values); + for (size_t i = 0; i < size; i++) { + values[i] *= 86400000; + } + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet. +template +::arrow::enable_if_base_binary NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + BuilderType builder; + + const int kBufferSize = 10; + uint8_t buffer[kBufferSize]; + for (size_t i = 0; i < size; i++) { + if (!valid_bytes[i]) { + RETURN_NOT_OK(builder.AppendNull()); + } else { + ::arrow::random_bytes(kBufferSize, seed + static_cast(i), buffer); + if (ArrowType::is_utf8) { + // Trivially force data to be valid UTF8 by making it all ASCII + for (auto& byte : buffer) { + byte &= 0x7f; + } + } + RETURN_NOT_OK(builder.Append(buffer, kBufferSize)); + } + } + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet, +// same as NullableArray(..) 
+template +::arrow::enable_if_fixed_size_binary NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + const int byte_width = 10; + BuilderType builder(::arrow::fixed_size_binary(byte_width)); + + const int kBufferSize = byte_width; + uint8_t buffer[kBufferSize]; + for (size_t i = 0; i < size; i++) { + if (!valid_bytes[i]) { + RETURN_NOT_OK(builder.AppendNull()); + } else { + ::arrow::random_bytes(kBufferSize, seed + static_cast(i), buffer); + RETURN_NOT_OK(builder.Append(buffer)); + } + } + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NullableArray(size_t size, size_t num_nulls, uint32_t seed, + std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, '\1'); + + for (size_t i = 0; i < num_nulls; ++i) { + valid_bytes[i * 2] = '\0'; + } + + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale::scale; + const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale); + const int32_t byte_width = + static_cast(*type).byte_width(); + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + + random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, precision, + out_buf->mutable_data()); + + ::arrow::Decimal128Builder builder(type); + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data())); + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NullableArray(size_t size, size_t num_nulls, uint32_t seed, + std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, '\1'); + + for (size_t i = 0; i < num_nulls; ++i) { + valid_bytes[i * 2] = '\0'; + } + + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale::scale; + const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale); + const int32_t byte_width = + static_cast(*type).byte_width(); + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + + random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, precision, + out_buf->mutable_data()); + + ::arrow::Decimal256Builder builder(type); + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet. +template +::arrow::enable_if_boolean NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + + ::arrow::randint(size, 0, 1, &values); + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + ::arrow::BooleanBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +/// Wrap an Array into a ListArray by splitting it up into size lists. +/// +/// This helper function only supports (size/2) nulls. 
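+///
+/// For example, wrapping a 70-element values array with size=10 and
+/// null_count=2 yields two null lists, one always-empty list (index 1), and
+/// seven lists of ten values each (70 / (10 - 2 - 1)).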
+Status MakeListArray(const std::shared_ptr& values, int64_t size, + int64_t null_count, const std::string& item_name, + bool nullable_values, std::shared_ptr<::arrow::ListArray>* out) { + // We always include an empty list + int64_t non_null_entries = size - null_count - 1; + int64_t length_per_entry = values->length() / non_null_entries; + + auto offsets = AllocateBuffer(); + RETURN_NOT_OK(offsets->Resize((size + 1) * sizeof(int32_t))); + int32_t* offsets_ptr = reinterpret_cast(offsets->mutable_data()); + + auto null_bitmap = AllocateBuffer(); + int64_t bitmap_size = ::arrow::bit_util::BytesForBits(size); + RETURN_NOT_OK(null_bitmap->Resize(bitmap_size)); + uint8_t* null_bitmap_ptr = null_bitmap->mutable_data(); + memset(null_bitmap_ptr, 0, bitmap_size); + + int32_t current_offset = 0; + for (int64_t i = 0; i < size; i++) { + offsets_ptr[i] = current_offset; + if (!(((i % 2) == 0) && ((i / 2) < null_count))) { + // Non-null list (list with index 1 is always empty). + ::arrow::bit_util::SetBit(null_bitmap_ptr, i); + if (i != 1) { + current_offset += static_cast(length_per_entry); + } + } + } + offsets_ptr[size] = static_cast(values->length()); + + auto value_field = ::arrow::field(item_name, values->type(), nullable_values); + *out = std::make_shared<::arrow::ListArray>(::arrow::list(value_field), size, offsets, + values, null_bitmap, null_count); + + return Status::OK(); +} + +// Make an array containing only empty lists, with a null values array +Status MakeEmptyListsArray(int64_t size, std::shared_ptr* out_array) { + // Allocate an offsets buffer containing only zeroes + const int64_t offsets_nbytes = (size + 1) * sizeof(int32_t); + ARROW_ASSIGN_OR_RAISE(auto offsets_buffer, ::arrow::AllocateBuffer(offsets_nbytes)); + memset(offsets_buffer->mutable_data(), 0, offsets_nbytes); + + auto value_field = + ::arrow::field("item", ::arrow::float64(), false /* nullable_values */); + auto list_type = ::arrow::list(value_field); + + std::vector> child_buffers = {nullptr /* null bitmap */, + nullptr /* values */}; + auto child_data = + ::arrow::ArrayData::Make(value_field->type(), 0, std::move(child_buffers)); + + std::vector> buffers = {nullptr /* bitmap */, + std::move(offsets_buffer)}; + auto array_data = ::arrow::ArrayData::Make(list_type, size, std::move(buffers)); + array_data->child_data.push_back(child_data); + + *out_array = ::arrow::MakeArray(array_data); + return Status::OK(); +} + +std::shared_ptr<::arrow::Table> MakeSimpleTable( + const std::shared_ptr& values, bool nullable) { + auto schema = ::arrow::schema({::arrow::field("col", values->type(), nullable)}); + return ::arrow::Table::Make(schema, {values}); +} + +std::shared_ptr<::arrow::Table> MakeSimpleTable(const std::shared_ptr& values, + bool nullable) { + auto carr = std::make_shared<::arrow::ChunkedArray>(values); + return MakeSimpleTable(carr, nullable); +} + +template +void ExpectArray(T* expected, Array* result) { + auto p_array = static_cast<::arrow::PrimitiveArray*>(result); + for (int i = 0; i < result->length(); i++) { + EXPECT_EQ(expected[i], reinterpret_cast(p_array->values()->data())[i]); + } +} + +template +void ExpectArrayT(void* expected, Array* result) { + ::arrow::PrimitiveArray* p_array = static_cast<::arrow::PrimitiveArray*>(result); + for (int64_t i = 0; i < result->length(); i++) { + EXPECT_EQ(reinterpret_cast(expected)[i], + reinterpret_cast( + p_array->values()->data())[i]); + } +} + +template <> +void ExpectArrayT<::arrow::BooleanType>(void* expected, Array* result) { + ::arrow::BooleanBuilder builder; + 
ARROW_EXPECT_OK( + builder.AppendValues(reinterpret_cast(expected), result->length())); + + std::shared_ptr expected_array; + ARROW_EXPECT_OK(builder.Finish(&expected_array)); + EXPECT_TRUE(result->Equals(*expected_array)); +} + +} // namespace arrow + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..1decafedc97fd1e4da83300140cee19f0bab9de1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "parquet/platform.h" +#include "parquet/properties.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class RecordBatch; +class Schema; +class Table; + +} // namespace arrow + +namespace parquet { + +class FileMetaData; +class ParquetFileWriter; + +namespace arrow { + +/// \brief Iterative FileWriter class +/// +/// For basic usage, can write a Table at a time, creating one or more row +/// groups per write call. +/// +/// For advanced usage, can write column-by-column: Start a new RowGroup or +/// Chunk with NewRowGroup, then write column-by-column the whole column chunk. +/// +/// If PARQUET:field_id is present as a metadata key on a field, and the corresponding +/// value is a nonnegative integer, then it will be used as the field_id in the parquet +/// file. +class PARQUET_EXPORT FileWriter { + public: + static ::arrow::Status Make(MemoryPool* pool, std::unique_ptr writer, + std::shared_ptr<::arrow::Schema> schema, + std::shared_ptr arrow_properties, + std::unique_ptr* out); + + /// \brief Try to create an Arrow to Parquet file writer. + /// + /// \param schema schema of data that will be passed. + /// \param pool memory pool to use. + /// \param sink output stream to write Parquet data. + /// \param properties general Parquet writer properties. + /// \param arrow_properties Arrow-specific writer properties. + /// + /// \since 11.0.0 + static ::arrow::Result> Open( + const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties = default_writer_properties(), + std::shared_ptr arrow_properties = + default_arrow_writer_properties()); + + ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.") + static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties, + std::unique_ptr* writer); + ARROW_DEPRECATED("Deprecated in 11.0.0. 
Use Result-returning variants instead.") + static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties, + std::shared_ptr arrow_properties, + std::unique_ptr* writer); + + /// Return the Arrow schema to be written to. + virtual std::shared_ptr<::arrow::Schema> schema() const = 0; + + /// \brief Write a Table to Parquet. + /// + /// \param table Arrow table to write. + /// \param chunk_size maximum number of rows to write per row group. + virtual ::arrow::Status WriteTable( + const ::arrow::Table& table, int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH) = 0; + + /// \brief Start a new row group. + /// + /// Returns an error if not all columns have been written. + /// + /// \param chunk_size the number of rows in the next row group. + virtual ::arrow::Status NewRowGroup(int64_t chunk_size) = 0; + + /// \brief Write ColumnChunk in row group using an array. + virtual ::arrow::Status WriteColumnChunk(const ::arrow::Array& data) = 0; + + /// \brief Write ColumnChunk in row group using slice of a ChunkedArray + virtual ::arrow::Status WriteColumnChunk( + const std::shared_ptr<::arrow::ChunkedArray>& data, int64_t offset, + int64_t size) = 0; + + /// \brief Write ColumnChunk in a row group using a ChunkedArray + virtual ::arrow::Status WriteColumnChunk( + const std::shared_ptr<::arrow::ChunkedArray>& data) = 0; + + /// \brief Start a new buffered row group. + /// + /// Returns an error if not all columns have been written. + virtual ::arrow::Status NewBufferedRowGroup() = 0; + + /// \brief Write a RecordBatch into the buffered row group. + /// + /// Multiple RecordBatches can be written into the same row group + /// through this method. + /// + /// WriterProperties.max_row_group_length() is respected and a new + /// row group will be created if the current row group exceeds the + /// limit. + /// + /// Batches get flushed to the output stream once NewBufferedRowGroup() + /// or Close() is called. + /// + /// WARNING: If you are writing multiple files in parallel in the same + /// executor, deadlock may occur if ArrowWriterProperties::use_threads + /// is set to true to write columns in parallel. Please disable use_threads + /// option in this case. + virtual ::arrow::Status WriteRecordBatch(const ::arrow::RecordBatch& batch) = 0; + + /// \brief Write the footer and close the file. + virtual ::arrow::Status Close() = 0; + virtual ~FileWriter(); + + virtual MemoryPool* memory_pool() const = 0; + /// \brief Return the file metadata, only available after calling Close(). + virtual const std::shared_ptr metadata() const = 0; +}; + +/// \brief Write Parquet file metadata only to indicated Arrow OutputStream +PARQUET_EXPORT +::arrow::Status WriteFileMetaData(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +/// \brief Write metadata-only Parquet file to indicated Arrow OutputStream +PARQUET_EXPORT +::arrow::Status WriteMetaDataFile(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +/// \brief Write a Table to Parquet. +/// +/// This writes one table in a single shot. To write a Parquet file with +/// multiple tables iteratively, see parquet::arrow::FileWriter. +/// +/// \param table Table to write. +/// \param pool memory pool to use. +/// \param sink output stream to write Parquet data. +/// \param chunk_size maximum number of rows to write per row group. +/// \param properties general Parquet writer properties. 
+/// \param arrow_properties Arrow-specific writer properties. +::arrow::Status PARQUET_EXPORT +WriteTable(const ::arrow::Table& table, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH, + std::shared_ptr properties = default_writer_properties(), + std::shared_ptr arrow_properties = + default_arrow_writer_properties()); + +} // namespace arrow +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..909563d013fedcc6604ec8decc3d7384e0b2d693 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h @@ -0,0 +1,363 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/logging.h" +#include "parquet/hasher.h" +#include "parquet/platform.h" +#include "parquet/types.h" + +namespace parquet { + +// A Bloom filter is a compact structure to indicate whether an item is not in a set or +// probably in a set. The Bloom filter usually consists of a bit set that represents a +// set of elements, a hash strategy and a Bloom filter algorithm. +class PARQUET_EXPORT BloomFilter { + public: + // Maximum Bloom filter size, it sets to HDFS default block size 128MB + // This value will be reconsidered when implementing Bloom filter producer. + static constexpr uint32_t kMaximumBloomFilterBytes = 128 * 1024 * 1024; + + /// Determine whether an element exist in set or not. + /// + /// @param hash the element to contain. + /// @return false if value is definitely not in set, and true means PROBABLY + /// in set. + virtual bool FindHash(uint64_t hash) const = 0; + + /// Insert element to set represented by Bloom filter bitset. + /// @param hash the hash of value to insert into Bloom filter. + virtual void InsertHash(uint64_t hash) = 0; + + /// Insert elements to set represented by Bloom filter bitset. + /// @param hashes the hash values to insert into Bloom filter. + /// @param num_values the number of hash values to insert. + virtual void InsertHashes(const uint64_t* hashes, int num_values) = 0; + + /// Write this Bloom filter to an output stream. A Bloom filter structure should + /// include bitset length, hash strategy, algorithm, and bitset. + /// + /// @param sink the output stream to write + virtual void WriteTo(ArrowOutputStream* sink) const = 0; + + /// Get the number of bytes of bitset + virtual uint32_t GetBitsetSize() const = 0; + + /// Compute hash for 32 bits value by using its plain encoding result. 
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(int32_t value) const = 0;
+
+  /// Compute hash for a 64-bit value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(int64_t value) const = 0;
+
+  /// Compute hash for a float value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(float value) const = 0;
+
+  /// Compute hash for a double value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(double value) const = 0;
+
+  /// Compute hash for an Int96 value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(const Int96* value) const = 0;
+
+  /// Compute hash for a ByteArray value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(const ByteArray* value) const = 0;
+
+  /// Compute hash for a fixed-length byte array value by using its plain encoding
+  /// result.
+  ///
+  /// @param value the value address.
+  /// @param len the value length.
+  /// @return hash result.
+  virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0;
+
+  /// Batch compute hashes for 32-bit values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for 64-bit values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for float values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for double values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for Int96 values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for ByteArray values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const ByteArray* values, int num_values,
+                      uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for fixed-length byte array values by using their plain
+  /// encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param type_len the value length.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length must be equal to
+  /// num_values.
+  virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values,
+                      uint64_t* hashes) const = 0;
+
+  virtual ~BloomFilter() = default;
+
+ protected:
+  // Hash strategy available for Bloom filter.
+  enum class HashStrategy : uint32_t { XXHASH = 0 };
+
+  // Bloom filter algorithm.
+  enum class Algorithm : uint32_t { BLOCK = 0 };
+
+  enum class CompressionStrategy : uint32_t { UNCOMPRESSED = 0 };
+};
+
+/// The BlockSplitBloomFilter is implemented using block-based Bloom filters from
+/// Putze et al.'s "Cache-, Hash- and Space-Efficient Bloom filters". The basic idea
+/// is to hash the item to a tiny Bloom filter whose size fits in a single cache line
+/// or smaller.
+///
+/// This implementation sets 8 bits in each tiny Bloom filter. Each tiny Bloom
+/// filter is 32 bytes to take advantage of 32-byte SIMD instructions.
+class PARQUET_EXPORT BlockSplitBloomFilter : public BloomFilter {
+ public:
+  /// The constructor of BlockSplitBloomFilter. It uses XXH64 as the hash function.
+  ///
+  /// \param pool memory pool to use.
+  explicit BlockSplitBloomFilter(
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  /// Initialize the BlockSplitBloomFilter. num_bytes should be within
+  /// [kMinimumBloomFilterBytes, kMaximumBloomFilterBytes]; if it is out of range it
+  /// will be rounded up/down to the lower/upper bound, and it will also be rounded
+  /// up to a power of 2.
+  ///
+  /// @param num_bytes The number of bytes to store the Bloom filter bitset.
+  void Init(uint32_t num_bytes);
+
+  /// Initialize the BlockSplitBloomFilter. It copies the given bitset as its
+  /// underlying bitset because the given bitset may not satisfy the 32-byte alignment
+  /// requirement, which could lead to a segfault when SIMD instructions are used. It
+  /// is the caller's responsibility to free the bitset passed in. This is used when
+  /// reconstructing a Bloom filter from a parquet file.
+  ///
+  /// @param bitset The given bitset to initialize the Bloom filter.
+  /// @param num_bytes The number of bytes of the given bitset.
+  void Init(const uint8_t* bitset, uint32_t num_bytes);
+
+  /// Minimum Bloom filter size; it is set to 32 bytes to fit a tiny Bloom filter.
+  static constexpr uint32_t kMinimumBloomFilterBytes = 32;
+
+  /// Calculate the optimal size according to the number of distinct values and the
+  /// false positive probability.
+  ///
+  /// @param ndv The number of distinct values.
+  /// @param fpp The false positive probability.
+  /// @return always returns a value between kMinimumBloomFilterBytes and
+  /// kMaximumBloomFilterBytes, and the return value is always a power of 2
+  static uint32_t OptimalNumOfBytes(uint32_t ndv, double fpp) {
+    uint32_t optimal_num_of_bits = OptimalNumOfBits(ndv, fpp);
+    DCHECK(::arrow::bit_util::IsMultipleOf8(optimal_num_of_bits));
+    return optimal_num_of_bits >> 3;
+  }
+
+  /// Calculate the optimal size according to the number of distinct values and the
+  /// false positive probability.
+  ///
+  /// @param ndv The number of distinct values.
+  /// @param fpp The false positive probability.
+  /// @return always returns a value between kMinimumBloomFilterBytes * 8 and
+  /// kMaximumBloomFilterBytes * 8, and the return value is always a power of 2
+  static uint32_t OptimalNumOfBits(uint32_t ndv, double fpp) {
+    DCHECK(fpp > 0.0 && fpp < 1.0);
+    const double m = -8.0 * ndv / log(1 - pow(fpp, 1.0 / 8));
+    uint32_t num_bits;
+
+    // Handle overflow.
+    if (m < 0 || m > kMaximumBloomFilterBytes << 3) {
+      num_bits = static_cast<uint32_t>(kMaximumBloomFilterBytes << 3);
+    } else {
+      num_bits = static_cast<uint32_t>(m);
+    }
+
+    // Round up to the lower bound.
+    if (num_bits < kMinimumBloomFilterBytes << 3) {
+      num_bits = kMinimumBloomFilterBytes << 3;
+    }
+
+    // Get the next power of 2 if num_bits is not already a power of 2.
+    if ((num_bits & (num_bits - 1)) != 0) {
+      num_bits = static_cast<uint32_t>(::arrow::bit_util::NextPower2(num_bits));
+    }
+
+    // Round down to the upper bound.
+    if (num_bits > kMaximumBloomFilterBytes << 3) {
+      num_bits = kMaximumBloomFilterBytes << 3;
+    }
+
+    return num_bits;
+  }
+
+  bool FindHash(uint64_t hash) const override;
+  void InsertHash(uint64_t hash) override;
+  void InsertHashes(const uint64_t* hashes, int num_values) override;
+  void WriteTo(ArrowOutputStream* sink) const override;
+  uint32_t GetBitsetSize() const override { return num_bytes_; }
+
+  uint64_t Hash(int32_t value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(int64_t value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(float value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(double value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(const Int96* value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(const ByteArray* value) const override { return hasher_->Hash(value); }
+  uint64_t Hash(const FLBA* value, uint32_t len) const override {
+    return hasher_->Hash(value, len);
+  }
+
+  void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const float* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const double* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override {
+    hasher_->Hashes(values, num_values, hashes);
+  }
+  void Hashes(const FLBA* values, uint32_t type_len, int num_values,
+              uint64_t* hashes) const override {
+    hasher_->Hashes(values, type_len, num_values, hashes);
+  }
+
+  uint64_t Hash(const int32_t* value) const { return hasher_->Hash(*value); }
+  uint64_t Hash(const int64_t*
value) const { return hasher_->Hash(*value); } + uint64_t Hash(const float* value) const { return hasher_->Hash(*value); } + uint64_t Hash(const double* value) const { return hasher_->Hash(*value); } + + /// Deserialize the Bloom filter from an input stream. It is used when reconstructing + /// a Bloom filter from a parquet filter. + /// + /// @param properties The parquet reader properties. + /// @param input_stream The input stream from which to construct the bloom filter. + /// @param bloom_filter_length The length of the serialized bloom filter including + /// header. + /// @return The BlockSplitBloomFilter. + static BlockSplitBloomFilter Deserialize( + const ReaderProperties& properties, ArrowInputStream* input_stream, + std::optional bloom_filter_length = std::nullopt); + + private: + inline void InsertHashImpl(uint64_t hash); + + // Bytes in a tiny Bloom filter block. + static constexpr int kBytesPerFilterBlock = 32; + + // The number of bits to be set in each tiny Bloom filter + static constexpr int kBitsSetPerBlock = 8; + + // A mask structure used to set bits in each tiny Bloom filter. + struct BlockMask { + uint32_t item[kBitsSetPerBlock]; + }; + + // The block-based algorithm needs eight odd SALT values to calculate eight indexes + // of bit to set, one bit in each 32-bit word. + static constexpr uint32_t SALT[kBitsSetPerBlock] = { + 0x47b6137bU, 0x44974d91U, 0x8824ad5bU, 0xa2b7289dU, + 0x705495c7U, 0x2df1424bU, 0x9efc4947U, 0x5c6bfb31U}; + + // Memory pool to allocate aligned buffer for bitset + ::arrow::MemoryPool* pool_; + + // The underlying buffer of bitset. + std::shared_ptr data_; + + // The number of bytes of Bloom filter bitset. + uint32_t num_bytes_; + + // Hash strategy used in this Bloom filter. + HashStrategy hash_strategy_; + + // Algorithm used in this Bloom filter. + Algorithm algorithm_; + + // Compression used in this Bloom filter. + CompressionStrategy compression_strategy_; + + // The hash pointer points to actual hash class used. + std::unique_ptr hasher_; +}; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..cbd267dd1972dcde98382dda3c84a6a544ddc3e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
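+
+// An illustrative usage sketch for the BlockSplitBloomFilter declared in
+// parquet/bloom_filter.h above (not part of this header; the `ndv` and `fpp`
+// values are made up for the example):
+//
+//   parquet::BlockSplitBloomFilter bloom_filter;
+//   bloom_filter.Init(parquet::BlockSplitBloomFilter::OptimalNumOfBytes(
+//       /*ndv=*/1000, /*fpp=*/0.01));
+//   uint64_t h = bloom_filter.Hash(static_cast<int64_t>(42));
+//   bloom_filter.InsertHash(h);
+//   bool maybe_present = bloom_filter.FindHash(h);  // may report false positives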
+
+#pragma once
+
+#include "arrow/io/interfaces.h"
+#include "parquet/properties.h"
+#include "parquet/type_fwd.h"
+
+namespace parquet {
+
+class InternalFileDecryptor;
+class BloomFilter;
+
+class PARQUET_EXPORT RowGroupBloomFilterReader {
+ public:
+  virtual ~RowGroupBloomFilterReader() = default;
+
+  /// \brief Read the bloom filter of a column chunk.
+  ///
+  /// \param[in] i column ordinal of the column chunk.
+  /// \returns bloom filter of the column, or nullptr if it does not exist.
+  /// \throws ParquetException if the index is out of bounds, or if reading the
+  /// bloom filter failed.
+  virtual std::unique_ptr<BloomFilter> GetColumnBloomFilter(int i) = 0;
+};
+
+/// \brief Interface for reading the bloom filter for a Parquet file.
+class PARQUET_EXPORT BloomFilterReader {
+ public:
+  virtual ~BloomFilterReader() = default;
+
+  /// \brief Create a BloomFilterReader instance.
+  /// \returns a BloomFilterReader instance.
+  /// WARNING: The returned BloomFilterReader holds references to all of the input
+  /// parameters, so it must not outlive any of them. Usually these input parameters
+  /// come from the same ParquetFileReader object, so the BloomFilterReader must not
+  /// outlive the reader that created it.
+  static std::unique_ptr<BloomFilterReader> Make(
+      std::shared_ptr<::arrow::io::RandomAccessFile> input,
+      std::shared_ptr<FileMetaData> file_metadata, const ReaderProperties& properties,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  /// \brief Get the bloom filter reader of a specific row group.
+  /// \param[in] i row group ordinal of the bloom filter reader to get.
+  /// \returns RowGroupBloomFilterReader of the specified row group. A nullptr may be
+  ///          returned if the bloom filter for the row group is unavailable; it is
+  ///          the caller's responsibility to check the results of follow-up calls
+  ///          to the RowGroupBloomFilterReader.
+  /// \throws ParquetException if the index is out of bounds.
+  virtual std::shared_ptr<RowGroupBloomFilterReader> RowGroup(int i) = 0;
+};
+
+}  // namespace parquet
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h
new file mode 100644
index 0000000000000000000000000000000000000000..905f805b8c9cc7d0ee71ed448647cb23e0e3c0e7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h
@@ -0,0 +1,171 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
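+//
+// A sketch of the intended consumption pattern (illustrative only; PageReader
+// itself is declared in parquet/column_reader.h, and `pager` is assumed to be
+// obtained from a column chunk):
+//
+//   std::shared_ptr<parquet::Page> page;
+//   while ((page = pager->NextPage()) != nullptr) {
+//     if (page->type() == parquet::PageType::DATA_PAGE) {
+//       const auto& data_page = static_cast<const parquet::DataPageV1&>(*page);
+//       // inspect data_page.num_values(), data_page.encoding(), ...
+//     }
+//   }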
+ +#pragma once + +#include +#include +#include +#include + +#include "parquet/statistics.h" +#include "parquet/types.h" + +namespace parquet { + +// TODO: Parallel processing is not yet safe because of memory-ownership +// semantics (the PageReader may or may not own the memory referenced by a +// page) +// +// TODO(wesm): In the future Parquet implementations may store the crc code +// in format::PageHeader. parquet-mr currently does not, so we also skip it +// here, both on the read and write path +class Page { + public: + Page(const std::shared_ptr& buffer, PageType::type type) + : buffer_(buffer), type_(type) {} + + PageType::type type() const { return type_; } + + std::shared_ptr buffer() const { return buffer_; } + + // @returns: a pointer to the page's data + const uint8_t* data() const { return buffer_->data(); } + + // @returns: the total size in bytes of the page's data buffer + int32_t size() const { return static_cast(buffer_->size()); } + + private: + std::shared_ptr buffer_; + PageType::type type_; +}; + +/// \brief Base type for DataPageV1 and DataPageV2 including common attributes +class DataPage : public Page { + public: + int32_t num_values() const { return num_values_; } + Encoding::type encoding() const { return encoding_; } + int64_t uncompressed_size() const { return uncompressed_size_; } + const EncodedStatistics& statistics() const { return statistics_; } + /// Return the row ordinal within the row group to the first row in the data page. + /// Currently it is only present from data pages created by ColumnWriter in order + /// to collect page index. + std::optional first_row_index() const { return first_row_index_; } + + virtual ~DataPage() = default; + + protected: + DataPage(PageType::type type, const std::shared_ptr& buffer, int32_t num_values, + Encoding::type encoding, int64_t uncompressed_size, + const EncodedStatistics& statistics = EncodedStatistics(), + std::optional first_row_index = std::nullopt) + : Page(buffer, type), + num_values_(num_values), + encoding_(encoding), + uncompressed_size_(uncompressed_size), + statistics_(statistics), + first_row_index_(std::move(first_row_index)) {} + + int32_t num_values_; + Encoding::type encoding_; + int64_t uncompressed_size_; + EncodedStatistics statistics_; + /// Row ordinal within the row group to the first row in the data page. 
+ std::optional first_row_index_; +}; + +class DataPageV1 : public DataPage { + public: + DataPageV1(const std::shared_ptr& buffer, int32_t num_values, + Encoding::type encoding, Encoding::type definition_level_encoding, + Encoding::type repetition_level_encoding, int64_t uncompressed_size, + const EncodedStatistics& statistics = EncodedStatistics(), + std::optional first_row_index = std::nullopt) + : DataPage(PageType::DATA_PAGE, buffer, num_values, encoding, uncompressed_size, + statistics, std::move(first_row_index)), + definition_level_encoding_(definition_level_encoding), + repetition_level_encoding_(repetition_level_encoding) {} + + Encoding::type repetition_level_encoding() const { return repetition_level_encoding_; } + + Encoding::type definition_level_encoding() const { return definition_level_encoding_; } + + private: + Encoding::type definition_level_encoding_; + Encoding::type repetition_level_encoding_; +}; + +class DataPageV2 : public DataPage { + public: + DataPageV2(const std::shared_ptr& buffer, int32_t num_values, int32_t num_nulls, + int32_t num_rows, Encoding::type encoding, + int32_t definition_levels_byte_length, int32_t repetition_levels_byte_length, + int64_t uncompressed_size, bool is_compressed = false, + const EncodedStatistics& statistics = EncodedStatistics(), + std::optional first_row_index = std::nullopt) + : DataPage(PageType::DATA_PAGE_V2, buffer, num_values, encoding, uncompressed_size, + statistics, std::move(first_row_index)), + num_nulls_(num_nulls), + num_rows_(num_rows), + definition_levels_byte_length_(definition_levels_byte_length), + repetition_levels_byte_length_(repetition_levels_byte_length), + is_compressed_(is_compressed) {} + + int32_t num_nulls() const { return num_nulls_; } + + int32_t num_rows() const { return num_rows_; } + + int32_t definition_levels_byte_length() const { return definition_levels_byte_length_; } + + int32_t repetition_levels_byte_length() const { return repetition_levels_byte_length_; } + + bool is_compressed() const { return is_compressed_; } + + private: + int32_t num_nulls_; + int32_t num_rows_; + int32_t definition_levels_byte_length_; + int32_t repetition_levels_byte_length_; + bool is_compressed_; +}; + +class DictionaryPage : public Page { + public: + DictionaryPage(const std::shared_ptr& buffer, int32_t num_values, + Encoding::type encoding, bool is_sorted = false) + : Page(buffer, PageType::DICTIONARY_PAGE), + num_values_(num_values), + encoding_(encoding), + is_sorted_(is_sorted) {} + + int32_t num_values() const { return num_values_; } + + Encoding::type encoding() const { return encoding_; } + + bool is_sorted() const { return is_sorted_; } + + private: + int32_t num_values_; + Encoding::type encoding_; + bool is_sorted_; +}; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h new file mode 100644 index 0000000000000000000000000000000000000000..a9953866fab22ee6db13a92578f85556ea6f99ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h @@ -0,0 +1,264 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include "parquet/column_reader.h" +#include "parquet/exception.h" +#include "parquet/platform.h" +#include "parquet/schema.h" +#include "parquet/types.h" + +namespace parquet { + +static constexpr int64_t DEFAULT_SCANNER_BATCH_SIZE = 128; + +class PARQUET_EXPORT Scanner { + public: + explicit Scanner(std::shared_ptr reader, + int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) + : batch_size_(batch_size), + level_offset_(0), + levels_buffered_(0), + value_buffer_(AllocateBuffer(pool)), + value_offset_(0), + values_buffered_(0), + reader_(std::move(reader)) { + def_levels_.resize( + descr()->max_definition_level() > 0 ? static_cast(batch_size_) : 0); + rep_levels_.resize( + descr()->max_repetition_level() > 0 ? static_cast(batch_size_) : 0); + } + + virtual ~Scanner() {} + + static std::shared_ptr Make( + std::shared_ptr col_reader, + int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + + virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) = 0; + + bool HasNext() { return level_offset_ < levels_buffered_ || reader_->HasNext(); } + + const ColumnDescriptor* descr() const { return reader_->descr(); } + + int64_t batch_size() const { return batch_size_; } + + void SetBatchSize(int64_t batch_size) { batch_size_ = batch_size; } + + protected: + int64_t batch_size_; + + std::vector def_levels_; + std::vector rep_levels_; + int level_offset_; + int levels_buffered_; + + std::shared_ptr value_buffer_; + int value_offset_; + int64_t values_buffered_; + std::shared_ptr reader_; +}; + +template +class PARQUET_TEMPLATE_CLASS_EXPORT TypedScanner : public Scanner { + public: + typedef typename DType::c_type T; + + explicit TypedScanner(std::shared_ptr reader, + int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) + : Scanner(std::move(reader), batch_size, pool) { + typed_reader_ = static_cast*>(reader_.get()); + int value_byte_size = type_traits::value_byte_size; + PARQUET_THROW_NOT_OK(value_buffer_->Resize(batch_size_ * value_byte_size)); + values_ = reinterpret_cast(value_buffer_->mutable_data()); + } + + virtual ~TypedScanner() {} + + bool NextLevels(int16_t* def_level, int16_t* rep_level) { + if (level_offset_ == levels_buffered_) { + levels_buffered_ = static_cast( + typed_reader_->ReadBatch(static_cast(batch_size_), def_levels_.data(), + rep_levels_.data(), values_, &values_buffered_)); + + value_offset_ = 0; + level_offset_ = 0; + if (!levels_buffered_) { + return false; + } + } + *def_level = descr()->max_definition_level() > 0 ? def_levels_[level_offset_] : 0; + *rep_level = descr()->max_repetition_level() > 0 ? 
rep_levels_[level_offset_] : 0; + level_offset_++; + return true; + } + + bool Next(T* val, int16_t* def_level, int16_t* rep_level, bool* is_null) { + if (level_offset_ == levels_buffered_) { + if (!HasNext()) { + // Out of data pages + return false; + } + } + + NextLevels(def_level, rep_level); + *is_null = *def_level < descr()->max_definition_level(); + + if (*is_null) { + return true; + } + + if (value_offset_ == values_buffered_) { + throw ParquetException("Value was non-null, but has not been buffered"); + } + *val = values_[value_offset_++]; + return true; + } + + // Returns true if there is a next value + bool NextValue(T* val, bool* is_null) { + if (level_offset_ == levels_buffered_) { + if (!HasNext()) { + // Out of data pages + return false; + } + } + + // Out of values + int16_t def_level = -1; + int16_t rep_level = -1; + NextLevels(&def_level, &rep_level); + *is_null = def_level < descr()->max_definition_level(); + + if (*is_null) { + return true; + } + + if (value_offset_ == values_buffered_) { + throw ParquetException("Value was non-null, but has not been buffered"); + } + *val = values_[value_offset_++]; + return true; + } + + virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) { + T val{}; + int16_t def_level = -1; + int16_t rep_level = -1; + bool is_null = false; + char buffer[80]; + + if (!Next(&val, &def_level, &rep_level, &is_null)) { + throw ParquetException("No more values buffered"); + } + + if (with_levels) { + out << " D:" << def_level << " R:" << rep_level << " "; + if (!is_null) { + out << "V:"; + } + } + + if (is_null) { + std::string null_fmt = format_fwf(width); + snprintf(buffer, sizeof(buffer), null_fmt.c_str(), "NULL"); + } else { + FormatValue(&val, buffer, sizeof(buffer), width); + } + out << buffer; + } + + private: + // The ownership of this object is expressed through the reader_ variable in the base + TypedColumnReader* typed_reader_; + + inline void FormatValue(void* val, char* buffer, int bufsize, int width); + + T* values_; +}; + +template +inline void TypedScanner::FormatValue(void* val, char* buffer, int bufsize, + int width) { + std::string fmt = format_fwf(width); + snprintf(buffer, bufsize, fmt.c_str(), *reinterpret_cast(val)); +} + +template <> +inline void TypedScanner::FormatValue(void* val, char* buffer, int bufsize, + int width) { + std::string fmt = format_fwf(width); + std::string result = Int96ToString(*reinterpret_cast(val)); + snprintf(buffer, bufsize, fmt.c_str(), result.c_str()); +} + +template <> +inline void TypedScanner::FormatValue(void* val, char* buffer, int bufsize, + int width) { + std::string fmt = format_fwf(width); + std::string result = ByteArrayToString(*reinterpret_cast(val)); + snprintf(buffer, bufsize, fmt.c_str(), result.c_str()); +} + +template <> +inline void TypedScanner::FormatValue(void* val, char* buffer, int bufsize, + int width) { + std::string fmt = format_fwf(width); + std::string result = FixedLenByteArrayToString( + *reinterpret_cast(val), descr()->type_length()); + snprintf(buffer, bufsize, fmt.c_str(), result.c_str()); +} + +typedef TypedScanner BoolScanner; +typedef TypedScanner Int32Scanner; +typedef TypedScanner Int64Scanner; +typedef TypedScanner Int96Scanner; +typedef TypedScanner FloatScanner; +typedef TypedScanner DoubleScanner; +typedef TypedScanner ByteArrayScanner; +typedef TypedScanner FixedLenByteArrayScanner; + +template +int64_t ScanAll(int32_t batch_size, int16_t* def_levels, int16_t* rep_levels, + uint8_t* values, int64_t* values_buffered, + 
parquet::ColumnReader* reader) { + typedef typename RType::T Type; + auto typed_reader = static_cast(reader); + auto vals = reinterpret_cast(&values[0]); + return typed_reader->ReadBatch(batch_size, def_levels, rep_levels, vals, + values_buffered); +} + +int64_t PARQUET_EXPORT ScanAllValues(int32_t batch_size, int16_t* def_levels, + int16_t* rep_levels, uint8_t* values, + int64_t* values_buffered, + parquet::ColumnReader* reader); + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h new file mode 100644 index 0000000000000000000000000000000000000000..de47bb7deb8393283a50ac878692e7dddde51e6b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h @@ -0,0 +1,469 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/spaced.h" + +#include "parquet/exception.h" +#include "parquet/platform.h" +#include "parquet/types.h" + +namespace arrow { + +class Array; +class ArrayBuilder; +class BinaryArray; +class BinaryBuilder; +class BooleanBuilder; +class Int32Type; +class Int64Type; +class FloatType; +class DoubleType; +class FixedSizeBinaryType; +template +class NumericBuilder; +class FixedSizeBinaryBuilder; +template +class Dictionary32Builder; + +} // namespace arrow + +namespace parquet { + +template +class TypedEncoder; + +using BooleanEncoder = TypedEncoder; +using Int32Encoder = TypedEncoder; +using Int64Encoder = TypedEncoder; +using Int96Encoder = TypedEncoder; +using FloatEncoder = TypedEncoder; +using DoubleEncoder = TypedEncoder; +using ByteArrayEncoder = TypedEncoder; +using FLBAEncoder = TypedEncoder; + +template +class TypedDecoder; + +class BooleanDecoder; +using Int32Decoder = TypedDecoder; +using Int64Decoder = TypedDecoder; +using Int96Decoder = TypedDecoder; +using FloatDecoder = TypedDecoder; +using DoubleDecoder = TypedDecoder; +using ByteArrayDecoder = TypedDecoder; +class FLBADecoder; + +template +struct EncodingTraits; + +template <> +struct EncodingTraits { + using Encoder = BooleanEncoder; + using Decoder = BooleanDecoder; + + using ArrowType = ::arrow::BooleanType; + using Accumulator = ::arrow::BooleanBuilder; + struct DictAccumulator {}; +}; + +template <> +struct EncodingTraits { + using Encoder = Int32Encoder; + using Decoder = Int32Decoder; + + using ArrowType = ::arrow::Int32Type; + using Accumulator = ::arrow::NumericBuilder<::arrow::Int32Type>; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int32Type>; +}; + +template <> +struct EncodingTraits { + using Encoder = Int64Encoder; + using Decoder = Int64Decoder; + + using ArrowType = 
::arrow::Int64Type; + using Accumulator = ::arrow::NumericBuilder<::arrow::Int64Type>; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int64Type>; +}; + +template <> +struct EncodingTraits { + using Encoder = Int96Encoder; + using Decoder = Int96Decoder; + + struct Accumulator {}; + struct DictAccumulator {}; +}; + +template <> +struct EncodingTraits { + using Encoder = FloatEncoder; + using Decoder = FloatDecoder; + + using ArrowType = ::arrow::FloatType; + using Accumulator = ::arrow::NumericBuilder<::arrow::FloatType>; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::FloatType>; +}; + +template <> +struct EncodingTraits { + using Encoder = DoubleEncoder; + using Decoder = DoubleDecoder; + + using ArrowType = ::arrow::DoubleType; + using Accumulator = ::arrow::NumericBuilder<::arrow::DoubleType>; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::DoubleType>; +}; + +template <> +struct EncodingTraits { + using Encoder = ByteArrayEncoder; + using Decoder = ByteArrayDecoder; + + using ArrowType = ::arrow::BinaryType; + /// \brief Internal helper class for decoding BYTE_ARRAY data where we can + /// overflow the capacity of a single arrow::BinaryArray + struct Accumulator { + std::unique_ptr<::arrow::BinaryBuilder> builder; + std::vector> chunks; + }; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::BinaryType>; +}; + +template <> +struct EncodingTraits { + using Encoder = FLBAEncoder; + using Decoder = FLBADecoder; + + using ArrowType = ::arrow::FixedSizeBinaryType; + using Accumulator = ::arrow::FixedSizeBinaryBuilder; + using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::FixedSizeBinaryType>; +}; + +class ColumnDescriptor; + +// Untyped base for all encoders +class Encoder { + public: + virtual ~Encoder() = default; + + virtual int64_t EstimatedDataEncodedSize() = 0; + virtual std::shared_ptr FlushValues() = 0; + virtual Encoding::type encoding() const = 0; + + virtual void Put(const ::arrow::Array& values) = 0; + + virtual MemoryPool* memory_pool() const = 0; +}; + +// Base class for value encoders. Since encoders may or not have state (e.g., +// dictionary encoding) we use a class instance to maintain any state. +// +// Encode interfaces are internal, subject to change without deprecation. +template +class TypedEncoder : virtual public Encoder { + public: + typedef typename DType::c_type T; + + using Encoder::Put; + + virtual void Put(const T* src, int num_values) = 0; + + virtual void Put(const std::vector& src, int num_values = -1); + + virtual void PutSpaced(const T* src, int num_values, const uint8_t* valid_bits, + int64_t valid_bits_offset) = 0; +}; + +template +void TypedEncoder::Put(const std::vector& src, int num_values) { + if (num_values == -1) { + num_values = static_cast(src.size()); + } + Put(src.data(), num_values); +} + +template <> +inline void TypedEncoder::Put(const std::vector& src, int num_values) { + // NOTE(wesm): This stub is here only to satisfy the compiler; it is + // overridden later with the actual implementation +} + +// Base class for dictionary encoders +template +class DictEncoder : virtual public TypedEncoder { + public: + /// Writes out any buffered indices to buffer preceded by the bit width of this data. + /// Returns the number of bytes written. + /// If the supplied buffer is not big enough, returns -1. + /// buffer must be preallocated with buffer_len bytes. Use EstimatedDataEncodedSize() + /// to size buffer. 
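+  ///
+  /// An illustrative call sequence (a sketch, assuming `encoder` is a
+  /// DictEncoder as described above; the buffer is sized via
+  /// EstimatedDataEncodedSize() as documented):
+  ///
+  /// \code
+  ///   std::shared_ptr<ResizableBuffer> buf = AllocateBuffer(
+  ///       ::arrow::default_memory_pool(), encoder->EstimatedDataEncodedSize());
+  ///   int written = encoder->WriteIndices(buf->mutable_data(),
+  ///                                       static_cast<int>(buf->size()));
+  ///   // written == -1 signals that the buffer was too small
+  /// \endcode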
+ virtual int WriteIndices(uint8_t* buffer, int buffer_len) = 0; + + virtual int dict_encoded_size() const = 0; + + virtual int bit_width() const = 0; + + /// Writes out the encoded dictionary to buffer. buffer must be preallocated to + /// dict_encoded_size() bytes. + virtual void WriteDict(uint8_t* buffer) const = 0; + + virtual int num_entries() const = 0; + + /// \brief EXPERIMENTAL: Append dictionary indices into the encoder. It is + /// assumed (without any boundschecking) that the indices reference + /// preexisting dictionary values + /// \param[in] indices the dictionary index values. Only Int32Array currently + /// supported + virtual void PutIndices(const ::arrow::Array& indices) = 0; + + /// \brief EXPERIMENTAL: Append dictionary into encoder, inserting indices + /// separately. Currently throws exception if the current dictionary memo is + /// non-empty + /// \param[in] values the dictionary values. Only valid for certain + /// Parquet/Arrow type combinations, like BYTE_ARRAY/BinaryArray + virtual void PutDictionary(const ::arrow::Array& values) = 0; +}; + +// ---------------------------------------------------------------------- +// Value decoding + +class Decoder { + public: + virtual ~Decoder() = default; + + // Sets the data for a new page. This will be called multiple times on the same + // decoder and should reset all internal state. + virtual void SetData(int num_values, const uint8_t* data, int len) = 0; + + // Returns the number of values left (for the last call to SetData()). This is + // the number of values left in this page. + virtual int values_left() const = 0; + virtual Encoding::type encoding() const = 0; +}; + +template +class TypedDecoder : virtual public Decoder { + public: + using T = typename DType::c_type; + + /// \brief Decode values into a buffer + /// + /// Subclasses may override the more specialized Decode methods below. + /// + /// \param[in] buffer destination for decoded values + /// \param[in] max_values maximum number of values to decode + /// \return The number of values decoded. Should be identical to max_values except + /// at the end of the current data page. + virtual int Decode(T* buffer, int max_values) = 0; + + /// \brief Decode the values in this data page but leave spaces for null entries. + /// + /// \param[in] buffer destination for decoded values + /// \param[in] num_values size of the def_levels and buffer arrays including the number + /// of null slots + /// \param[in] null_count number of null slots + /// \param[in] valid_bits bitmap data indicating position of valid slots + /// \param[in] valid_bits_offset offset into valid_bits + /// \return The number of values decoded, including nulls. + virtual int DecodeSpaced(T* buffer, int num_values, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset) { + if (null_count > 0) { + int values_to_read = num_values - null_count; + int values_read = Decode(buffer, values_to_read); + if (values_read != values_to_read) { + throw ParquetException("Number of values / definition_levels read did not match"); + } + + return ::arrow::util::internal::SpacedExpand(buffer, num_values, null_count, + valid_bits, valid_bits_offset); + } else { + return Decode(buffer, num_values); + } + } + + /// \brief Decode into an ArrayBuilder or other accumulator + /// + /// This function assumes the definition levels were already decoded + /// as a validity bitmap in the given `valid_bits`. `null_count` + /// is the number of 0s in `valid_bits`. 
+ /// As a space optimization, it is allowed for `valid_bits` to be null + /// if `null_count` is zero. + /// + /// \return number of values decoded + virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset, + typename EncodingTraits::Accumulator* out) = 0; + + /// \brief Decode into an ArrayBuilder or other accumulator ignoring nulls + /// + /// \return number of values decoded + int DecodeArrowNonNull(int num_values, + typename EncodingTraits::Accumulator* out) { + return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, out); + } + + /// \brief Decode into a DictionaryBuilder + /// + /// This function assumes the definition levels were already decoded + /// as a validity bitmap in the given `valid_bits`. `null_count` + /// is the number of 0s in `valid_bits`. + /// As a space optimization, it is allowed for `valid_bits` to be null + /// if `null_count` is zero. + /// + /// \return number of values decoded + virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset, + typename EncodingTraits::DictAccumulator* builder) = 0; + + /// \brief Decode into a DictionaryBuilder ignoring nulls + /// + /// \return number of values decoded + int DecodeArrowNonNull(int num_values, + typename EncodingTraits::DictAccumulator* builder) { + return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, builder); + } +}; + +template +class DictDecoder : virtual public TypedDecoder { + public: + using T = typename DType::c_type; + + virtual void SetDict(TypedDecoder* dictionary) = 0; + + /// \brief Insert dictionary values into the Arrow dictionary builder's memo, + /// but do not append any indices + virtual void InsertDictionary(::arrow::ArrayBuilder* builder) = 0; + + /// \brief Decode only dictionary indices and append to dictionary + /// builder. The builder must have had the dictionary from this decoder + /// inserted already. + /// + /// \warning Remember to reset the builder each time the dict decoder is initialized + /// with a new dictionary page + virtual int DecodeIndicesSpaced(int num_values, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, + ::arrow::ArrayBuilder* builder) = 0; + + /// \brief Decode only dictionary indices (no nulls) + /// + /// \warning Remember to reset the builder each time the dict decoder is initialized + /// with a new dictionary page + virtual int DecodeIndices(int num_values, ::arrow::ArrayBuilder* builder) = 0; + + /// \brief Decode only dictionary indices (no nulls). Same as above + /// DecodeIndices but target is an array instead of a builder. + /// + /// \note API EXPERIMENTAL + virtual int DecodeIndices(int num_values, int32_t* indices) = 0; + + /// \brief Get dictionary. The reader will call this API when it encounters a + /// new dictionary. + /// + /// @param[out] dictionary The pointer to dictionary values. Dictionary is owned by + /// the decoder and is destroyed when the decoder is destroyed. + /// @param[out] dictionary_length The dictionary length. 
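+  ///
+  /// A small usage sketch (illustrative; assumes `decoder` is a DictDecoder
+  /// that has already consumed a dictionary page):
+  ///
+  /// \code
+  ///   const T* dict = nullptr;
+  ///   int32_t dict_len = 0;
+  ///   decoder->GetDictionary(&dict, &dict_len);
+  ///   // dict[0 .. dict_len) remains valid until the decoder is destroyed
+  /// \endcode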
+ /// + /// \note API EXPERIMENTAL + virtual void GetDictionary(const T** dictionary, int32_t* dictionary_length) = 0; +}; + +// ---------------------------------------------------------------------- +// TypedEncoder specializations, traits, and factory functions + +class BooleanDecoder : virtual public TypedDecoder { + public: + using TypedDecoder::Decode; + + /// \brief Decode and bit-pack values into a buffer + /// + /// \param[in] buffer destination for decoded values + /// This buffer will contain bit-packed values. + /// \param[in] max_values max values to decode. + /// \return The number of values decoded. Should be identical to max_values except + /// at the end of the current data page. + virtual int Decode(uint8_t* buffer, int max_values) = 0; +}; + +class FLBADecoder : virtual public TypedDecoder { + public: + using TypedDecoder::DecodeSpaced; + + // TODO(wesm): As possible follow-up to PARQUET-1508, we should examine if + // there is value in adding specialized read methods for + // FIXED_LEN_BYTE_ARRAY. If only Decimal data can occur with this data type + // then perhaps not +}; + +PARQUET_EXPORT +std::unique_ptr MakeEncoder( + Type::type type_num, Encoding::type encoding, bool use_dictionary = false, + const ColumnDescriptor* descr = NULLPTR, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + +template +std::unique_ptr::Encoder> MakeTypedEncoder( + Encoding::type encoding, bool use_dictionary = false, + const ColumnDescriptor* descr = NULLPTR, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) { + using OutType = typename EncodingTraits::Encoder; + std::unique_ptr base = + MakeEncoder(DType::type_num, encoding, use_dictionary, descr, pool); + return std::unique_ptr(dynamic_cast(base.release())); +} + +PARQUET_EXPORT +std::unique_ptr MakeDecoder( + Type::type type_num, Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + +namespace detail { + +PARQUET_EXPORT +std::unique_ptr MakeDictDecoder(Type::type type_num, + const ColumnDescriptor* descr, + ::arrow::MemoryPool* pool); + +} // namespace detail + +template +std::unique_ptr> MakeDictDecoder( + const ColumnDescriptor* descr = NULLPTR, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) { + using OutType = DictDecoder; + auto decoder = detail::MakeDictDecoder(DType::type_num, descr, pool); + return std::unique_ptr(dynamic_cast(decoder.release())); +} + +template +std::unique_ptr::Decoder> MakeTypedDecoder( + Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) { + using OutType = typename EncodingTraits::Decoder; + std::unique_ptr base = MakeDecoder(DType::type_num, encoding, descr, pool); + return std::unique_ptr(dynamic_cast(base.release())); +} + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..291cccf30f8e3f77c724dd2ebec28bb08a897ec0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "parquet/encryption/encryption.h" +#include "parquet/encryption/file_key_wrapper.h" +#include "parquet/encryption/key_toolkit.h" +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm = + ParquetCipher::AES_GCM_V1; +static constexpr bool kDefaultPlaintextFooter = false; +static constexpr bool kDefaultDoubleWrapping = true; +static constexpr double kDefaultCacheLifetimeSeconds = 600; // 10 minutes +static constexpr bool kDefaultInternalKeyMaterial = true; +static constexpr bool kDefaultUniformEncryption = false; +static constexpr int32_t kDefaultDataKeyLengthBits = 128; + +struct PARQUET_EXPORT EncryptionConfiguration { + explicit EncryptionConfiguration(const std::string& footer_key) + : footer_key(footer_key) {} + + /// ID of the master key for footer encryption/signing + std::string footer_key; + + /// List of columns to encrypt, with master key IDs (see HIVE-21848). + /// Format: "masterKeyID:colName,colName;masterKeyID:colName..." + /// Either + /// (1) column_keys must be set + /// or + /// (2) uniform_encryption must be set to true + /// If none of (1) and (2) are true, or if both are true, an exception will be + /// thrown. + std::string column_keys; + + /// Encrypt footer and all columns with the same encryption key. + bool uniform_encryption = kDefaultUniformEncryption; + + /// Parquet encryption algorithm. Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1". + ParquetCipher::type encryption_algorithm = kDefaultEncryptionAlgorithm; + + /// Write files with plaintext footer. + /// The default is false - files are written with encrypted footer. + bool plaintext_footer = kDefaultPlaintextFooter; + + /// Use double wrapping - where data encryption keys (DEKs) are encrypted with key + /// encryption keys (KEKs), which in turn are encrypted with master keys. + /// The default is true. If set to false, use single wrapping - where DEKs are + /// encrypted directly with master keys. + bool double_wrapping = kDefaultDoubleWrapping; + + /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client + /// objects). + /// The default is 600 (10 minutes). + double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds; + + /// Store key material inside Parquet file footers; this mode doesn’t produce + /// additional files. By default, true. If set to false, key material is stored in + /// separate files in the same folder, which enables key rotation for immutable + /// Parquet files. + bool internal_key_material = kDefaultInternalKeyMaterial; + + /// Length of data encryption keys (DEKs), randomly generated by parquet key + /// management tools. Can be 128, 192 or 256 bits. + /// The default is 128 bits. 
+  int32_t data_key_length_bits = kDefaultDataKeyLengthBits;
+};
+
+struct PARQUET_EXPORT DecryptionConfiguration {
+  /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client
+  /// objects).
+  /// The default is 600 (10 minutes).
+  double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds;
+};
+
+/// This is a core class that translates the parameters of high-level encryption
+/// (the names of encrypted columns, the names of master keys, etc.) into the
+/// parameters of low-level encryption (key metadata, DEKs, etc.). It is a factory
+/// that produces the low-level FileEncryptionProperties and FileDecryptionProperties
+/// objects from the high-level parameters.
+class PARQUET_EXPORT CryptoFactory {
+ public:
+  /// A KmsClientFactory object must be registered via this method before calling any
+  /// of the GetFileEncryptionProperties()/GetFileDecryptionProperties() methods.
+  void RegisterKmsClientFactory(std::shared_ptr<KmsClientFactory> kms_client_factory);
+
+  /// Get the encryption properties for a Parquet file.
+  /// If external key material is used, then a file system and a path to the
+  /// parquet file must be provided.
+  std::shared_ptr<FileEncryptionProperties> GetFileEncryptionProperties(
+      const KmsConnectionConfig& kms_connection_config,
+      const EncryptionConfiguration& encryption_config, const std::string& file_path = "",
+      const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
+
+  /// Get the decryption properties for a Parquet file.
+  /// The returned FileDecryptionProperties object will use the cache inside this
+  /// CryptoFactory object, so please keep this CryptoFactory object alive along
+  /// with the returned FileDecryptionProperties object.
+  /// If external key material is used, then a file system and a path to the
+  /// parquet file must be provided.
+  std::shared_ptr<FileDecryptionProperties> GetFileDecryptionProperties(
+      const KmsConnectionConfig& kms_connection_config,
+      const DecryptionConfiguration& decryption_config, const std::string& file_path = "",
+      const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
+
+  void RemoveCacheEntriesForToken(const std::string& access_token) {
+    key_toolkit_.RemoveCacheEntriesForToken(access_token);
+  }
+
+  void RemoveCacheEntriesForAllTokens() { key_toolkit_.RemoveCacheEntriesForAllTokens(); }
+
+  /// Rotates master encryption keys for a Parquet file that uses external key material.
+  /// In single wrapping mode, data encryption keys are decrypted with the old master
+  /// keys and then re-encrypted with new master keys.
+  /// In double wrapping mode, key encryption keys are decrypted with the old master
+  /// keys and then re-encrypted with new master keys.
+  /// This relies on the KMS supporting versioning, such that the old master key is
+  /// used when unwrapping a key, and the latest version is used when wrapping a key.
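+  ///
+  /// An illustrative call (a sketch; `factory`, `kms_config` and `fs` are
+  /// assumed to be set up by the caller, and the file path is made up):
+  ///
+  /// \code
+  ///   factory.RotateMasterKeys(kms_config, "/path/to/file.parquet", fs);
+  /// \endcode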
+ void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config, + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, + bool double_wrapping = kDefaultDoubleWrapping, + double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds); + + private: + ColumnPathToEncryptionPropertiesMap GetColumnEncryptionProperties( + int dek_length, const std::string& column_keys, FileKeyWrapper* key_wrapper); + + /// Key utilities object for kms client initialization and cache control + KeyToolkit key_toolkit_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..8fd7ec8d3d015424cf7b4bd28e73db58da375bd4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h @@ -0,0 +1,510 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
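+
+// An illustrative sketch of a custom key retriever built on the
+// DecryptionKeyRetriever interface declared below (`my_keys` is a hypothetical
+// lookup table, not part of this header):
+//
+//   class MyKeyRetriever : public parquet::DecryptionKeyRetriever {
+//    public:
+//     std::string GetKey(const std::string& key_metadata) override {
+//       return my_keys.at(key_metadata);
+//     }
+//
+//    private:
+//     std::map<std::string, std::string> my_keys;
+//   };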
+ +#pragma once + +#include +#include +#include +#include + +#include "parquet/exception.h" +#include "parquet/schema.h" +#include "parquet/types.h" + +namespace parquet { + +static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm = + ParquetCipher::AES_GCM_V1; +static constexpr int32_t kMaximalAadMetadataLength = 256; +static constexpr bool kDefaultEncryptedFooter = true; +static constexpr bool kDefaultCheckSignature = true; +static constexpr bool kDefaultAllowPlaintextFiles = false; +static constexpr int32_t kAadFileUniqueLength = 8; + +class ColumnDecryptionProperties; +using ColumnPathToDecryptionPropertiesMap = + std::map>; + +class ColumnEncryptionProperties; +using ColumnPathToEncryptionPropertiesMap = + std::map>; + +class PARQUET_EXPORT DecryptionKeyRetriever { + public: + virtual std::string GetKey(const std::string& key_metadata) = 0; + virtual ~DecryptionKeyRetriever() {} +}; + +/// Simple integer key retriever +class PARQUET_EXPORT IntegerKeyIdRetriever : public DecryptionKeyRetriever { + public: + void PutKey(uint32_t key_id, const std::string& key); + std::string GetKey(const std::string& key_metadata) override; + + private: + std::map key_map_; +}; + +// Simple string key retriever +class PARQUET_EXPORT StringKeyIdRetriever : public DecryptionKeyRetriever { + public: + void PutKey(const std::string& key_id, const std::string& key); + std::string GetKey(const std::string& key_metadata) override; + + private: + std::map key_map_; +}; + +class PARQUET_EXPORT HiddenColumnException : public ParquetException { + public: + explicit HiddenColumnException(const std::string& columnPath) + : ParquetException(columnPath.c_str()) {} +}; + +class PARQUET_EXPORT KeyAccessDeniedException : public ParquetException { + public: + explicit KeyAccessDeniedException(const std::string& columnPath) + : ParquetException(columnPath.c_str()) {} +}; + +inline const uint8_t* str2bytes(const std::string& str) { + if (str.empty()) return NULLPTR; + + char* cbytes = const_cast(str.c_str()); + return reinterpret_cast(cbytes); +} + +class PARQUET_EXPORT ColumnEncryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + /// Convenience builder for encrypted columns. + explicit Builder(const std::string& name) : Builder(name, true) {} + + /// Convenience builder for encrypted columns. + explicit Builder(const std::shared_ptr& path) + : Builder(path->ToDotString(), true) {} + + /// Set a column-specific key. + /// If key is not set on an encrypted column, the column will + /// be encrypted with the footer key. + /// keyBytes Key length must be either 16, 24 or 32 bytes. + /// The key is cloned, and will be wiped out (array values set to 0) upon completion + /// of file writing. + /// Caller is responsible for wiping out the input key array. + Builder* key(std::string column_key); + + /// Set a key retrieval metadata. + /// use either key_metadata() or key_id(), not both + Builder* key_metadata(const std::string& key_metadata); + + /// A convenience function to set key metadata using a string id. + /// Set a key retrieval metadata (converted from String). + /// use either key_metadata() or key_id(), not both + /// key_id will be converted to metadata (UTF-8 array). 
+ Builder* key_id(const std::string& key_id); + + std::shared_ptr build() { + return std::shared_ptr( + new ColumnEncryptionProperties(encrypted_, column_path_, key_, key_metadata_)); + } + + private: + const std::string column_path_; + bool encrypted_; + std::string key_; + std::string key_metadata_; + + Builder(const std::string path, bool encrypted) + : column_path_(path), encrypted_(encrypted) {} + }; + + std::string column_path() const { return column_path_; } + bool is_encrypted() const { return encrypted_; } + bool is_encrypted_with_footer_key() const { return encrypted_with_footer_key_; } + std::string key() const { return key_; } + std::string key_metadata() const { return key_metadata_; } + + /// Upon completion of file writing, the encryption key + /// will be wiped out. + void WipeOutEncryptionKey() { key_.clear(); } + + bool is_utilized() { + if (key_.empty()) + return false; // can re-use column properties without encryption keys + return utilized_; + } + + /// ColumnEncryptionProperties object can be used for writing one file only. + /// Mark ColumnEncryptionProperties as utilized once it is used in + /// FileEncryptionProperties as the encryption key will be wiped out upon + /// completion of file writing. + void set_utilized() { utilized_ = true; } + + std::shared_ptr DeepClone() { + std::string key_copy = key_; + return std::shared_ptr(new ColumnEncryptionProperties( + encrypted_, column_path_, key_copy, key_metadata_)); + } + + ColumnEncryptionProperties() = default; + ColumnEncryptionProperties(const ColumnEncryptionProperties& other) = default; + ColumnEncryptionProperties(ColumnEncryptionProperties&& other) = default; + + private: + const std::string column_path_; + bool encrypted_; + bool encrypted_with_footer_key_; + std::string key_; + std::string key_metadata_; + bool utilized_; + explicit ColumnEncryptionProperties(bool encrypted, const std::string& column_path, + const std::string& key, + const std::string& key_metadata); +}; + +class PARQUET_EXPORT ColumnDecryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + explicit Builder(const std::string& name) : column_path_(name) {} + + explicit Builder(const std::shared_ptr& path) + : Builder(path->ToDotString()) {} + + /// Set an explicit column key. If applied on a file that contains + /// key metadata for this column the metadata will be ignored, + /// the column will be decrypted with this key. + /// key length must be either 16, 24 or 32 bytes. + Builder* key(const std::string& key); + + std::shared_ptr build(); + + private: + const std::string column_path_; + std::string key_; + }; + + ColumnDecryptionProperties() = default; + ColumnDecryptionProperties(const ColumnDecryptionProperties& other) = default; + ColumnDecryptionProperties(ColumnDecryptionProperties&& other) = default; + + std::string column_path() const { return column_path_; } + std::string key() const { return key_; } + bool is_utilized() { return utilized_; } + + /// ColumnDecryptionProperties object can be used for reading one file only. + /// Mark ColumnDecryptionProperties as utilized once it is used in + /// FileDecryptionProperties as the encryption key will be wiped out upon + /// completion of file reading. + void set_utilized() { utilized_ = true; } + + /// Upon completion of file reading, the encryption key + /// will be wiped out. 
+ void WipeOutDecryptionKey(); + + std::shared_ptr DeepClone(); + + private: + const std::string column_path_; + std::string key_; + bool utilized_; + + /// This class is only required for setting explicit column decryption keys - + /// to override key retriever (or to provide keys when key metadata and/or + /// key retriever are not available) + explicit ColumnDecryptionProperties(const std::string& column_path, + const std::string& key); +}; + +class PARQUET_EXPORT AADPrefixVerifier { + public: + /// Verifies identity (AAD Prefix) of individual file, + /// or of file collection in a data set. + /// Throws exception if an AAD prefix is wrong. + /// In a data set, AAD Prefixes should be collected, + /// and then checked for missing files. + virtual void Verify(const std::string& aad_prefix) = 0; + virtual ~AADPrefixVerifier() {} +}; + +class PARQUET_EXPORT FileDecryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + Builder() { + check_plaintext_footer_integrity_ = kDefaultCheckSignature; + plaintext_files_allowed_ = kDefaultAllowPlaintextFiles; + } + + /// Set an explicit footer key. If applied on a file that contains + /// footer key metadata the metadata will be ignored, the footer + /// will be decrypted/verified with this key. + /// If explicit key is not set, footer key will be fetched from + /// key retriever. + /// With explicit keys or AAD prefix, new encryption properties object must be + /// created for each encrypted file. + /// Explicit encryption keys (footer and column) are cloned. + /// Upon completion of file reading, the cloned encryption keys in the properties + /// will be wiped out (array values set to 0). + /// Caller is responsible for wiping out the input key array. + /// param footerKey Key length must be either 16, 24 or 32 bytes. + Builder* footer_key(const std::string footer_key); + + /// Set explicit column keys (decryption properties). + /// Its also possible to set a key retriever on this property object. + /// Upon file decryption, availability of explicit keys is checked before + /// invocation of the retriever callback. + /// If an explicit key is available for a footer or a column, + /// its key metadata will be ignored. + Builder* column_keys( + const ColumnPathToDecryptionPropertiesMap& column_decryption_properties); + + /// Set a key retriever callback. Its also possible to + /// set explicit footer or column keys on this file property object. + /// Upon file decryption, availability of explicit keys is checked before + /// invocation of the retriever callback. + /// If an explicit key is available for a footer or a column, + /// its key metadata will be ignored. + Builder* key_retriever(const std::shared_ptr& key_retriever); + + /// Skip integrity verification of plaintext footers. + /// If not called, integrity of plaintext footers will be checked in runtime, + /// and an exception will be thrown in the following situations: + /// - footer signing key is not available + /// (not passed, or not found by key retriever) + /// - footer content and signature don't match + Builder* disable_footer_signature_verification() { + check_plaintext_footer_integrity_ = false; + return this; + } + + /// Explicitly supply the file AAD prefix. + /// A must when a prefix is used for file encryption, but not stored in file. + /// If AAD prefix is stored in file, it will be compared to the explicitly + /// supplied value and an exception will be thrown if they differ. 
+    Builder* aad_prefix(const std::string& aad_prefix);
+
+    /// Set a callback for verification of AAD Prefixes stored in the file.
+    Builder* aad_prefix_verifier(
+        std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier);
+
+    /// By default, reading plaintext (unencrypted) files is not
+    /// allowed when using a decryptor
+    /// - in order to detect files that were mistakenly left unencrypted.
+    /// However, the default behavior can be overridden by calling this method.
+    /// The caller should then use a different mechanism to ensure encryption
+    /// of files with sensitive data.
+    Builder* plaintext_files_allowed() {
+      plaintext_files_allowed_ = true;
+      return this;
+    }
+
+    std::shared_ptr<FileDecryptionProperties> build() {
+      return std::shared_ptr<FileDecryptionProperties>(new FileDecryptionProperties(
+          footer_key_, key_retriever_, check_plaintext_footer_integrity_, aad_prefix_,
+          aad_prefix_verifier_, column_decryption_properties_, plaintext_files_allowed_));
+    }
+
+   private:
+    std::string footer_key_;
+    std::string aad_prefix_;
+    std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;
+    ColumnPathToDecryptionPropertiesMap column_decryption_properties_;
+
+    std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
+    bool check_plaintext_footer_integrity_;
+    bool plaintext_files_allowed_;
+  };
+
+  std::string column_key(const std::string& column_path) const;
+
+  std::string footer_key() const { return footer_key_; }
+
+  std::string aad_prefix() const { return aad_prefix_; }
+
+  const std::shared_ptr<DecryptionKeyRetriever>& key_retriever() const {
+    return key_retriever_;
+  }
+
+  bool check_plaintext_footer_integrity() const {
+    return check_plaintext_footer_integrity_;
+  }
+
+  bool plaintext_files_allowed() const { return plaintext_files_allowed_; }
+
+  const std::shared_ptr<AADPrefixVerifier>& aad_prefix_verifier() const {
+    return aad_prefix_verifier_;
+  }
+
+  /// Upon completion of file reading, the encryption keys in the properties
+  /// will be wiped out (array values set to 0).
+  void WipeOutDecryptionKeys();
+
+  bool is_utilized();
+
+  /// A FileDecryptionProperties object can be used for reading one file only.
+  /// Mark FileDecryptionProperties as utilized once it is used to read a file, as
+  /// the encryption keys will be wiped out upon completion of file reading.
+  void set_utilized() { utilized_ = true; }
+
+  /// A FileDecryptionProperties object can be used for reading one file only
+  /// (unless this object holds only the key-retrieval callback, and no explicit
+  /// keys or AAD prefix).
+  /// At the end, keys are wiped out in memory.
+  /// This method allows cloning identical properties for another file,
+  /// with an option to update the AAD prefix (if new_aad_prefix is empty,
+  /// the AAD prefix will be cloned too).
+  std::shared_ptr<FileDecryptionProperties> DeepClone(std::string new_aad_prefix = "");
+
+ private:
+  std::string footer_key_;
+  std::string aad_prefix_;
+  std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;
+
+  const std::string empty_string_ = "";
+  ColumnPathToDecryptionPropertiesMap column_decryption_properties_;
+
+  std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
+  bool check_plaintext_footer_integrity_;
+  bool plaintext_files_allowed_;
+  bool utilized_;
+
+  FileDecryptionProperties(
+      const std::string& footer_key,
+      std::shared_ptr<DecryptionKeyRetriever> key_retriever,
+      bool check_plaintext_footer_integrity, const std::string& aad_prefix,
+      std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier,
+      const ColumnPathToDecryptionPropertiesMap& column_decryption_properties,
+      bool plaintext_files_allowed);
+};
+
+class PARQUET_EXPORT FileEncryptionProperties {
+ public:
+  class PARQUET_EXPORT Builder {
+   public:
+    explicit Builder(const std::string& footer_key)
+        : parquet_cipher_(kDefaultEncryptionAlgorithm),
+          encrypted_footer_(kDefaultEncryptedFooter) {
+      footer_key_ = footer_key;
+      store_aad_prefix_in_file_ = false;
+    }
+
+    /// Create files with a plaintext footer.
+    /// If not called, the files will be created with an encrypted footer (default).
+    Builder* set_plaintext_footer() {
+      encrypted_footer_ = false;
+      return this;
+    }
+
+    /// Set the encryption algorithm.
+    /// If not called, files will be encrypted with AES_GCM_V1 (default).
+    Builder* algorithm(ParquetCipher::type parquet_cipher) {
+      parquet_cipher_ = parquet_cipher;
+      return this;
+    }
+
+    /// Set key retrieval metadata (converted from a string).
+    /// Use either footer_key_metadata or footer_key_id, not both.
+    Builder* footer_key_id(const std::string& key_id);
+
+    /// Set key retrieval metadata.
+    /// Use either footer_key_metadata or footer_key_id, not both.
+    Builder* footer_key_metadata(const std::string& footer_key_metadata);
+
+    /// Set the file AAD Prefix.
+    Builder* aad_prefix(const std::string& aad_prefix);
+
+    /// Skip storing the AAD Prefix in the file.
+    /// If not called, and if the AAD Prefix is set, it will be stored.
+    Builder* disable_aad_prefix_storage();
+
+    /// Set the list of encrypted columns and their properties (keys etc).
+    /// If not called, all columns will be encrypted with the footer key.
+    /// If called, the file columns not in the list will be left unencrypted.
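+    ///
+    /// For example, a minimal sketch (the column name and key bytes are
+    /// illustrative assumptions, and a key() setter on
+    /// ColumnEncryptionProperties::Builder is assumed):
+    ///
+    ///   ColumnPathToEncryptionPropertiesMap encrypted_cols;
+    ///   encrypted_cols["secret_column"] =
+    ///       ColumnEncryptionProperties::Builder("secret_column")
+    ///           .key("1234567890123450")  // 16-byte column key
+    ///           ->build();
+    ///   file_encryption_builder.encrypted_columns(encrypted_cols);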
+    Builder* encrypted_columns(
+        const ColumnPathToEncryptionPropertiesMap& encrypted_columns);
+
+    std::shared_ptr<FileEncryptionProperties> build() {
+      return std::shared_ptr<FileEncryptionProperties>(new FileEncryptionProperties(
+          parquet_cipher_, footer_key_, footer_key_metadata_, encrypted_footer_,
+          aad_prefix_, store_aad_prefix_in_file_, encrypted_columns_));
+    }
+
+   private:
+    ParquetCipher::type parquet_cipher_;
+    bool encrypted_footer_;
+    std::string footer_key_;
+    std::string footer_key_metadata_;
+
+    std::string aad_prefix_;
+    bool store_aad_prefix_in_file_;
+    ColumnPathToEncryptionPropertiesMap encrypted_columns_;
+  };
+
+  bool encrypted_footer() const { return encrypted_footer_; }
+
+  EncryptionAlgorithm algorithm() const { return algorithm_; }
+
+  std::string footer_key() const { return footer_key_; }
+
+  std::string footer_key_metadata() const { return footer_key_metadata_; }
+
+  std::string file_aad() const { return file_aad_; }
+
+  std::shared_ptr<ColumnEncryptionProperties> column_encryption_properties(
+      const std::string& column_path);
+
+  bool is_utilized() const { return utilized_; }
+
+  /// A FileEncryptionProperties object can be used for writing one file only.
+  /// Mark FileEncryptionProperties as utilized once it is used to write a file, as
+  /// the encryption keys will be wiped out upon completion of file writing.
+  void set_utilized() { utilized_ = true; }
+
+  /// Upon completion of file writing, the encryption keys
+  /// will be wiped out (array values set to 0).
+  void WipeOutEncryptionKeys();
+
+  /// A FileEncryptionProperties object can be used for writing one file only
+  /// (at the end, keys are wiped out in memory).
+  /// This method allows cloning identical properties for another file,
+  /// with an option to update the AAD prefix (if new_aad_prefix is empty,
+  /// the AAD prefix will be cloned too).
+  std::shared_ptr<FileEncryptionProperties> DeepClone(std::string new_aad_prefix = "");
+
+  ColumnPathToEncryptionPropertiesMap encrypted_columns() const {
+    return encrypted_columns_;
+  }
+
+ private:
+  EncryptionAlgorithm algorithm_;
+  std::string footer_key_;
+  std::string footer_key_metadata_;
+  bool encrypted_footer_;
+  std::string file_aad_;
+  std::string aad_prefix_;
+  bool utilized_;
+  bool store_aad_prefix_in_file_;
+  ColumnPathToEncryptionPropertiesMap encrypted_columns_;
+
+  FileEncryptionProperties(ParquetCipher::type cipher, const std::string& footer_key,
+                           const std::string& footer_key_metadata, bool encrypted_footer,
+                           const std::string& aad_prefix, bool store_aad_prefix_in_file,
+                           const ColumnPathToEncryptionPropertiesMap& encrypted_columns);
+};
+
+} // namespace parquet
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h
new file mode 100644
index 0000000000000000000000000000000000000000..83f028a4bc1e9e0d24e21e7acfb785af0e5b37f7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +/// Stores encryption key material outside the Parquet file, for example in a separate +/// small file in the same folder. This is important for “key rotation”, when MEKs have to +/// be changed (if compromised; or periodically, just in case) - without modifying the +/// Parquet files (often immutable). +class PARQUET_EXPORT FileKeyMaterialStore { + public: + /// Add key material for one encryption key. + virtual void AddKeyMaterial(std::string key_id_in_file, std::string key_material) = 0; + + /// Get key material + virtual std::string GetKeyMaterial(std::string key_id_in_file) = 0; + + /// After key material was added for all keys in the given Parquet file, + /// save material in persistent store. + virtual void SaveMaterial() = 0; + + /// Remove key material from persistent store. Used in key rotation. + virtual void RemoveMaterial() = 0; + + /// Move key material to another store. Used in key rotation. + virtual void MoveMaterialTo(std::shared_ptr target_key_store) = 0; + + /// Returns the Set of all key IDs in this store (for the given Parquet file) + virtual std::vector GetKeyIDSet() = 0; + + virtual ~FileKeyMaterialStore() {} +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..26b9719de64dbafe39123c214d74f35301f8713b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/concurrent_map.h" + +#include "parquet/encryption/file_key_material_store.h" +#include "parquet/encryption/key_encryption_key.h" +#include "parquet/encryption/key_toolkit.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +// This class will generate "key metadata" from "data encryption key" and "master key", +// following these steps: +// 1. Wrap "data encryption key". 
There are 2 modes:
+// 1.1. single wrapping: encrypt the "data encryption key" directly with the "master
+// encryption key"
+// 1.2. double wrapping: 2 steps:
+// 1.2.1. a "key encryption key" is generated randomly (see the KeyEncryptionKey class)
+// 1.2.2. the "data encryption key" is encrypted with the above "key encryption key"
+// 2. Create "key material" (see structure in the KeyMaterial class)
+// 3. Create "key metadata" with "key material" inside or a reference to outside "key
+// material" (see structure in the KeyMetadata class).
+class PARQUET_EXPORT FileKeyWrapper {
+ public:
+  static constexpr int kKeyEncryptionKeyLength = 16;
+  static constexpr int kKeyEncryptionKeyIdLength = 16;
+
+  /// key_toolkit and kms_connection_config are used to get a KmsClient from the
+  /// cache, or to create one if it is not in the cache yet.
+  /// cache_entry_lifetime_seconds is the lifetime of a KmsClient in the cache.
+  /// key_material_store is used to store "key material" outside the Parquet file;
+  /// it is NULL if "key material" is stored inside the Parquet file.
+  FileKeyWrapper(KeyToolkit* key_toolkit,
+                 const KmsConnectionConfig& kms_connection_config,
+                 std::shared_ptr<FileKeyMaterialStore> key_material_store,
+                 double cache_entry_lifetime_seconds, bool double_wrapping);
+
+  /// Creates the key_metadata field for a given data key, by wrapping the key
+  /// with the master key.
+  /// When external key material is used, an identifier is usually generated
+  /// automatically but may be specified explicitly to support key rotation,
+  /// which requires keeping the same identifiers.
+  std::string GetEncryptionKeyMetadata(const std::string& data_key,
+                                       const std::string& master_key_id,
+                                       bool is_footer_key,
+                                       std::string key_id_in_file = "");
+
+ private:
+  KeyEncryptionKey CreateKeyEncryptionKey(const std::string& master_key_id);
+
+  /// A map of Master Encryption Key ID -> KeyEncryptionKey, for the current token
+  std::shared_ptr<::arrow::util::ConcurrentMap<std::string, KeyEncryptionKey>>
+      kek_per_master_key_id_;
+
+  std::shared_ptr<KmsClient> kms_client_;
+  KmsConnectionConfig kms_connection_config_;
+  std::shared_ptr<FileKeyMaterialStore> key_material_store_;
+  const double cache_entry_lifetime_seconds_;
+  const bool double_wrapping_;
+  uint16_t key_counter_;
+};
+
+} // namespace parquet::encryption
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h
new file mode 100644
index 0000000000000000000000000000000000000000..896a53202f589158ae684aa5df9c1f69cae86b28
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h
@@ -0,0 +1,89 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" + +#include "parquet/encryption/file_key_material_store.h" + +namespace parquet::encryption { + +/// A FileKeyMaterialStore that stores key material in a file system file in the same +/// folder as the Parquet file. +class PARQUET_EXPORT FileSystemKeyMaterialStore : public FileKeyMaterialStore { + public: + static constexpr const char kKeyMaterialFilePrefix[] = "_KEY_MATERIAL_FOR_"; + static constexpr const char kTempFilePrefix[] = "_TMP"; + static constexpr const char kKeyMaterialFileSuffix[] = ".json"; + + FileSystemKeyMaterialStore() {} + FileSystemKeyMaterialStore(const std::string& key_material_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system); + + /// Creates a new file system key material store for a parquet file. + /// When use_tmp_prefix is true, files are saved with an extra _TMP prefix so they don't + /// conflict with existing external material files. This is useful during key rotation + /// so that temporary key material files can be created while using the existing key + /// material, before moving the key material to the non-temporary location. + static std::shared_ptr Make( + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, bool use_tmp_prefix); + + /// Add key material for one encryption key. + void AddKeyMaterial(std::string key_id_in_file, std::string key_material) { + key_material_map_.insert({key_id_in_file, key_material}); + } + + /// Get key material + std::string GetKeyMaterial(std::string key_id_in_file) { + if (key_material_map_.empty()) { + LoadKeyMaterialMap(); + } + auto found = key_material_map_.find(key_id_in_file); + return found->second; + } + + /// After key material was added for all keys in the given Parquet file, + /// save material in persistent store. + void SaveMaterial(); + + /// Remove key material from persistent store. Used in key rotation. + void RemoveMaterial(); + + /// Move key material to another store. Used in key rotation. + void MoveMaterialTo(std::shared_ptr target_key_store); + + /// Returns the Set of all key IDs in this store (for the given Parquet file) + std::vector GetKeyIDSet(); + + private: + std::string GetStorageFilePath() { return key_material_file_path_; } + + std::string BuildKeyMaterialMapJson(); + void LoadKeyMaterialMap(); + std::string key_material_file_path_; + std::shared_ptr<::arrow::fs::FileSystem> file_system_; + /// Maps ID of a key in Parquet file and key material + std::unordered_map key_material_map_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h new file mode 100644 index 0000000000000000000000000000000000000000..62263ee3cd5062ece20ac0f79b89d3cf0312f360 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+#include <utility>
+
+#include "arrow/util/base64.h"
+
+namespace parquet::encryption {
+
+// In the double wrapping mode, each "data encryption key" (DEK) is encrypted with a
+// "key encryption key" (KEK), which in turn is encrypted with a "master encryption key"
+// (MEK). In a writer process, a random KEK is generated for each MEK ID, and cached in
+// a map. This makes it possible to interact with a KMS server only once for each MEK,
+// in order to wrap its KEK. "Data encryption key" (DEK) wrapping is performed locally,
+// and does not involve an interaction with a KMS server.
+class KeyEncryptionKey {
+ public:
+  KeyEncryptionKey(std::string kek_bytes, std::string kek_id,
+                   std::string encoded_wrapped_kek)
+      : kek_bytes_(std::move(kek_bytes)),
+        kek_id_(std::move(kek_id)),
+        encoded_kek_id_(::arrow::util::base64_encode(kek_id_)),
+        encoded_wrapped_kek_(std::move(encoded_wrapped_kek)) {}
+
+  const std::string& kek_bytes() const { return kek_bytes_; }
+
+  const std::string& kek_id() const { return kek_id_; }
+
+  const std::string& encoded_kek_id() const { return encoded_kek_id_; }
+
+  const std::string& encoded_wrapped_kek() const { return encoded_wrapped_kek_; }
+
+ private:
+  std::string kek_bytes_;
+  std::string kek_id_;
+  std::string encoded_kek_id_;
+  std::string encoded_wrapped_kek_;
+};
+
+} // namespace parquet::encryption
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fe8ac7ccb9db3fb92da42064f9fe2aeabdbfb52
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h
@@ -0,0 +1,91 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+#include <variant>
+
+#include "parquet/encryption/key_material.h"
+#include "parquet/exception.h"
+#include "parquet/platform.h"
+
+namespace parquet::encryption {
+
+// The Parquet encryption specification defines "key metadata" as an arbitrary byte
+// array, generated by file writers for each encryption key, and passed to the low-level
+// API for storage in the file footer. The "key metadata" field is made available to
+// file readers to enable recovery of the key.
This interface can be utilized for implementation +// of any key management scheme. +// +// The keytools package (PARQUET-1373) implements one approach, of many possible, to key +// management and to generation of the "key metadata" fields. This approach, based on the +// "envelope encryption" pattern, allows integration with KMS servers. It keeps the actual +// material, required to recover a key, in a "key material" object (see the KeyMaterial +// class for details). This class is implemented to support version 1 of the parquet key +// management tools specification. +// +// KeyMetadata writes (and reads) the "key metadata" field as a flat json object, +// with the following fields: +// 1. "keyMaterialType" - a String, with the type of key material. +// 2. "internalStorage" - a boolean. If true, means that "key material" is kept inside the +// "key metadata" field. If false, "key material" is kept externally (outside Parquet +// files) - in this case, "key metadata" keeps a reference to the external "key material". +// 3. "keyReference" - a String, with the reference to the external "key material". +// Written only if internalStorage is false. +// +// If internalStorage is true, "key material" is a part of "key metadata", and the json +// keeps additional fields, described in the KeyMaterial class. +class PARQUET_EXPORT KeyMetadata { + public: + static constexpr const char kKeyMaterialInternalStorageField[] = "internalStorage"; + static constexpr const char kKeyReferenceField[] = "keyReference"; + + /// key_metadata_bytes is the key metadata field stored in the parquet file, + /// in the serialized json object format. + static KeyMetadata Parse(const std::string& key_metadata_bytes); + + static std::string CreateSerializedForExternalMaterial( + const std::string& key_reference); + + bool key_material_stored_internally() const { return is_internal_storage_; } + + const KeyMaterial& key_material() const { + if (!is_internal_storage_) { + throw ParquetException("key material is stored externally."); + } + return ::std::get(key_material_or_reference_); + } + + const std::string& key_reference() const { + if (is_internal_storage_) { + throw ParquetException("key material is stored internally."); + } + return ::std::get(key_material_or_reference_); + } + + private: + explicit KeyMetadata(const KeyMaterial& key_material); + explicit KeyMetadata(const std::string& key_reference); + + bool is_internal_storage_; + /// If is_internal_storage_ is true, KeyMaterial is set, + /// else a string referencing to an outside "key material" is set. + ::std::variant key_material_or_reference_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h new file mode 100644 index 0000000000000000000000000000000000000000..f63ade4c8c93f74f99f69baca9f626d5ba15c1e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "parquet/encryption/key_encryption_key.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/encryption/two_level_cache_with_expiration.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +static constexpr uint64_t kCacheCleanPeriodForKeyRotation = 60 * 60; // 1 hour + +// KeyToolkit is a utility that keeps various tools for key management (such as key +// rotation, kms client instantiation, cache control, etc), plus a number of auxiliary +// classes for internal use. +class PARQUET_EXPORT KeyToolkit { + public: + KeyToolkit() { last_cache_clean_for_key_rotation_time_ = {}; } + + /// KMS client two level cache: token -> KMSInstanceId -> KmsClient + TwoLevelCacheWithExpiration>& kms_client_cache_per_token() { + return kms_client_cache_; + } + /// Key encryption key two level cache for wrapping: token -> MasterEncryptionKeyId -> + /// KeyEncryptionKey + TwoLevelCacheWithExpiration& kek_write_cache_per_token() { + return key_encryption_key_write_cache_; + } + + /// Key encryption key two level cache for unwrapping: token -> KeyEncryptionKeyId -> + /// KeyEncryptionKeyBytes + TwoLevelCacheWithExpiration& kek_read_cache_per_token() { + return key_encryption_key_read_cache_; + } + + std::shared_ptr GetKmsClient( + const KmsConnectionConfig& kms_connection_config, double cache_entry_lifetime_ms); + + /// Flush any caches that are tied to the (compromised) access_token + void RemoveCacheEntriesForToken(const std::string& access_token); + + void RemoveCacheEntriesForAllTokens(); + + void RegisterKmsClientFactory(std::shared_ptr kms_client_factory) { + if (kms_client_factory_ != NULL) { + throw ParquetException("KMS client factory has already been registered."); + } + kms_client_factory_ = kms_client_factory; + } + + /// Key rotation. In the single wrapping mode, decrypts data keys with old master keys, + /// then encrypts them with new master keys. In the double wrapping mode, decrypts KEKs + /// (key encryption keys) with old master keys, generates new KEKs and encrypts them + /// with new master keys. Works only if key material is not stored internally in file + /// footers. Not supported in local key wrapping mode. Method can be run by multiple + /// threads, but each thread must work on different files. 
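+  ///
+  /// A minimal call sketch (the factory, configuration and path values are
+  /// illustrative assumptions; the local file system stands in for any
+  /// ::arrow::fs::FileSystem):
+  ///
+  ///   KeyToolkit toolkit;
+  ///   toolkit.RegisterKmsClientFactory(my_kms_client_factory);
+  ///   toolkit.RotateMasterKeys(kms_connection_config, "/data/part-0.parquet",
+  ///                            std::make_shared<::arrow::fs::LocalFileSystem>(),
+  ///                            /*double_wrapping=*/true,
+  ///                            /*cache_lifetime_seconds=*/600.0);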
+ void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config, + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, + bool double_wrapping, double cache_lifetime_seconds); + + private: + TwoLevelCacheWithExpiration> kms_client_cache_; + TwoLevelCacheWithExpiration key_encryption_key_write_cache_; + TwoLevelCacheWithExpiration key_encryption_key_read_cache_; + std::shared_ptr kms_client_factory_; + mutable ::arrow::util::Mutex last_cache_clean_for_key_rotation_time_mutex_; + internal::TimePoint last_cache_clean_for_key_rotation_time_; +}; + +// "data encryption key" and "master key identifier" are paired together as output when +// parsing from "key material" +class PARQUET_EXPORT KeyWithMasterId { + public: + KeyWithMasterId(std::string key_bytes, std::string master_id) + : key_bytes_(std::move(key_bytes)), master_id_(std::move(master_id)) {} + + const std::string& data_key() const { return key_bytes_; } + const std::string& master_id() const { return master_id_; } + + private: + const std::string key_bytes_; + const std::string master_id_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h new file mode 100644 index 0000000000000000000000000000000000000000..a55fd552eed5fcc1be5316bd47b4114592f888b7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/mutex.h" + +#include "parquet/exception.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +/// This class wraps the key access token of a KMS server. If your token changes over +/// time, you should keep the reference to the KeyAccessToken object and call Refresh() +/// method every time you have a new token. +class PARQUET_EXPORT KeyAccessToken { + public: + KeyAccessToken() = default; + + explicit KeyAccessToken(const std::string value) : value_(value) {} + + void Refresh(const std::string& new_value) { + auto lock = mutex_.Lock(); + value_ = new_value; + } + + const std::string& value() const { + auto lock = mutex_.Lock(); + return value_; + } + + private: + std::string value_; + mutable ::arrow::util::Mutex mutex_; +}; + +struct PARQUET_EXPORT KmsConnectionConfig { + std::string kms_instance_id; + std::string kms_instance_url; + /// If the access token is changed in the future, you should keep a reference to + /// this object and call Refresh() on it whenever there is a new access token. 
+ std::shared_ptr refreshable_key_access_token; + std::unordered_map custom_kms_conf; + + KmsConnectionConfig(); + + const std::string& key_access_token() const { + if (refreshable_key_access_token == NULL || + refreshable_key_access_token->value().empty()) { + throw ParquetException("key access token is not set!"); + } + return refreshable_key_access_token->value(); + } + + void SetDefaultIfEmpty(); +}; + +class PARQUET_EXPORT KmsClient { + public: + static constexpr const char kKmsInstanceIdDefault[] = "DEFAULT"; + static constexpr const char kKmsInstanceUrlDefault[] = "DEFAULT"; + static constexpr const char kKeyAccessTokenDefault[] = "DEFAULT"; + + /// Wraps a key - encrypts it with the master key, encodes the result + /// and potentially adds a KMS-specific metadata. + virtual std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) = 0; + + /// Decrypts (unwraps) a key with the master key. + virtual std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) = 0; + virtual ~KmsClient() {} +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..7a7c77c7eebbfbb687575acb12b89c1c2e99461a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "parquet/encryption/kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +class PARQUET_EXPORT KmsClientFactory { + public: + explicit KmsClientFactory(bool wrap_locally = false) : wrap_locally_(wrap_locally) {} + + virtual ~KmsClientFactory() = default; + + virtual std::shared_ptr CreateKmsClient( + const KmsConnectionConfig& kms_connection_config) = 0; + + protected: + bool wrap_locally_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h new file mode 100644 index 0000000000000000000000000000000000000000..3c90d82960525bf10c0dc23ea6a2c96c78104fea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/util/concurrent_map.h"
+
+#include "parquet/encryption/kms_client.h"
+#include "parquet/platform.h"
+
+namespace parquet::encryption {
+
+/// This class supports the local wrapping mode: master keys are fetched from the KMS
+/// server and used to encrypt other keys (data encryption keys or key encryption keys).
+class PARQUET_EXPORT LocalWrapKmsClient : public KmsClient {
+ public:
+  static constexpr const char kLocalWrapNoKeyVersion[] = "NO_VERSION";
+
+  explicit LocalWrapKmsClient(const KmsConnectionConfig& kms_connection_config);
+
+  std::string WrapKey(const std::string& key_bytes,
+                      const std::string& master_key_identifier) override;
+
+  std::string UnwrapKey(const std::string& wrapped_key,
+                        const std::string& master_key_identifier) override;
+
+ protected:
+  /// Get the master key from the remote KMS server.
+  /// Note: this function might be called by multiple threads
+  virtual std::string GetMasterKeyFromServer(
+      const std::string& master_key_identifier) = 0;
+
+ private:
+  /// KMS systems wrap keys by encrypting them with master keys, and attaching
+  /// additional information (such as the version number of the master key) to the
+  /// result of encryption. The master key version is required for key rotation.
+  /// Currently, the local wrapping mode does not support key rotation (because not
+  /// all KMS systems allow fetching a master key by its ID and version number).
+  /// Still, the local wrapping mode adds a placeholder for the master key version,
+  /// which will enable support for key rotation in this mode in the future, with
+  /// appropriate KMS systems. This will also enable backward compatibility, where
+  /// future readers will be able to extract the master key version from files
+  /// written by the current code.
+  ///
+  /// The LocalKeyWrap class writes (and reads) the "key wrap" as a flat json with the
+  /// following fields:
+  /// 1. "masterKeyVersion" - a String, with the master key version. In the current
+  /// version, only one value is allowed - "NO_VERSION".
+  /// 2. "encryptedKey" - a String, with the key encrypted by the master key
+  /// (base64-encoded).
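+  ///
+  /// For example, a serialized key wrap produced by this scheme would look like
+  /// the following (the encrypted key value is an illustrative placeholder):
+  ///
+  ///   {"masterKeyVersion": "NO_VERSION",
+  ///    "encryptedKey": "<base64-encoded encrypted key>"}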
+ class LocalKeyWrap { + public: + static constexpr const char kLocalWrapKeyVersionField[] = "masterKeyVersion"; + static constexpr const char kLocalWrapEncryptedKeyField[] = "encryptedKey"; + + LocalKeyWrap(std::string master_key_version, std::string encrypted_encoded_key); + + static std::string CreateSerialized(const std::string& encrypted_encoded_key); + + static LocalKeyWrap Parse(const std::string& wrapped_key); + + const std::string& master_key_version() const { return master_key_version_; } + + const std::string& encrypted_encoded_key() const { return encrypted_encoded_key_; } + + private: + std::string encrypted_encoded_key_; + std::string master_key_version_; + }; + + std::string GetKeyFromServer(const std::string& key_identifier); + + protected: + KmsConnectionConfig kms_connection_config_; + ::arrow::util::ConcurrentMap master_key_cache_; +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h new file mode 100644 index 0000000000000000000000000000000000000000..9bfc774278dde9ac42699339fb1a056e3fd14a70 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This module defines an abstract interface for iterating through pages in a +// Parquet column chunk within a row group. It could be extended in the future +// to iterate through all data pages in all chunks in a file. + +#pragma once + +#include +#include +#include + +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/filesystem/localfs.h" +#include "arrow/status.h" +#include "arrow/util/io_util.h" + +#include "parquet/encryption/encryption.h" +#include "parquet/test_util.h" + +namespace parquet { +class ParquetFileReader; +namespace encryption::test { + +using ::arrow::internal::TemporaryDir; + +constexpr int kFixedLength = 10; + +const char kFooterEncryptionKey[] = "0123456789012345"; // 128bit/16 +const char kColumnEncryptionKey1[] = "1234567890123450"; +const char kColumnEncryptionKey2[] = "1234567890123451"; +const char kFileName[] = "tester"; + +// Get the path of file inside parquet test data directory +std::string data_file(const char* file); + +// A temporary directory that contains the encrypted files generated in the tests. 
+extern std::unique_ptr<TemporaryDir> temp_dir;
+
+inline ::arrow::Result<std::unique_ptr<TemporaryDir>> temp_data_dir() {
+  return TemporaryDir::Make("parquet-encryption-test-");
+}
+
+const char kDoubleFieldName[] = "double_field";
+const char kFloatFieldName[] = "float_field";
+const char kBooleanFieldName[] = "boolean_field";
+const char kInt32FieldName[] = "int32_field";
+const char kInt64FieldName[] = "int64_field";
+const char kInt96FieldName[] = "int96_field";
+const char kByteArrayFieldName[] = "ba_field";
+const char kFixedLenByteArrayFieldName[] = "flba_field";
+
+const char kFooterMasterKey[] = "0123456789012345";
+const char kFooterMasterKeyId[] = "kf";
+const char* const kColumnMasterKeys[] = {"1234567890123450", "1234567890123451",
+                                         "1234567890123452", "1234567890123453",
+                                         "1234567890123454", "1234567890123455"};
+const char* const kColumnMasterKeyIds[] = {"kc1", "kc2", "kc3", "kc4", "kc5", "kc6"};
+
+// New master key values used to simulate key rotation
+const char kNewFooterMasterKey[] = "9123456789012345";
+const char* const kNewColumnMasterKeys[] = {"9234567890123450", "9234567890123451",
+                                            "9234567890123452", "9234567890123453",
+                                            "9234567890123454", "9234567890123455"};
+
+// The result of this function is passed to TestOnlyInMemoryKmsClientFactory
+// as the key mapping to look up.
+std::unordered_map<std::string, std::string> BuildKeyMap(const char* const* column_ids,
+                                                         const char* const* column_keys,
+                                                         const char* footer_id,
+                                                         const char* footer_key);
+
+// The result of this function is passed to EncryptionConfiguration
+// as the column keys.
+std::string BuildColumnKeyMapping();
+
+// FileEncryptor and FileDecryptor are helper classes to write/read an encrypted parquet
+// file corresponding to each pair of FileEncryptionProperties/FileDecryptionProperties.
+// FileEncryptor writes the file with fixed data values, and FileDecryptor reads the
+// file and verifies the correctness of the data values.
+class FileEncryptor {
+ public:
+  FileEncryptor();
+
+  void EncryptFile(
+      std::string file,
+      std::shared_ptr<FileEncryptionProperties> encryption_configurations);
+
+ private:
+  std::shared_ptr<schema::GroupNode> SetupEncryptionSchema();
+
+  int num_rowgroups_ = 5;
+  int rows_per_rowgroup_ = 50;
+  std::shared_ptr<schema::GroupNode> schema_;
+};
+
+class FileDecryptor {
+ public:
+  void DecryptFile(
+      const std::string& file_name,
+      const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
+  void DecryptPageIndex(
+      const std::string& file_name,
+      const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
+
+ private:
+  void CheckFile(
+      parquet::ParquetFileReader* file_reader,
+      const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
+  void CheckPageIndex(
+      parquet::ParquetFileReader* file_reader,
+      const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
+};
+
+} // namespace encryption::test
+} // namespace parquet
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5fdc797b8ca78a7eddbbdd57dc5a56cb8745526
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h
@@ -0,0 +1,94 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/base64.h" + +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/encryption/local_wrap_kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +// This is a mock class, built for testing only. Don't use it as an example of +// LocalWrapKmsClient implementation. +class TestOnlyLocalWrapInMemoryKms : public LocalWrapKmsClient { + public: + explicit TestOnlyLocalWrapInMemoryKms(const KmsConnectionConfig& kms_connection_config); + + static void InitializeMasterKeys( + const std::unordered_map& master_keys_map); + + protected: + std::string GetMasterKeyFromServer(const std::string& master_key_identifier) override; + + private: + static std::unordered_map master_key_map_; +}; + +// This is a mock class, built for testing only. Don't use it as an example of KmsClient +// implementation. +class TestOnlyInServerWrapKms : public KmsClient { + public: + static void InitializeMasterKeys( + const std::unordered_map& master_keys_map); + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + static void StartKeyRotation( + const std::unordered_map& new_master_keys_map); + static void FinishKeyRotation(); + + private: + std::string GetMasterKeyFromServer(const std::string& master_key_identifier); + + // Different wrapping and unwrapping key maps to imitate versioning + // and support key rotation. + static std::unordered_map unwrapping_master_key_map_; + static std::unordered_map wrapping_master_key_map_; +}; + +// This is a mock class, built for testing only. Don't use it as an example of +// KmsClientFactory implementation. +class TestOnlyInMemoryKmsClientFactory : public KmsClientFactory { + public: + TestOnlyInMemoryKmsClientFactory( + bool wrap_locally, + const std::unordered_map& master_keys_map) + : KmsClientFactory(wrap_locally) { + TestOnlyLocalWrapInMemoryKms::InitializeMasterKeys(master_keys_map); + TestOnlyInServerWrapKms::InitializeMasterKeys(master_keys_map); + } + + std::shared_ptr CreateKmsClient( + const KmsConnectionConfig& kms_connection_config) { + if (wrap_locally_) { + return std::make_shared(kms_connection_config); + } else { + return std::make_shared(); + } + } +}; + +} // namespace parquet::encryption diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..623811718482c591e708a297dff9eb35ae0c85a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace parquet { + +class Decryptor; +class Encryptor; + +class InternalFileDecryptor; +class InternalFileEncryptor; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h new file mode 100644 index 0000000000000000000000000000000000000000..826f5bdc8bf73741ac37d457d6013dfc8d0fb5a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h @@ -0,0 +1,158 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/string_builder.h" +#include "parquet/platform.h" + +// PARQUET-1085 +#if !defined(ARROW_UNUSED) +#define ARROW_UNUSED(x) UNUSED(x) +#endif + +// Parquet exception to Arrow Status + +#define BEGIN_PARQUET_CATCH_EXCEPTIONS try { +#define END_PARQUET_CATCH_EXCEPTIONS \ + } \ + catch (const ::parquet::ParquetStatusException& e) { \ + return e.status(); \ + } \ + catch (const ::parquet::ParquetException& e) { \ + return ::arrow::Status::IOError(e.what()); \ + } + +// clang-format off + +#define PARQUET_CATCH_NOT_OK(s) \ + BEGIN_PARQUET_CATCH_EXCEPTIONS \ + (s); \ + END_PARQUET_CATCH_EXCEPTIONS + +// clang-format on + +#define PARQUET_CATCH_AND_RETURN(s) \ + BEGIN_PARQUET_CATCH_EXCEPTIONS \ + return (s); \ + END_PARQUET_CATCH_EXCEPTIONS + +// Arrow Status to Parquet exception + +#define PARQUET_IGNORE_NOT_OK(s) \ + do { \ + ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \ + ARROW_UNUSED(_s); \ + } while (0) + +#define PARQUET_THROW_NOT_OK(s) \ + do { \ + ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \ + if (!_s.ok()) { \ + throw ::parquet::ParquetStatusException(std::move(_s)); \ + } \ + } while (0) + +#define PARQUET_ASSIGN_OR_THROW_IMPL(status_name, lhs, rexpr) \ + auto status_name = (rexpr); \ + PARQUET_THROW_NOT_OK(status_name.status()); \ + lhs = std::move(status_name).ValueOrDie(); + +#define PARQUET_ASSIGN_OR_THROW(lhs, rexpr) \ + PARQUET_ASSIGN_OR_THROW_IMPL(ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +namespace parquet { + +class ParquetException : public std::exception { + public: + PARQUET_NORETURN static void EofException(const std::string& msg = "") { + static std::string prefix = "Unexpected end of stream"; + if (msg.empty()) { + throw ParquetException(prefix); + } + throw ParquetException(prefix, ": ", msg); + } + + PARQUET_NORETURN static void NYI(const std::string& msg = "") { + throw ParquetException("Not yet implemented: ", msg, "."); + } + + template + explicit ParquetException(Args&&... args) + : msg_(::arrow::util::StringBuilder(std::forward(args)...)) {} + + explicit ParquetException(std::string msg) : msg_(std::move(msg)) {} + + explicit ParquetException(const char* msg, const std::exception&) : msg_(msg) {} + + ParquetException(const ParquetException&) = default; + ParquetException& operator=(const ParquetException&) = default; + ParquetException(ParquetException&&) = default; + ParquetException& operator=(ParquetException&&) = default; + + const char* what() const noexcept override { return msg_.c_str(); } + + private: + std::string msg_; +}; + +// Support printing a ParquetException. +// This is needed for clang-on-MSVC as there operator<< is not defined for +// std::exception. +PARQUET_EXPORT +std::ostream& operator<<(std::ostream& os, const ParquetException& exception); + +class ParquetStatusException : public ParquetException { + public: + explicit ParquetStatusException(::arrow::Status status) + : ParquetException(status.ToString()), status_(std::move(status)) {} + + const ::arrow::Status& status() const { return status_; } + + private: + ::arrow::Status status_; +}; + +// This class exists for the purpose of detecting an invalid or corrupted file. +class ParquetInvalidOrCorruptedFileException : public ParquetStatusException { + public: + ParquetInvalidOrCorruptedFileException(const ParquetInvalidOrCorruptedFileException&) = + default; + + template ::value, + int>::type = 0, + typename... 
Args> + explicit ParquetInvalidOrCorruptedFileException(Arg arg, Args&&... args) + : ParquetStatusException(::arrow::Status::Invalid(std::forward(arg), + std::forward(args)...)) {} +}; + +template +void ThrowNotOk(StatusReturnBlock&& b) { + PARQUET_THROW_NOT_OK(b()); +} + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..b59b59f95c2d8766f216a9cd923847d5483de4ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h @@ -0,0 +1,231 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/util/type_fwd.h" +#include "parquet/metadata.h" // IWYU pragma: keep +#include "parquet/platform.h" +#include "parquet/properties.h" + +namespace parquet { + +class ColumnReader; +class FileMetaData; +class PageIndexReader; +class BloomFilterReader; +class PageReader; +class RowGroupMetaData; + +namespace internal { +class RecordReader; +} + +class PARQUET_EXPORT RowGroupReader { + public: + // Forward declare a virtual class 'Contents' to aid dependency injection and more + // easily create test fixtures + // An implementation of the Contents class is defined in the .cc file + struct Contents { + virtual ~Contents() {} + virtual std::unique_ptr GetColumnPageReader(int i) = 0; + virtual const RowGroupMetaData* metadata() const = 0; + virtual const ReaderProperties* properties() const = 0; + }; + + explicit RowGroupReader(std::unique_ptr contents); + + // Returns the rowgroup metadata + const RowGroupMetaData* metadata() const; + + // Construct a ColumnReader for the indicated row group-relative + // column. Ownership is shared with the RowGroupReader. + std::shared_ptr Column(int i); + + // EXPERIMENTAL: Construct a RecordReader for the indicated column of the row group. + // Ownership is shared with the RowGroupReader. + std::shared_ptr RecordReader(int i, + bool read_dictionary = false); + + // Construct a ColumnReader, trying to enable exposed encoding. + // + // For dictionary encoding, currently we only support column chunks that are fully + // dictionary encoded, i.e., all data pages in the column chunk are dictionary encoded. + // If a column chunk uses dictionary encoding but then falls back to plain encoding, the + // encoding will not be exposed. + // + // The returned column reader provides an API GetExposedEncoding() for the + // users to check the exposed encoding and determine how to read the batches. 
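+  //
+  // A minimal sketch of that check (the row group reader setup is assumed;
+  // the enum values follow ExposedEncoding):
+  //
+  //   auto reader = row_group_reader->ColumnWithExposeEncoding(
+  //       /*i=*/0, ExposedEncoding::DICTIONARY);
+  //   if (reader->GetExposedEncoding() == ExposedEncoding::DICTIONARY) {
+  //     // consume dictionary-encoded batches
+  //   }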
+ // + // \note API EXPERIMENTAL + std::shared_ptr ColumnWithExposeEncoding( + int i, ExposedEncoding encoding_to_expose); + + // Construct a RecordReader, trying to enable exposed encoding. + // + // For dictionary encoding, currently we only support column chunks that are + // fully dictionary encoded byte arrays. The caller should verify if the reader can read + // and expose the dictionary by checking the reader's read_dictionary(). If a column + // chunk uses dictionary encoding but then falls back to plain encoding, the returned + // reader will read decoded data without exposing the dictionary. + // + // \note API EXPERIMENTAL + std::shared_ptr RecordReaderWithExposeEncoding( + int i, ExposedEncoding encoding_to_expose); + + std::unique_ptr GetColumnPageReader(int i); + + private: + // Holds a pointer to an instance of Contents implementation + std::unique_ptr contents_; +}; + +class PARQUET_EXPORT ParquetFileReader { + public: + // Declare a virtual class 'Contents' to aid dependency injection and more + // easily create test fixtures + // An implementation of the Contents class is defined in the .cc file + struct PARQUET_EXPORT Contents { + static std::unique_ptr Open( + std::shared_ptr<::arrow::io::RandomAccessFile> source, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + static ::arrow::Future> OpenAsync( + std::shared_ptr<::arrow::io::RandomAccessFile> source, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + virtual ~Contents() = default; + // Perform any cleanup associated with the file contents + virtual void Close() = 0; + virtual std::shared_ptr GetRowGroup(int i) = 0; + virtual std::shared_ptr metadata() const = 0; + virtual std::shared_ptr GetPageIndexReader() = 0; + virtual BloomFilterReader& GetBloomFilterReader() = 0; + }; + + ParquetFileReader(); + ~ParquetFileReader(); + + // Create a file reader instance from an Arrow file object. Thread-safety is + // the responsibility of the file implementation + static std::unique_ptr Open( + std::shared_ptr<::arrow::io::RandomAccessFile> source, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + // API Convenience to open a serialized Parquet file on disk, using Arrow IO + // interfaces. + static std::unique_ptr OpenFile( + const std::string& path, bool memory_map = false, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + // Asynchronously open a file reader from an Arrow file object. + // Does not throw - all errors are reported through the Future. + static ::arrow::Future> OpenAsync( + std::shared_ptr<::arrow::io::RandomAccessFile> source, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + void Open(std::unique_ptr contents); + void Close(); + + // The RowGroupReader is owned by the FileReader + std::shared_ptr RowGroup(int i); + + // Returns the file metadata. Only one instance is ever created + std::shared_ptr metadata() const; + + /// Returns the PageIndexReader. Only one instance is ever created. + /// + /// If the file does not have the page index, nullptr may be returned. + /// Because it pays to check existence of page index in the file, it + /// is possible to return a non null value even if page index does + /// not exist. It is the caller's responsibility to check the return + /// value and follow-up calls to PageIndexReader. 
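+  ///
+  /// A hedged call-pattern sketch (the reader variable and the per-row-group
+  /// accessor are illustrative assumptions, not declared in this header):
+  ///
+  ///   auto page_index = file_reader->GetPageIndexReader();
+  ///   if (page_index != nullptr) {
+  ///     auto rg_index = page_index->RowGroup(0);
+  ///     // rg_index may still be null if row group 0 has no page index
+  ///   }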
+  ///
+  /// WARNING: The returned PageIndexReader must not outlive the ParquetFileReader.
+  /// The first call to GetPageIndexReader() is not thread-safe.
+  std::shared_ptr<PageIndexReader> GetPageIndexReader();
+
+  /// Returns the BloomFilterReader. Only one instance is ever created.
+  ///
+  /// WARNING: The returned BloomFilterReader must not outlive the ParquetFileReader.
+  /// The first call to GetBloomFilterReader() is not thread-safe.
+  BloomFilterReader& GetBloomFilterReader();
+
+  /// Pre-buffer the specified column indices in all row groups.
+  ///
+  /// Readers can optionally call this to cache the necessary slices
+  /// of the file in-memory before deserialization. Arrow readers can
+  /// automatically do this via an option. This is intended to
+  /// increase performance when reading from high-latency filesystems
+  /// (e.g. Amazon S3).
+  ///
+  /// After calling this, creating readers for row groups/column
+  /// indices that were not buffered may fail. Creating multiple
+  /// readers for a subset of the buffered regions is
+  /// acceptable. This may be called again to buffer a different set
+  /// of row groups/columns.
+  ///
+  /// If memory usage is a concern, note that data will remain
+  /// buffered in memory until either \a PreBuffer() is called again,
+  /// or the reader itself is destructed. Reading - and buffering -
+  /// only one row group at a time may be useful.
+  ///
+  /// This method may throw.
+  void PreBuffer(const std::vector<int>& row_groups,
+                 const std::vector<int>& column_indices,
+                 const ::arrow::io::IOContext& ctx,
+                 const ::arrow::io::CacheOptions& options);
+
+  /// Wait for the specified row groups and column indices to be pre-buffered.
+  ///
+  /// After the returned Future completes, reading the specified row
+  /// groups/columns will not block.
+  ///
+  /// PreBuffer must be called first. This method does not throw.
+  ::arrow::Future<> WhenBuffered(const std::vector<int>& row_groups,
+                                 const std::vector<int>& column_indices) const;
+
+ private:
+  // Holds a pointer to an instance of Contents implementation
+  std::unique_ptr<Contents> contents_;
+};
+
+// Read only the Parquet file metadata
+std::shared_ptr<FileMetaData> PARQUET_EXPORT
+ReadMetaData(const std::shared_ptr<::arrow::io::RandomAccessFile>& source);
+
+/// \brief Scan all values in file. Useful for performance testing
+/// \param[in] columns the column numbers to scan. If empty, scans all
+/// \param[in] column_batch_size number of values to read at a time when scanning column
+/// \param[in] reader a ParquetFileReader instance
+/// \return number of semantic rows in file
+PARQUET_EXPORT
+int64_t ScanFileContents(std::vector<int> columns, const int32_t column_batch_size,
+                         ParquetFileReader* reader);
+
+} // namespace parquet
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..31706af86dbde8c57aa47c9678b1f16cf8e69eb0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "parquet/metadata.h" +#include "parquet/platform.h" +#include "parquet/properties.h" +#include "parquet/schema.h" + +namespace parquet { + +class ColumnWriter; + +// FIXME: copied from reader-internal.cc +static constexpr uint8_t kParquetMagic[4] = {'P', 'A', 'R', '1'}; +static constexpr uint8_t kParquetEMagic[4] = {'P', 'A', 'R', 'E'}; + +class PARQUET_EXPORT RowGroupWriter { + public: + // Forward declare a virtual class 'Contents' to aid dependency injection and more + // easily create test fixtures + // An implementation of the Contents class is defined in the .cc file + struct Contents { + virtual ~Contents() = default; + virtual int num_columns() const = 0; + virtual int64_t num_rows() const = 0; + + // to be used only with ParquetFileWriter::AppendRowGroup + virtual ColumnWriter* NextColumn() = 0; + // to be used only with ParquetFileWriter::AppendBufferedRowGroup + virtual ColumnWriter* column(int i) = 0; + + virtual int current_column() const = 0; + virtual void Close() = 0; + + /// \brief total uncompressed bytes written by the page writer + virtual int64_t total_bytes_written() const = 0; + /// \brief total bytes still compressed but not written by the page writer + virtual int64_t total_compressed_bytes() const = 0; + /// \brief total compressed bytes written by the page writer + virtual int64_t total_compressed_bytes_written() const = 0; + + virtual bool buffered() const = 0; + }; + + explicit RowGroupWriter(std::unique_ptr contents); + + /// Construct a ColumnWriter for the indicated row group-relative column. + /// + /// To be used only with ParquetFileWriter::AppendRowGroup + /// Ownership is solely within the RowGroupWriter. The ColumnWriter is only + /// valid until the next call to NextColumn or Close. As the contents are + /// directly written to the sink, once a new column is started, the contents + /// of the previous one cannot be modified anymore. + ColumnWriter* NextColumn(); + /// Index of currently written column. Equal to -1 if NextColumn() + /// has not been called yet. + int current_column(); + void Close(); + + int num_columns() const; + + /// Construct a ColumnWriter for the indicated row group column. + /// + /// To be used only with ParquetFileWriter::AppendBufferedRowGroup + /// Ownership is solely within the RowGroupWriter. The ColumnWriter is + /// valid until Close. The contents are buffered in memory and written to sink + /// on Close + ColumnWriter* column(int i); + + /** + * Number of rows that shall be written as part of this RowGroup. + */ + int64_t num_rows() const; + + /// \brief total uncompressed bytes written by the page writer + int64_t total_bytes_written() const; + /// \brief total bytes still compressed but not written by the page writer. + /// It will always return 0 from the SerializedPageWriter. 
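+  ///
+  /// (Hedged illustration, not normative: while a buffered row group is open,
+  /// total_bytes_written() counts uncompressed bytes handed to the page
+  /// writer, total_compressed_bytes() counts compressed bytes still held in
+  /// memory, and total_compressed_bytes_written() counts compressed bytes
+  /// already flushed to the sink.)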
+ int64_t total_compressed_bytes() const; + /// \brief total compressed bytes written by the page writer + int64_t total_compressed_bytes_written() const; + + /// Returns whether the current RowGroupWriter is in the buffered mode and is created + /// by calling ParquetFileWriter::AppendBufferedRowGroup. + bool buffered() const; + + private: + // Holds a pointer to an instance of Contents implementation + std::unique_ptr contents_; +}; + +PARQUET_EXPORT +void WriteFileMetaData(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +PARQUET_EXPORT +void WriteMetaDataFile(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +PARQUET_EXPORT +void WriteEncryptedFileMetadata(const FileMetaData& file_metadata, + ArrowOutputStream* sink, + const std::shared_ptr& encryptor, + bool encrypt_footer); + +PARQUET_EXPORT +void WriteEncryptedFileMetadata(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink, + const std::shared_ptr& encryptor = NULLPTR, + bool encrypt_footer = false); +PARQUET_EXPORT +void WriteFileCryptoMetaData(const FileCryptoMetaData& crypto_metadata, + ::arrow::io::OutputStream* sink); + +class PARQUET_EXPORT ParquetFileWriter { + public: + // Forward declare a virtual class 'Contents' to aid dependency injection and more + // easily create test fixtures + // An implementation of the Contents class is defined in the .cc file + struct Contents { + Contents(std::shared_ptr<::parquet::schema::GroupNode> schema, + std::shared_ptr key_value_metadata) + : schema_(), key_value_metadata_(std::move(key_value_metadata)) { + schema_.Init(std::move(schema)); + } + virtual ~Contents() {} + // Perform any cleanup associated with the file contents + virtual void Close() = 0; + + virtual RowGroupWriter* AppendRowGroup() = 0; + virtual RowGroupWriter* AppendBufferedRowGroup() = 0; + + virtual int64_t num_rows() const = 0; + virtual int num_columns() const = 0; + virtual int num_row_groups() const = 0; + + virtual const std::shared_ptr& properties() const = 0; + + const std::shared_ptr& key_value_metadata() const { + return key_value_metadata_; + } + + virtual void AddKeyValueMetadata( + const std::shared_ptr& key_value_metadata) = 0; + + // Return const-pointer to make it clear that this object is not to be copied + const SchemaDescriptor* schema() const { return &schema_; } + + SchemaDescriptor schema_; + + /// This should be the only place this is stored. Everything else is a const reference + std::shared_ptr key_value_metadata_; + + const std::shared_ptr& metadata() const { return file_metadata_; } + std::shared_ptr file_metadata_; + }; + + ParquetFileWriter(); + ~ParquetFileWriter(); + + static std::unique_ptr Open( + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr schema, + std::shared_ptr properties = default_writer_properties(), + std::shared_ptr key_value_metadata = NULLPTR); + + void Open(std::unique_ptr contents); + void Close(); + + /// Construct a RowGroupWriter with an arbitrary number of rows. + /// + /// Ownership is solely within the ParquetFileWriter. The RowGroupWriter is only valid + /// until the next call to AppendRowGroup or AppendBufferedRowGroup or Close. + RowGroupWriter* AppendRowGroup(); + + /// Construct a RowGroupWriter that buffers all the values until the RowGroup is ready. + /// Use this if you want to write a RowGroup based on a certain size + /// + /// Ownership is solely within the ParquetFileWriter. 
The RowGroupWriter is only valid + /// until the next call to AppendRowGroup or AppendBufferedRowGroup or Close. + RowGroupWriter* AppendBufferedRowGroup(); + + /// \brief Add key-value metadata to the file. + /// \param[in] key_value_metadata the metadata to add. + /// \note This will overwrite any existing metadata with the same key. + /// \throw ParquetException if Close() has been called. + void AddKeyValueMetadata( + const std::shared_ptr& key_value_metadata); + + /// Number of columns. + /// + /// This number is fixed during the lifetime of the writer as it is determined via + /// the schema. + int num_columns() const; + + /// Number of rows in the yet started RowGroups. + /// + /// Changes on the addition of a new RowGroup. + int64_t num_rows() const; + + /// Number of started RowGroups. + int num_row_groups() const; + + /// Configuration passed to the writer, e.g. the used Parquet format version. + const std::shared_ptr& properties() const; + + /// Returns the file schema descriptor + const SchemaDescriptor* schema() const; + + /// Returns a column descriptor in schema + const ColumnDescriptor* descr(int i) const; + + /// Returns the file custom metadata + const std::shared_ptr& key_value_metadata() const; + + /// Returns the file metadata, only available after calling Close(). + const std::shared_ptr metadata() const; + + private: + // Holds a pointer to an instance of Contents implementation + std::unique_ptr contents_; + std::shared_ptr file_metadata_; +}; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h new file mode 100644 index 0000000000000000000000000000000000000000..519eb459b9ca832d382bc4f9c764b64b47dbd6b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include "parquet/types.h" + +namespace parquet { +// Abstract class for hash +class Hasher { + public: + /// Compute hash for 32 bits value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(int32_t value) const = 0; + + /// Compute hash for 64 bits value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(int64_t value) const = 0; + + /// Compute hash for float value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(float value) const = 0; + + /// Compute hash for double value by using its plain encoding result. + /// + /// @param value the value to hash. 
+ /// @return hash result. + virtual uint64_t Hash(double value) const = 0; + + /// Compute hash for Int96 value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(const Int96* value) const = 0; + + /// Compute hash for ByteArray value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(const ByteArray* value) const = 0; + + /// Compute hash for fixed byte array value by using its plain encoding result. + /// + /// @param value the value address. + /// @param len the value length. + virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0; + + /// Batch compute hashes for 32 bits values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for 64 bits values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for float values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for double values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for Int96 values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for ByteArray values by using its plain encoding result. + /// + /// @param values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const ByteArray* values, int num_values, + uint64_t* hashes) const = 0; + + /// Batch compute hashes for fixed byte array values by using its plain encoding result. + /// + /// @param values the value address. + /// @param type_len the value length. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. 
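+  ///
+  /// A hedged usage sketch for the batch overloads (`hasher` stands for any
+  /// concrete Hasher implementation and is an illustrative name):
+  ///
+  ///   std::vector<int64_t> values = {1, 2, 3};
+  ///   std::vector<uint64_t> hashes(values.size());
+  ///   hasher->Hashes(values.data(), static_cast<int>(values.size()),
+  ///                  hashes.data());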
+ virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values, + uint64_t* hashes) const = 0; + + virtual ~Hasher() = default; +}; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h new file mode 100644 index 0000000000000000000000000000000000000000..3ae442dd46e57b7f86b405d9502442d3195719e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include +#include + +#include "parquet/platform.h" + +namespace parquet::internal { + +/// Builds a bitmap where each set bit indicates the corresponding level is greater +/// than rhs. +uint64_t PARQUET_EXPORT GreaterThanBitmap(const int16_t* levels, int64_t num_levels, + int16_t rhs); + +struct MinMax { + int16_t min; + int16_t max; +}; + +MinMax FindMinMax(const int16_t* levels, int64_t num_levels); + +} // namespace parquet::internal diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h new file mode 100644 index 0000000000000000000000000000000000000000..cfee50665433182571a659fbd805f27532a3f7e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "parquet/level_comparison.h" + +// Used to make sure ODR rule isn't violated. +#ifndef PARQUET_IMPL_NAMESPACE +#error "PARQUET_IMPL_NAMESPACE must be defined" +#endif +namespace parquet::internal::PARQUET_IMPL_NAMESPACE { +/// Builds a bitmap by applying predicate to the level vector provided. +/// +/// \param[in] levels Rep or def level array. 
+/// \param[in] num_levels The number of levels to process (must be [0, 64]) +/// \param[in] predicate The predicate to apply (must have the signature `bool +/// predicate(int16_t)`. +/// \returns The bitmap using least significant "bit" ordering. +/// +template +inline uint64_t LevelsToBitmap(const int16_t* levels, int64_t num_levels, + Predicate predicate) { + // Both clang and GCC can vectorize this automatically with SSE4/AVX2. + uint64_t mask = 0; + for (int x = 0; x < num_levels; x++) { + mask |= static_cast(predicate(levels[x]) ? 1 : 0) << x; + } + return ::arrow::bit_util::ToLittleEndian(mask); +} + +inline MinMax FindMinMaxImpl(const int16_t* levels, int64_t num_levels) { + MinMax out{std::numeric_limits::max(), std::numeric_limits::min()}; + for (int x = 0; x < num_levels; x++) { + out.min = std::min(levels[x], out.min); + out.max = std::max(levels[x], out.max); + } + return out; +} + +inline uint64_t GreaterThanBitmapImpl(const int16_t* levels, int64_t num_levels, + int16_t rhs) { + return LevelsToBitmap(levels, num_levels, [rhs](int16_t value) { return value > rhs; }); +} + +} // namespace parquet::internal::PARQUET_IMPL_NAMESPACE diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h new file mode 100644 index 0000000000000000000000000000000000000000..31de95be41c473814c52cd9d2f5902d63f1b944b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h @@ -0,0 +1,216 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/endian.h" +#include "parquet/platform.h" +#include "parquet/schema.h" + +namespace parquet::internal { + +struct PARQUET_EXPORT LevelInfo { + LevelInfo() + : null_slot_usage(1), def_level(0), rep_level(0), repeated_ancestor_def_level(0) {} + LevelInfo(int32_t null_slots, int32_t definition_level, int32_t repetition_level, + int32_t repeated_ancestor_definition_level) + : null_slot_usage(null_slots), + def_level(static_cast(definition_level)), + rep_level(static_cast(repetition_level)), + repeated_ancestor_def_level( + static_cast(repeated_ancestor_definition_level)) {} + + bool operator==(const LevelInfo& b) const { + return null_slot_usage == b.null_slot_usage && def_level == b.def_level && + rep_level == b.rep_level && + repeated_ancestor_def_level == b.repeated_ancestor_def_level; + } + + bool HasNullableValues() const { return repeated_ancestor_def_level < def_level; } + + // How many slots an undefined but present (i.e. null) element in + // parquet consumes when decoding to Arrow. + // "Slot" is used in the same context as the Arrow specification + // (i.e. a value holder). 
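+  // (Hedged illustration: when decoding into a fixed_size_list<int32, 4>, a
+  // null list element still consumes 4 slots in the child int32 array, so the
+  // child's null_slot_usage would be 4.)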
+  // This is only ever >1 for descendants of FixedSizeList.
+  int32_t null_slot_usage = 1;
+
+  // The definition level at which the value for the field
+  // is considered not null (definition levels greater than
+  // or equal to this value indicate a not-null
+  // value for the field). For list fields, definition levels
+  // greater than or equal to this field indicate a present,
+  // possibly null, child value.
+  int16_t def_level = 0;
+
+  // The repetition level corresponding to this element
+  // or the closest repeated ancestor. Any repetition
+  // level less than this indicates either a new list OR
+  // an empty list (which is determined in conjunction
+  // with definition levels).
+  int16_t rep_level = 0;
+
+  // The definition level indicating the level at which the closest
+  // repeated ancestor is not empty. This is used to discriminate
+  // between a value less than |def_level| being null or excluded entirely.
+  // For instance, if we have an arrow schema like:
+  // list(struct(f0: int)). Then there are the following
+  // definition levels:
+  // 0 = null list
+  // 1 = present but empty list.
+  // 2 = a null value in the list
+  // 3 = a non null struct but null integer.
+  // 4 = a present integer.
+  // When reconstructing, the struct and integer arrays'
+  // repeated_ancestor_def_level would be 2. Any
+  // def_level < 2 indicates that there isn't a corresponding
+  // child value in the list.
+  // i.e. [null, [], [null], [{f0: null}], [{f0: 1}]]
+  // has the def levels [0, 1, 2, 3, 4]. The actual
+  // struct array is only of length 3: [not-set, set, set] and
+  // the int array is also of length 3: [N/A, null, 1].
+  //
+  int16_t repeated_ancestor_def_level = 0;
+
+  /// Increments levels according to the cardinality of node.
+  void Increment(const schema::Node& node) {
+    if (node.is_repeated()) {
+      IncrementRepeated();
+      return;
+    }
+    if (node.is_optional()) {
+      IncrementOptional();
+      return;
+    }
+  }
+
+  /// Increments the level for an optional node.
+  void IncrementOptional() { def_level++; }
+
+  /// Increments levels for a repeated node. Returns
+  /// the previous ancestor_list_def_level.
+  int16_t IncrementRepeated() {
+    int16_t last_repeated_ancestor = repeated_ancestor_def_level;
+
+    // Repeated fields add both a repetition and definition level. This is used
+    // to distinguish between an empty list and a list with an item in it.
+    ++rep_level;
+    ++def_level;
+    // For levels >= repeated_ancestor_def_level it indicates the list was
+    // non-null and had at least one element. This is important
+    // for later decoding because we need to add a slot for these
+    // values. For levels < current_def_level no slots are added
+    // to arrays.
+    repeated_ancestor_def_level = def_level;
+    return last_repeated_ancestor;
+  }
+
+  // Calculates and returns LevelInfo for a column descriptor.
+  static LevelInfo ComputeLevelInfo(const ColumnDescriptor* descr) {
+    LevelInfo level_info;
+    level_info.def_level = descr->max_definition_level();
+    level_info.rep_level = descr->max_repetition_level();
+
+    int16_t min_spaced_def_level = descr->max_definition_level();
+    const ::parquet::schema::Node* node = descr->schema_node().get();
+    while (node && !node->is_repeated()) {
+      if (node->is_optional()) {
+        min_spaced_def_level--;
+      }
+      node = node->parent();
+    }
+    level_info.repeated_ancestor_def_level = min_spaced_def_level;
+    return level_info;
+  }
+
+  friend std::ostream& operator<<(std::ostream& os, const LevelInfo& levels) {
+    // This print method exists to silence valgrind issues. What's printed
+    // is not important because all asserts happen directly on
+    // members.
+    os << "{def=" << levels.def_level << ", rep=" << levels.rep_level
+       << ", repeated_ancestor_def=" << levels.repeated_ancestor_def_level;
+    if (levels.null_slot_usage > 1) {
+      os << ", null_slot_usage=" << levels.null_slot_usage;
+    }
+    os << "}";
+    return os;
+  }
+};
+
+// Input/Output structure for reconstructed validity bitmaps.
+struct PARQUET_EXPORT ValidityBitmapInputOutput {
+  // Input only.
+  // The maximum number of values_read expected (actual
+  // values read must be less than or equal to this value).
+  // If this number is exceeded, methods will throw a
+  // ParquetException. Exceeding this limit indicates
+  // either a corrupt or incorrectly written file.
+  int64_t values_read_upper_bound = 0;
+  // Output only. The number of values encountered (this is logically
+  // the number of elements in the resulting Arrow array).
+  int64_t values_read = 0;
+  // Input/Output. The number of nulls encountered.
+  int64_t null_count = 0;
+  // Output only. The validity bitmap to populate. May be null only
+  // for DefRepLevelsToListInfo (if all that is needed is list offsets).
+  uint8_t* valid_bits = NULLPTR;
+  // Input only, offset into valid_bits to start at.
+  int64_t valid_bits_offset = 0;
+};
+
+// Converts def_levels to validity bitmaps for non-list arrays and structs that have
+// at least one member that is not a list and has no list descendants.
+// For lists use DefRepLevelsToList, and for structs where all descendants contain
+// a list use DefRepLevelsToBitmap.
+void PARQUET_EXPORT DefLevelsToBitmap(const int16_t* def_levels, int64_t num_def_levels,
+                                      LevelInfo level_info,
+                                      ValidityBitmapInputOutput* output);
+
+// Reconstructs a validity bitmap and list offsets for a list array based on
+// def/rep levels. The first element of offsets will not be modified if rep_levels
+// starts with a new list. The first element of offsets will be used when calculating
+// the next offset. See documentation on DefLevelsToBitmap for when to use this
+// method vs the other ones in this file for reconstruction.
+//
+// Offsets must be sized to 1 + values_read_upper_bound.
+void PARQUET_EXPORT DefRepLevelsToList(const int16_t* def_levels,
+                                       const int16_t* rep_levels, int64_t num_def_levels,
+                                       LevelInfo level_info,
+                                       ValidityBitmapInputOutput* output,
+                                       int32_t* offsets);
+void PARQUET_EXPORT DefRepLevelsToList(const int16_t* def_levels,
+                                       const int16_t* rep_levels, int64_t num_def_levels,
+                                       LevelInfo level_info,
+                                       ValidityBitmapInputOutput* output,
+                                       int64_t* offsets);
+
+// Reconstructs a validity bitmap for a struct where every member is a list or has
+// a list descendant. See documentation on DefLevelsToBitmap for more details on
+// when to use this method compared to the other ones defined above.
+void PARQUET_EXPORT DefRepLevelsToBitmap(const int16_t* def_levels,
+                                         const int16_t* rep_levels,
+                                         int64_t num_def_levels, LevelInfo level_info,
+                                         ValidityBitmapInputOutput* output);
+
+// This is exposed to ensure we can properly test a software simulated pext function
+// (i.e. it isn't hidden by runtime dispatch).
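+// Worked example: extracting bitmap 0b1110 with selection 0b1010 keeps the
+// bits at positions 1 and 3 (both 1 here) and packs them into 0b11.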
+uint64_t PARQUET_EXPORT TestOnlyExtractBitsSoftware(uint64_t bitmap, uint64_t selection); + +} // namespace parquet::internal diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h new file mode 100644 index 0000000000000000000000000000000000000000..d1ccedabfde5084ab5c02c2314098122c68ffa81 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h @@ -0,0 +1,354 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include "parquet/level_conversion.h" + +#include +#include +#include + +#include "arrow/util/bit_run_reader.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/logging.h" +#include "arrow/util/simd.h" +#include "parquet/exception.h" +#include "parquet/level_comparison.h" + +#ifndef PARQUET_IMPL_NAMESPACE +#error "PARQUET_IMPL_NAMESPACE must be defined" +#endif + +namespace parquet::internal::PARQUET_IMPL_NAMESPACE { + +// clang-format off +/* Python code to generate lookup table: + +kLookupBits = 5 +count = 0 +print('constexpr int kLookupBits = {};'.format(kLookupBits)) +print('constexpr uint8_t kPextTable[1 << kLookupBits][1 << kLookupBits] = {') +print(' ', end = '') +for mask in range(1 << kLookupBits): + for data in range(1 << kLookupBits): + bit_value = 0 + bit_len = 0 + for i in range(kLookupBits): + if mask & (1 << i): + bit_value |= (((data >> i) & 1) << bit_len) + bit_len += 1 + out = '0x{:02X},'.format(bit_value) + count += 1 + if count % (1 << kLookupBits) == 1: + print(' {') + if count % 8 == 1: + print(' ', end = '') + if count % 8 == 0: + print(out, end = '\n') + else: + print(out, end = ' ') + if count % (1 << kLookupBits) == 0: + print(' },', end = '') +print('\n};') + +*/ +// clang-format on + +constexpr int kLookupBits = 5; +constexpr uint8_t kPextTable[1 << kLookupBits][1 << kLookupBits] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, + 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, + 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, + 0x02, 0x03, 0x00, 0x01, 0x02, 
0x03, 0x00, 0x01, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, + 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, + 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, + 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, + 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, + 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, + 0x03, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, + 0x03, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, + 0x02, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, + 0x05, 0x06, 0x07, 0x06, 0x07, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, + 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, + 0x05, 0x06, 0x06, 0x07, 0x07, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, + 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, + 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, + 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, + 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, + 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, + 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, + 0x03, 0x03, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + }, 
+ { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, + 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, + 0x06, 0x07, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, + 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, + 0x07, 0x07, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, + 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, + 0x03, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x04, 0x05, + 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, + 0x03, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, + 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x08, 0x09, + 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, + 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, + 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, + 0x05, 0x06, 0x07, 0x06, 0x07, 0x08, 0x09, 0x08, 0x09, 0x0A, 0x0B, + 0x0A, 0x0B, 0x0C, 0x0D, 0x0C, 0x0D, 0x0E, 0x0F, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, + 0x05, 0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09, 0x0A, 0x0A, + 0x0B, 0x0B, 0x0C, 0x0C, 0x0D, 0x0D, 0x0E, 0x0E, 0x0F, 0x0F, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + }, +}; + +inline uint64_t ExtractBitsSoftware(uint64_t bitmap, uint64_t select_bitmap) { + // A software emulation of _pext_u64 + + // These checks should be inline and are likely to be common cases. 
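+  // (Worked example of the table fallback below: for select_bitmap = 0b01010
+  // and bitmap = 0b11010, the first 5-bit step reads
+  // kPextTable[0b01010][0b11010], extracting bits 1 and 3 of the bitmap into
+  // 0b11, and bit_len advances by popcount(0b01010) = 2.)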
+ if (select_bitmap == ~uint64_t{0}) { + return bitmap; + } else if (select_bitmap == 0) { + return 0; + } + + // Fallback to lookup table method + uint64_t bit_value = 0; + int bit_len = 0; + constexpr uint8_t kLookupMask = (1U << kLookupBits) - 1; + while (select_bitmap != 0) { + const auto mask_len = ARROW_POPCOUNT32(select_bitmap & kLookupMask); + const uint64_t value = kPextTable[select_bitmap & kLookupMask][bitmap & kLookupMask]; + bit_value |= (value << bit_len); + bit_len += mask_len; + bitmap >>= kLookupBits; + select_bitmap >>= kLookupBits; + } + return bit_value; +} + +#ifdef ARROW_HAVE_BMI2 + +// Use _pext_u64 on 64-bit builds, _pext_u32 on 32-bit builds, +#if UINTPTR_MAX == 0xFFFFFFFF + +using extract_bitmap_t = uint32_t; +inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap, + extract_bitmap_t select_bitmap) { + return _pext_u32(bitmap, select_bitmap); +} + +#else + +using extract_bitmap_t = uint64_t; +inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap, + extract_bitmap_t select_bitmap) { + return _pext_u64(bitmap, select_bitmap); +} + +#endif + +#else // !defined(ARROW_HAVE_BMI2) + +// Use 64-bit pext emulation when BMI2 isn't available. +using extract_bitmap_t = uint64_t; +inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap, + extract_bitmap_t select_bitmap) { + return ExtractBitsSoftware(bitmap, select_bitmap); +} + +#endif + +static constexpr int64_t kExtractBitsSize = 8 * sizeof(extract_bitmap_t); + +template +int64_t DefLevelsBatchToBitmap(const int16_t* def_levels, const int64_t batch_size, + int64_t upper_bound_remaining, LevelInfo level_info, + ::arrow::internal::FirstTimeBitmapWriter* writer) { + DCHECK_LE(batch_size, kExtractBitsSize); + + // Greater than level_info.def_level - 1 implies >= the def_level + auto defined_bitmap = static_cast( + internal::GreaterThanBitmap(def_levels, batch_size, level_info.def_level - 1)); + + if (has_repeated_parent) { + // Greater than level_info.repeated_ancestor_def_level - 1 implies >= the + // repeated_ancestor_def_level + auto present_bitmap = static_cast(internal::GreaterThanBitmap( + def_levels, batch_size, level_info.repeated_ancestor_def_level - 1)); + auto selected_bits = ExtractBits(defined_bitmap, present_bitmap); + int64_t selected_count = ::arrow::bit_util::PopCount(present_bitmap); + if (ARROW_PREDICT_FALSE(selected_count > upper_bound_remaining)) { + throw ParquetException("Values read exceeded upper bound"); + } + writer->AppendWord(selected_bits, selected_count); + return ::arrow::bit_util::PopCount(selected_bits); + } else { + if (ARROW_PREDICT_FALSE(batch_size > upper_bound_remaining)) { + std::stringstream ss; + ss << "Values read exceeded upper bound"; + throw ParquetException(ss.str()); + } + + writer->AppendWord(defined_bitmap, batch_size); + return ::arrow::bit_util::PopCount(defined_bitmap); + } +} + +template +void DefLevelsToBitmapSimd(const int16_t* def_levels, int64_t num_def_levels, + LevelInfo level_info, ValidityBitmapInputOutput* output) { + ::arrow::internal::FirstTimeBitmapWriter writer( + output->valid_bits, + /*start_offset=*/output->valid_bits_offset, + /*length=*/output->values_read_upper_bound); + int64_t set_count = 0; + output->values_read = 0; + int64_t values_read_remaining = output->values_read_upper_bound; + while (num_def_levels > kExtractBitsSize) { + set_count += DefLevelsBatchToBitmap( + def_levels, kExtractBitsSize, values_read_remaining, level_info, &writer); + def_levels += kExtractBitsSize; + num_def_levels -= kExtractBitsSize; + values_read_remaining = 
        output->values_read_upper_bound - writer.position();
+  }
+  set_count += DefLevelsBatchToBitmap<has_repeated_parent>(
+      def_levels, num_def_levels, values_read_remaining, level_info, &writer);
+
+  output->values_read = writer.position();
+  output->null_count += output->values_read - set_count;
+  writer.Finish();
+}
+
+} // namespace parquet::internal::PARQUET_IMPL_NAMESPACE
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h
new file mode 100644
index 0000000000000000000000000000000000000000..e47c45ff0492a82d46c36a996eb721c7d808980c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h
@@ -0,0 +1,560 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "parquet/encryption/type_fwd.h"
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+#include "parquet/schema.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class ColumnDescriptor;
+class EncodedStatistics;
+class FileCryptoMetaData;
+class Statistics;
+class SchemaDescriptor;
+
+namespace schema {
+
+class ColumnPath;
+
+} // namespace schema
+
+using KeyValueMetadata = ::arrow::KeyValueMetadata;
+
+class PARQUET_EXPORT ApplicationVersion {
+ public:
+  // Known Versions with Issues
+  static const ApplicationVersion& PARQUET_251_FIXED_VERSION();
+  static const ApplicationVersion& PARQUET_816_FIXED_VERSION();
+  static const ApplicationVersion& PARQUET_CPP_FIXED_STATS_VERSION();
+  static const ApplicationVersion& PARQUET_MR_FIXED_STATS_VERSION();
+  static const ApplicationVersion& PARQUET_CPP_10353_FIXED_VERSION();
+
+  // Application that wrote the file. e.g. "IMPALA"
+  std::string application_;
+  // Build name
+  std::string build_;
+
+  // Version of the application that wrote the file, expressed as
+  // (<major>.<minor>.<patch>). Unmatched parts default to 0.
+ // "1.2.3" => {1, 2, 3} + // "1.2" => {1, 2, 0} + // "1.2-cdh5" => {1, 2, 0} + struct { + int major; + int minor; + int patch; + std::string unknown; + std::string pre_release; + std::string build_info; + } version; + + ApplicationVersion() = default; + explicit ApplicationVersion(const std::string& created_by); + ApplicationVersion(std::string application, int major, int minor, int patch); + + // Returns true if version is strictly less than other_version + bool VersionLt(const ApplicationVersion& other_version) const; + + // Returns true if version is strictly equal with other_version + bool VersionEq(const ApplicationVersion& other_version) const; + + // Checks if the Version has the correct statistics for a given column + bool HasCorrectStatistics(Type::type primitive, EncodedStatistics& statistics, + SortOrder::type sort_order = SortOrder::SIGNED) const; +}; + +class PARQUET_EXPORT ColumnCryptoMetaData { + public: + static std::unique_ptr Make(const uint8_t* metadata); + ~ColumnCryptoMetaData(); + + bool Equals(const ColumnCryptoMetaData& other) const; + + std::shared_ptr path_in_schema() const; + bool encrypted_with_footer_key() const; + const std::string& key_metadata() const; + + private: + explicit ColumnCryptoMetaData(const uint8_t* metadata); + + class ColumnCryptoMetaDataImpl; + std::unique_ptr impl_; +}; + +/// \brief Public struct for Thrift PageEncodingStats in ColumnChunkMetaData +struct PageEncodingStats { + PageType::type page_type; + Encoding::type encoding; + int32_t count; +}; + +/// \brief Public struct for location to page index in ColumnChunkMetaData. +struct IndexLocation { + /// File offset of the given index, in bytes + int64_t offset; + /// Length of the given index, in bytes + int32_t length; +}; + +/// \brief ColumnChunkMetaData is a proxy around format::ColumnChunkMetaData. +class PARQUET_EXPORT ColumnChunkMetaData { + public: + // API convenience to get a MetaData accessor + + ARROW_DEPRECATED("Use the ReaderProperties-taking overload") + static std::unique_ptr Make( + const void* metadata, const ColumnDescriptor* descr, + const ApplicationVersion* writer_version, int16_t row_group_ordinal = -1, + int16_t column_ordinal = -1, + std::shared_ptr file_decryptor = NULLPTR); + + static std::unique_ptr Make( + const void* metadata, const ColumnDescriptor* descr, + const ReaderProperties& properties = default_reader_properties(), + const ApplicationVersion* writer_version = NULLPTR, int16_t row_group_ordinal = -1, + int16_t column_ordinal = -1, + std::shared_ptr file_decryptor = NULLPTR); + + ~ColumnChunkMetaData(); + + bool Equals(const ColumnChunkMetaData& other) const; + + // column chunk + int64_t file_offset() const; + + // parameter is only used when a dataset is spread across multiple files + const std::string& file_path() const; + + // column metadata + bool is_metadata_set() const; + Type::type type() const; + int64_t num_values() const; + std::shared_ptr path_in_schema() const; + bool is_stats_set() const; + std::shared_ptr statistics() const; + + Compression::type compression() const; + // Indicate if the ColumnChunk compression is supported by the current + // compiled parquet library. 
+  bool can_decompress() const;
+
+  const std::vector<Encoding::type>& encodings() const;
+  const std::vector<PageEncodingStats>& encoding_stats() const;
+  std::optional<int64_t> bloom_filter_offset() const;
+  std::optional<int32_t> bloom_filter_length() const;
+  bool has_dictionary_page() const;
+  int64_t dictionary_page_offset() const;
+  int64_t data_page_offset() const;
+  bool has_index_page() const;
+  int64_t index_page_offset() const;
+  int64_t total_compressed_size() const;
+  int64_t total_uncompressed_size() const;
+  std::unique_ptr<ColumnCryptoMetaData> crypto_metadata() const;
+  std::optional<IndexLocation> GetColumnIndexLocation() const;
+  std::optional<IndexLocation> GetOffsetIndexLocation() const;
+
+ private:
+  explicit ColumnChunkMetaData(
+      const void* metadata, const ColumnDescriptor* descr, int16_t row_group_ordinal,
+      int16_t column_ordinal, const ReaderProperties& properties,
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+  // PIMPL Idiom
+  class ColumnChunkMetaDataImpl;
+  std::unique_ptr<ColumnChunkMetaDataImpl> impl_;
+};
+
+/// \brief RowGroupMetaData is a proxy around format::RowGroupMetaData.
+class PARQUET_EXPORT RowGroupMetaData {
+ public:
+  ARROW_DEPRECATED("Use the ReaderProperties-taking overload")
+  static std::unique_ptr<RowGroupMetaData> Make(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ApplicationVersion* writer_version,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  /// \brief Create a RowGroupMetaData from a serialized thrift message.
+  static std::unique_ptr<RowGroupMetaData> Make(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ReaderProperties& properties = default_reader_properties(),
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  ~RowGroupMetaData();
+
+  bool Equals(const RowGroupMetaData& other) const;
+
+  /// \brief The number of columns in this row group. The order must match the
+  /// parent's column ordering.
+  int num_columns() const;
+
+  /// \brief Return the ColumnChunkMetaData of the corresponding column ordinal.
+  ///
+  /// WARNING: the returned object references a memory location in its parent
+  /// (RowGroupMetaData) object. Hence, the parent must outlive the returned
+  /// object.
+  ///
+  /// \param[in] index of the ColumnChunkMetaData to retrieve.
+  ///
+  /// \throws ParquetException if the index is out of bounds.
+  std::unique_ptr<ColumnChunkMetaData> ColumnChunk(int index) const;
+
+  /// \brief Number of rows in this row group.
+  int64_t num_rows() const;
+
+  /// \brief Total byte size of all the uncompressed column data in this row group.
+  int64_t total_byte_size() const;
+
+  /// \brief Total byte size of all the compressed (and potentially encrypted)
+  /// column data in this row group.
+  ///
+  /// This information is optional and may be 0 if omitted.
+  int64_t total_compressed_size() const;
+
+  /// \brief Byte offset from beginning of file to first page (data or
+  /// dictionary) in this row group
+  ///
+  /// The file_offset field that this method exposes is optional. This method
+  /// will return 0 if that field is not set to a meaningful value.
+  int64_t file_offset() const;
+  // Return const-pointer to make it clear that this object is not to be copied
+  const SchemaDescriptor* schema() const;
+  // Indicate if all of the RowGroup's ColumnChunks can be decompressed.
+  bool can_decompress() const;
+  // Sorting columns of the row group, if any.
+  std::vector<SortingColumn> sorting_columns() const;
+
+ private:
+  explicit RowGroupMetaData(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ReaderProperties& properties,
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+  // PIMPL Idiom
+  class RowGroupMetaDataImpl;
+  std::unique_ptr<RowGroupMetaDataImpl> impl_;
+};
+
+class FileMetaDataBuilder;
+
+/// \brief FileMetaData is a proxy around format::FileMetaData.
+class PARQUET_EXPORT FileMetaData {
+ public:
+  ARROW_DEPRECATED("Use the ReaderProperties-taking overload")
+  static std::shared_ptr<FileMetaData> Make(
+      const void* serialized_metadata, uint32_t* inout_metadata_len,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor);
+
+  /// \brief Create a FileMetaData from a serialized thrift message.
+  static std::shared_ptr<FileMetaData> Make(
+      const void* serialized_metadata, uint32_t* inout_metadata_len,
+      const ReaderProperties& properties = default_reader_properties(),
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  ~FileMetaData();
+
+  bool Equals(const FileMetaData& other) const;
+
+  /// \brief The number of parquet "leaf" columns.
+  ///
+  /// The Parquet thrift definition requires that nested schema elements are
+  /// flattened. This method returns the number of columns in the flattened
+  /// version.
+  /// For instance, if the schema looks like this:
+  ///   0 foo.bar
+  ///       foo.bar.baz    0
+  ///       foo.bar.baz2   1
+  ///     foo.qux          2
+  ///   1 foo2             3
+  ///   2 foo3             4
+  /// this method will return 5, because there are 5 "leaf" fields (so 5
+  /// flattened fields).
+  int num_columns() const;
+
+  /// \brief The number of flattened schema elements.
+  ///
+  /// The Parquet thrift definition requires that nested schema elements are
+  /// flattened. This method returns the total number of elements in the
+  /// flattened list.
+  int num_schema_elements() const;
+
+  /// \brief The total number of rows.
+  int64_t num_rows() const;
+
+  /// \brief The number of row groups in the file.
+  int num_row_groups() const;
+
+  /// \brief Return the RowGroupMetaData of the corresponding row group ordinal.
+  ///
+  /// WARNING: the returned object references a memory location in its parent
+  /// (FileMetaData) object. Hence, the parent must outlive the returned object.
+  ///
+  /// \param[in] index of the RowGroup to retrieve.
+  ///
+  /// \throws ParquetException if the index is out of bounds.
+  std::unique_ptr<RowGroupMetaData> RowGroup(int index) const;
+
+  /// \brief Return the "version" of the file
+  ///
+  /// WARNING: The value returned by this method is unreliable as 1) the Parquet
+  /// file metadata stores the version as a single integer and 2) some producers
+  /// are known to always write a hardcoded value. Therefore, you cannot use
+  /// this value to know which features are used in the file.
+  ParquetVersion::type version() const;
+
+  /// \brief Return the application's user-agent string of the writer.
+  const std::string& created_by() const;
+
+  /// \brief Return the application's version of the writer.
+  const ApplicationVersion& writer_version() const;
+
+  /// \brief Size of the original thrift encoded metadata footer.
+  uint32_t size() const;
+
+  /// \brief Indicate if all of the FileMetadata's RowGroups can be decompressed.
+  ///
+  /// This will return false if any of the RowGroup's pages is compressed with
+  /// a compression format which is not compiled into the current parquet library.
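+  ///
+  /// A hedged traversal sketch using the accessors above (the reader variable
+  /// is illustrative):
+  ///
+  ///   std::shared_ptr<FileMetaData> md = reader->metadata();
+  ///   for (int i = 0; i < md->num_row_groups(); ++i) {
+  ///     auto rg = md->RowGroup(i);
+  ///     for (int j = 0; j < rg->num_columns(); ++j) {
+  ///       auto col = rg->ColumnChunk(j);
+  ///       // e.g. check col->can_decompress()
+  ///     }
+  ///   }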
+  /// \brief Indicate if all of the FileMetadata's RowGroups can be decompressed.
+  ///
+  /// This will return false if any page of any RowGroup is compressed with a
+  /// compression format which is not compiled into the current parquet library.
+  bool can_decompress() const;
+
+  bool is_encryption_algorithm_set() const;
+  EncryptionAlgorithm encryption_algorithm() const;
+  const std::string& footer_signing_key_metadata() const;
+
+  /// \brief Verify the signature of the FileMetaData when the file is encrypted
+  /// but the footer is not (plaintext footer).
+  bool VerifySignature(const void* signature);
+
+  void WriteTo(::arrow::io::OutputStream* dst,
+               const std::shared_ptr<Encryptor>& encryptor = NULLPTR) const;
+
+  /// \brief Return a Thrift-serialized representation of the metadata as a
+  /// string.
+  std::string SerializeToString() const;
+
+  // Return const-pointer to make it clear that this object is not to be copied
+  const SchemaDescriptor* schema() const;
+
+  const std::shared_ptr<const KeyValueMetadata>& key_value_metadata() const;
+
+  /// \brief Set a file path for all ColumnChunks in all RowGroups.
+  ///
+  /// Commonly used by systems (Dask, Spark) that generate a metadata-only
+  /// parquet file. The path is usually relative to that index file.
+  ///
+  /// \param[in] path to set.
+  void set_file_path(const std::string& path);
+
+  /// \brief Merge row groups from another metadata file into this one.
+  ///
+  /// The schema of the input FileMetaData must be equal to the
+  /// schema of this object.
+  ///
+  /// This is used by systems that create an aggregate metadata-only file by
+  /// concatenating the row groups of multiple files. The newly created
+  /// metadata file acts as an index of all available row groups.
+  ///
+  /// \param[in] other FileMetaData to merge the row groups from.
+  ///
+  /// \throws ParquetException if the schemas are not equal.
+  void AppendRowGroups(const FileMetaData& other);
+
+  /// \brief Return a FileMetaData containing a subset of the row groups in this
+  /// FileMetaData.
+  std::shared_ptr<FileMetaData> Subset(const std::vector<int>& row_groups) const;
+
+ private:
+  friend FileMetaDataBuilder;
+  friend class SerializedFile;
+
+  explicit FileMetaData(const void* serialized_metadata, uint32_t* metadata_len,
+                        const ReaderProperties& properties,
+                        std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  void set_file_decryptor(std::shared_ptr<InternalFileDecryptor> file_decryptor);
+
+  // PIMPL Idiom
+  FileMetaData();
+  class FileMetaDataImpl;
+  std::unique_ptr<FileMetaDataImpl> impl_;
+};
+
+class PARQUET_EXPORT FileCryptoMetaData {
+ public:
+  // API convenience to get a MetaData accessor
+  static std::shared_ptr<FileCryptoMetaData> Make(
+      const uint8_t* serialized_metadata, uint32_t* metadata_len,
+      const ReaderProperties& properties = default_reader_properties());
+  ~FileCryptoMetaData();
+
+  EncryptionAlgorithm encryption_algorithm() const;
+  const std::string& key_metadata() const;
+
+  void WriteTo(::arrow::io::OutputStream* dst) const;
+
+ private:
+  friend FileMetaDataBuilder;
+  FileCryptoMetaData(const uint8_t* serialized_metadata, uint32_t* metadata_len,
+                     const ReaderProperties& properties);
+
+  // PIMPL Idiom
+  FileCryptoMetaData();
+  class FileCryptoMetaDataImpl;
+  std::unique_ptr<FileCryptoMetaDataImpl> impl_;
+};
+
+// Builder API
+class PARQUET_EXPORT ColumnChunkMetaDataBuilder {
+ public:
+  // API convenience to get a MetaData reader
+  static std::unique_ptr<ColumnChunkMetaDataBuilder> Make(
+      std::shared_ptr<WriterProperties> props, const ColumnDescriptor* column);
+
+  static std::unique_ptr<ColumnChunkMetaDataBuilder> Make(
+      std::shared_ptr<WriterProperties> props, const ColumnDescriptor* column,
+      void* contents);
+
+  ~ColumnChunkMetaDataBuilder();
+
+  // column chunk
+  // Used when a dataset is spread across multiple files
+  void set_file_path(const std::string& path);
+  // column metadata
+  void SetStatistics(const EncodedStatistics& stats);
+  // get the column descriptor
+  const ColumnDescriptor* descr() const;
+
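+  // Usage sketch (illustrative, not part of the upstream header): building an
+  // aggregate metadata-only index file with the FileMetaData APIs declared
+  // above. `md_a` and `md_b` are assumed std::shared_ptr<parquet::FileMetaData>
+  // read from two files with identical schemas.
+  //
+  //   md_a->set_file_path("part-0.parquet");
+  //   md_b->set_file_path("part-1.parquet");
+  //   md_a->AppendRowGroups(*md_b);  // throws ParquetException if schemas differ
+  //   std::string thrift_bytes = md_a->SerializeToString();
+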
int64_t total_compressed_size() const; + // commit the metadata + + void Finish(int64_t num_values, int64_t dictionary_page_offset, + int64_t index_page_offset, int64_t data_page_offset, + int64_t compressed_size, int64_t uncompressed_size, bool has_dictionary, + bool dictionary_fallback, + const std::map& dict_encoding_stats_, + const std::map& data_encoding_stats_, + const std::shared_ptr& encryptor = NULLPTR); + + // The metadata contents, suitable for passing to ColumnChunkMetaData::Make + const void* contents() const; + + // For writing metadata at end of column chunk + void WriteTo(::arrow::io::OutputStream* sink); + + private: + explicit ColumnChunkMetaDataBuilder(std::shared_ptr props, + const ColumnDescriptor* column); + explicit ColumnChunkMetaDataBuilder(std::shared_ptr props, + const ColumnDescriptor* column, void* contents); + // PIMPL Idiom + class ColumnChunkMetaDataBuilderImpl; + std::unique_ptr impl_; +}; + +class PARQUET_EXPORT RowGroupMetaDataBuilder { + public: + // API convenience to get a MetaData reader + static std::unique_ptr Make( + std::shared_ptr props, const SchemaDescriptor* schema_, + void* contents); + + ~RowGroupMetaDataBuilder(); + + ColumnChunkMetaDataBuilder* NextColumnChunk(); + int num_columns(); + int64_t num_rows(); + int current_column() const; + + void set_num_rows(int64_t num_rows); + + // commit the metadata + void Finish(int64_t total_bytes_written, int16_t row_group_ordinal = -1); + + private: + explicit RowGroupMetaDataBuilder(std::shared_ptr props, + const SchemaDescriptor* schema_, void* contents); + // PIMPL Idiom + class RowGroupMetaDataBuilderImpl; + std::unique_ptr impl_; +}; + +/// \brief Public struct for location to all page indexes in a parquet file. +struct PageIndexLocation { + /// Alias type of page index location of a row group. The index location + /// is located by column ordinal. If the column does not have the page index, + /// its value is set to std::nullopt. + using RowGroupIndexLocation = std::vector>; + /// Alias type of page index location of a parquet file. The index location + /// is located by the row group ordinal. + using FileIndexLocation = std::map; + /// Row group column index locations which uses row group ordinal as the key. + FileIndexLocation column_index_location; + /// Row group offset index locations which uses row group ordinal as the key. + FileIndexLocation offset_index_location; +}; + +class PARQUET_EXPORT FileMetaDataBuilder { + public: + ARROW_DEPRECATED("Deprecated in 12.0.0. 
Use overload without KeyValueMetadata instead.") + static std::unique_ptr Make( + const SchemaDescriptor* schema, std::shared_ptr props, + std::shared_ptr key_value_metadata); + + // API convenience to get a MetaData builder + static std::unique_ptr Make( + const SchemaDescriptor* schema, std::shared_ptr props); + + ~FileMetaDataBuilder(); + + // The prior RowGroupMetaDataBuilder (if any) is destroyed + RowGroupMetaDataBuilder* AppendRowGroup(); + + // Update location to all page indexes in the parquet file + void SetPageIndexLocation(const PageIndexLocation& location); + + // Complete the Thrift structure + std::unique_ptr Finish( + const std::shared_ptr& key_value_metadata = NULLPTR); + + // crypto metadata + std::unique_ptr GetCryptoMetaData(); + + private: + explicit FileMetaDataBuilder( + const SchemaDescriptor* schema, std::shared_ptr props, + std::shared_ptr key_value_metadata = NULLPTR); + // PIMPL Idiom + class FileMetaDataBuilderImpl; + std::unique_ptr impl_; +}; + +PARQUET_EXPORT std::string ParquetVersionToString(ParquetVersion::type ver); + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h new file mode 100644 index 0000000000000000000000000000000000000000..967f0b831dc77c3160608dc5eaf3cf5496090414 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PARQUET_VERSION_H +#define PARQUET_VERSION_H + +#define PARQUET_VERSION_MAJOR 15 +#define PARQUET_VERSION_MINOR 0 +#define PARQUET_VERSION_PATCH 2 + +#define PARQUET_SO_VERSION "1500" +#define PARQUET_FULL_SO_VERSION "1500.2.0" + +// define the parquet created by version +#define CREATED_BY_VERSION "parquet-cpp-arrow version 15.0.2" + +#endif // PARQUET_VERSION_H diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h new file mode 100644 index 0000000000000000000000000000000000000000..4d3acb491e3906c4183dfcb0a8d9637a47fd082f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h @@ -0,0 +1,1183 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/type.h" +#include "arrow/util/compression.h" +#include "arrow/util/type_fwd.h" +#include "parquet/encryption/encryption.h" +#include "parquet/exception.h" +#include "parquet/parquet_version.h" +#include "parquet/platform.h" +#include "parquet/schema.h" +#include "parquet/type_fwd.h" +#include "parquet/types.h" + +namespace parquet { + +/// Controls serialization format of data pages. parquet-format v2.0.0 +/// introduced a new data page metadata type DataPageV2 and serialized page +/// structure (for example, encoded levels are no longer compressed). Prior to +/// the completion of PARQUET-457 in 2020, this library did not implement +/// DataPageV2 correctly, so if you use the V2 data page format, you may have +/// forward compatibility issues (older versions of the library will be unable +/// to read the files). Note that some Parquet implementations do not implement +/// DataPageV2 at all. +enum class ParquetDataPageVersion { V1, V2 }; + +/// Align the default buffer size to a small multiple of a page size. +constexpr int64_t kDefaultBufferSize = 4096 * 4; + +constexpr int32_t kDefaultThriftStringSizeLimit = 100 * 1000 * 1000; +// Structs in the thrift definition are relatively large (at least 300 bytes). +// This limits total memory to the same order of magnitude as +// kDefaultStringSizeLimit. +constexpr int32_t kDefaultThriftContainerSizeLimit = 1000 * 1000; + +class PARQUET_EXPORT ReaderProperties { + public: + explicit ReaderProperties(MemoryPool* pool = ::arrow::default_memory_pool()) + : pool_(pool) {} + + MemoryPool* memory_pool() const { return pool_; } + + std::shared_ptr GetStream(std::shared_ptr source, + int64_t start, int64_t num_bytes); + + /// Buffered stream reading allows the user to control the memory usage of + /// parquet readers. This ensure that all `RandomAccessFile::ReadAt` calls are + /// wrapped in a buffered reader that uses a fix sized buffer (of size + /// `buffer_size()`) instead of the full size of the ReadAt. + /// + /// The primary reason for this control knobs is for resource control and not + /// performance. + bool is_buffered_stream_enabled() const { return buffered_stream_enabled_; } + /// Enable buffered stream reading. + void enable_buffered_stream() { buffered_stream_enabled_ = true; } + /// Disable buffered stream reading. + void disable_buffered_stream() { buffered_stream_enabled_ = false; } + + bool read_dense_for_nullable() const { return read_dense_for_nullable_; } + void enable_read_dense_for_nullable() { read_dense_for_nullable_ = true; } + void disable_read_dense_for_nullable() { read_dense_for_nullable_ = false; } + + /// Return the size of the buffered stream buffer. + int64_t buffer_size() const { return buffer_size_; } + /// Set the size of the buffered stream buffer in bytes. + void set_buffer_size(int64_t size) { buffer_size_ = size; } + + /// \brief Return the size limit on thrift strings. 
+ /// + /// This limit helps prevent space and time bombs in files, but may need to + /// be increased in order to read files with especially large headers. + int32_t thrift_string_size_limit() const { return thrift_string_size_limit_; } + /// Set the size limit on thrift strings. + void set_thrift_string_size_limit(int32_t size) { thrift_string_size_limit_ = size; } + + /// \brief Return the size limit on thrift containers. + /// + /// This limit helps prevent space and time bombs in files, but may need to + /// be increased in order to read files with especially large headers. + int32_t thrift_container_size_limit() const { return thrift_container_size_limit_; } + /// Set the size limit on thrift containers. + void set_thrift_container_size_limit(int32_t size) { + thrift_container_size_limit_ = size; + } + + /// Set the decryption properties. + void file_decryption_properties(std::shared_ptr decryption) { + file_decryption_properties_ = std::move(decryption); + } + /// Return the decryption properties. + const std::shared_ptr& file_decryption_properties() const { + return file_decryption_properties_; + } + + bool page_checksum_verification() const { return page_checksum_verification_; } + void set_page_checksum_verification(bool check_crc) { + page_checksum_verification_ = check_crc; + } + + private: + MemoryPool* pool_; + int64_t buffer_size_ = kDefaultBufferSize; + int32_t thrift_string_size_limit_ = kDefaultThriftStringSizeLimit; + int32_t thrift_container_size_limit_ = kDefaultThriftContainerSizeLimit; + bool buffered_stream_enabled_ = false; + bool page_checksum_verification_ = false; + // Used with a RecordReader. + bool read_dense_for_nullable_ = false; + std::shared_ptr file_decryption_properties_; +}; + +ReaderProperties PARQUET_EXPORT default_reader_properties(); + +static constexpr int64_t kDefaultDataPageSize = 1024 * 1024; +static constexpr bool DEFAULT_IS_DICTIONARY_ENABLED = true; +static constexpr int64_t DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT = kDefaultDataPageSize; +static constexpr int64_t DEFAULT_WRITE_BATCH_SIZE = 1024; +static constexpr int64_t DEFAULT_MAX_ROW_GROUP_LENGTH = 1024 * 1024; +static constexpr bool DEFAULT_ARE_STATISTICS_ENABLED = true; +static constexpr int64_t DEFAULT_MAX_STATISTICS_SIZE = 4096; +static constexpr Encoding::type DEFAULT_ENCODING = Encoding::UNKNOWN; +static const char DEFAULT_CREATED_BY[] = CREATED_BY_VERSION; +static constexpr Compression::type DEFAULT_COMPRESSION_TYPE = Compression::UNCOMPRESSED; +static constexpr bool DEFAULT_IS_PAGE_INDEX_ENABLED = false; + +class PARQUET_EXPORT ColumnProperties { + public: + ColumnProperties(Encoding::type encoding = DEFAULT_ENCODING, + Compression::type codec = DEFAULT_COMPRESSION_TYPE, + bool dictionary_enabled = DEFAULT_IS_DICTIONARY_ENABLED, + bool statistics_enabled = DEFAULT_ARE_STATISTICS_ENABLED, + size_t max_stats_size = DEFAULT_MAX_STATISTICS_SIZE, + bool page_index_enabled = DEFAULT_IS_PAGE_INDEX_ENABLED) + : encoding_(encoding), + codec_(codec), + dictionary_enabled_(dictionary_enabled), + statistics_enabled_(statistics_enabled), + max_stats_size_(max_stats_size), + page_index_enabled_(page_index_enabled) {} + + void set_encoding(Encoding::type encoding) { encoding_ = encoding; } + + void set_compression(Compression::type codec) { codec_ = codec; } + + void set_dictionary_enabled(bool dictionary_enabled) { + dictionary_enabled_ = dictionary_enabled; + } + + void set_statistics_enabled(bool statistics_enabled) { + statistics_enabled_ = statistics_enabled; + } + + void 
set_max_statistics_size(size_t max_stats_size) { + max_stats_size_ = max_stats_size; + } + + void set_compression_level(int compression_level) { + if (!codec_options_) { + codec_options_ = std::make_shared(); + } + codec_options_->compression_level = compression_level; + } + + void set_codec_options(const std::shared_ptr& codec_options) { + codec_options_ = codec_options; + } + + void set_page_index_enabled(bool page_index_enabled) { + page_index_enabled_ = page_index_enabled; + } + + Encoding::type encoding() const { return encoding_; } + + Compression::type compression() const { return codec_; } + + bool dictionary_enabled() const { return dictionary_enabled_; } + + bool statistics_enabled() const { return statistics_enabled_; } + + size_t max_statistics_size() const { return max_stats_size_; } + + int compression_level() const { + if (!codec_options_) { + return ::arrow::util::kUseDefaultCompressionLevel; + } + return codec_options_->compression_level; + } + + const std::shared_ptr& codec_options() const { return codec_options_; } + + bool page_index_enabled() const { return page_index_enabled_; } + + private: + Encoding::type encoding_; + Compression::type codec_; + bool dictionary_enabled_; + bool statistics_enabled_; + size_t max_stats_size_; + std::shared_ptr codec_options_; + bool page_index_enabled_; +}; + +class PARQUET_EXPORT WriterProperties { + public: + class Builder { + public: + Builder() + : pool_(::arrow::default_memory_pool()), + dictionary_pagesize_limit_(DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT), + write_batch_size_(DEFAULT_WRITE_BATCH_SIZE), + max_row_group_length_(DEFAULT_MAX_ROW_GROUP_LENGTH), + pagesize_(kDefaultDataPageSize), + version_(ParquetVersion::PARQUET_2_6), + data_page_version_(ParquetDataPageVersion::V1), + created_by_(DEFAULT_CREATED_BY), + store_decimal_as_integer_(false), + page_checksum_enabled_(false) {} + + explicit Builder(const WriterProperties& properties) + : pool_(properties.memory_pool()), + dictionary_pagesize_limit_(properties.dictionary_pagesize_limit()), + write_batch_size_(properties.write_batch_size()), + max_row_group_length_(properties.max_row_group_length()), + pagesize_(properties.data_pagesize()), + version_(properties.version()), + data_page_version_(properties.data_page_version()), + created_by_(properties.created_by()), + store_decimal_as_integer_(properties.store_decimal_as_integer()), + page_checksum_enabled_(properties.page_checksum_enabled()), + sorting_columns_(properties.sorting_columns()), + default_column_properties_(properties.default_column_properties()) {} + + virtual ~Builder() {} + + /// Specify the memory pool for the writer. Default default_memory_pool. + Builder* memory_pool(MemoryPool* pool) { + pool_ = pool; + return this; + } + + /// Enable dictionary encoding in general for all columns. Default + /// enabled. + Builder* enable_dictionary() { + default_column_properties_.set_dictionary_enabled(true); + return this; + } + + /// Disable dictionary encoding in general for all columns. Default + /// enabled. + Builder* disable_dictionary() { + default_column_properties_.set_dictionary_enabled(false); + return this; + } + + /// Enable dictionary encoding for column specified by `path`. Default + /// enabled. + Builder* enable_dictionary(const std::string& path) { + dictionary_enabled_[path] = true; + return this; + } + + /// Enable dictionary encoding for column specified by `path`. Default + /// enabled. 
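+
+    // Usage sketch (illustrative, not part of the upstream header): global and
+    // per-column dictionary control. The column name is an assumption.
+    //
+    //   parquet::WriterProperties::Builder builder;
+    //   builder.enable_dictionary()               // default for all columns
+    //       ->disable_dictionary("large_blob");   // opt a single column out
+    //   std::shared_ptr<parquet::WriterProperties> props = builder.build();
+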
+    Builder* enable_dictionary(const std::shared_ptr<schema::ColumnPath>& path) {
+      return this->enable_dictionary(path->ToDotString());
+    }
+
+    /// Disable dictionary encoding for column specified by `path`. Default
+    /// enabled.
+    Builder* disable_dictionary(const std::string& path) {
+      dictionary_enabled_[path] = false;
+      return this;
+    }
+
+    /// Disable dictionary encoding for column specified by `path`. Default
+    /// enabled.
+    Builder* disable_dictionary(const std::shared_ptr<schema::ColumnPath>& path) {
+      return this->disable_dictionary(path->ToDotString());
+    }
+
+    /// Specify the dictionary page size limit per row group. Default 1MB.
+    Builder* dictionary_pagesize_limit(int64_t dictionary_psize_limit) {
+      dictionary_pagesize_limit_ = dictionary_psize_limit;
+      return this;
+    }
+
+    /// Specify the write batch size while writing batches of Arrow values
+    /// into Parquet. Default 1024.
+    Builder* write_batch_size(int64_t write_batch_size) {
+      write_batch_size_ = write_batch_size;
+      return this;
+    }
+
+    /// Specify the max number of rows to put in a single row group.
+    /// Default 1Mi rows.
+    Builder* max_row_group_length(int64_t max_row_group_length) {
+      max_row_group_length_ = max_row_group_length;
+      return this;
+    }
+
+    /// Specify the data page size.
+    /// Default 1MB.
+    Builder* data_pagesize(int64_t pg_size) {
+      pagesize_ = pg_size;
+      return this;
+    }
+
+    /// Specify the data page version.
+    /// Default V1.
+    Builder* data_page_version(ParquetDataPageVersion data_page_version) {
+      data_page_version_ = data_page_version;
+      return this;
+    }
+
+    /// Specify the Parquet file version.
+    /// Default PARQUET_2_6.
+    Builder* version(ParquetVersion::type version) {
+      version_ = version;
+      return this;
+    }
+
+    Builder* created_by(const std::string& created_by) {
+      created_by_ = created_by;
+      return this;
+    }
+
+    Builder* enable_page_checksum() {
+      page_checksum_enabled_ = true;
+      return this;
+    }
+
+    Builder* disable_page_checksum() {
+      page_checksum_enabled_ = false;
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either if dictionary encoding is disabled or if we fall back
+    /// because the dictionary grew too large.
+    Builder* encoding(Encoding::type encoding_type) {
+      if (encoding_type == Encoding::PLAIN_DICTIONARY ||
+          encoding_type == Encoding::RLE_DICTIONARY) {
+        throw ParquetException("Can't use dictionary encoding as fallback encoding");
+      }
+
+      default_column_properties_.set_encoding(encoding_type);
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either if dictionary encoding is disabled or if we fall back
+    /// because the dictionary grew too large.
+    Builder* encoding(const std::string& path, Encoding::type encoding_type) {
+      if (encoding_type == Encoding::PLAIN_DICTIONARY ||
+          encoding_type == Encoding::RLE_DICTIONARY) {
+        throw ParquetException("Can't use dictionary encoding as fallback encoding");
+      }
+
+      encodings_[path] = encoding_type;
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either if dictionary encoding is disabled or if we fall back
+    /// because the dictionary grew too large.
+    Builder* encoding(const std::shared_ptr<schema::ColumnPath>& path,
+                      Encoding::type encoding_type) {
+      return this->encoding(path->ToDotString(), encoding_type);
+    }
+
+    /// Specify compression codec in general for all columns.
+    /// Default UNCOMPRESSED.
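+
+    // Usage sketch (illustrative, not part of the upstream header): a global
+    // codec plus a non-dictionary fallback encoding. Passing PLAIN_DICTIONARY
+    // or RLE_DICTIONARY to encoding() throws, as documented above.
+    //
+    //   builder.compression(parquet::Compression::ZSTD)
+    //       ->encoding(parquet::Encoding::DELTA_BINARY_PACKED);
+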
+ Builder* compression(Compression::type codec) { + default_column_properties_.set_compression(codec); + return this; + } + + /// Specify max statistics size to store min max value. + /// Default 4KB. + Builder* max_statistics_size(size_t max_stats_sz) { + default_column_properties_.set_max_statistics_size(max_stats_sz); + return this; + } + + /// Specify compression codec for the column specified by `path`. + /// Default UNCOMPRESSED. + Builder* compression(const std::string& path, Compression::type codec) { + codecs_[path] = codec; + return this; + } + + /// Specify compression codec for the column specified by `path`. + /// Default UNCOMPRESSED. + Builder* compression(const std::shared_ptr& path, + Compression::type codec) { + return this->compression(path->ToDotString(), codec); + } + + /// \brief Specify the default compression level for the compressor in + /// every column. In case a column does not have an explicitly specified + /// compression level, the default one would be used. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. + /// + /// If other compressor-specific options need to be set in addition to the compression + /// level, use the codec_options method. + Builder* compression_level(int compression_level) { + default_column_properties_.set_compression_level(compression_level); + return this; + } + + /// \brief Specify a compression level for the compressor for the column + /// described by path. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. + Builder* compression_level(const std::string& path, int compression_level) { + if (!codec_options_[path]) { + codec_options_[path] = std::make_shared(); + } + codec_options_[path]->compression_level = compression_level; + return this; + } + + /// \brief Specify a compression level for the compressor for the column + /// described by path. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. 
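+
+    // Usage sketch (illustrative, not part of the upstream header):
+    // codec-specific levels, globally and for one column; the values and the
+    // column name are assumptions.
+    //
+    //   builder.compression(parquet::Compression::GZIP)
+    //       ->compression_level(6)                 // default for all columns
+    //       ->compression_level("hot_column", 1);  // cheaper level for one column
+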
+ Builder* compression_level(const std::shared_ptr& path, + int compression_level) { + return this->compression_level(path->ToDotString(), compression_level); + } + + /// \brief Specify the default codec options for the compressor in + /// every column. + /// + /// The codec options allow configuring the compression level as well + /// as other codec-specific options. + Builder* codec_options( + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + default_column_properties_.set_codec_options(codec_options); + return this; + } + + /// \brief Specify the codec options for the compressor for the column + /// described by path. + Builder* codec_options( + const std::string& path, + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + codec_options_[path] = codec_options; + return this; + } + + /// \brief Specify the codec options for the compressor for the column + /// described by path. + Builder* codec_options( + const std::shared_ptr& path, + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + return this->codec_options(path->ToDotString(), codec_options); + } + + /// Define the file encryption properties. + /// Default NULL. + Builder* encryption( + std::shared_ptr file_encryption_properties) { + file_encryption_properties_ = std::move(file_encryption_properties); + return this; + } + + /// Enable statistics in general. + /// Default enabled. + Builder* enable_statistics() { + default_column_properties_.set_statistics_enabled(true); + return this; + } + + /// Disable statistics in general. + /// Default enabled. + Builder* disable_statistics() { + default_column_properties_.set_statistics_enabled(false); + return this; + } + + /// Enable statistics for the column specified by `path`. + /// Default enabled. + Builder* enable_statistics(const std::string& path) { + statistics_enabled_[path] = true; + return this; + } + + /// Enable statistics for the column specified by `path`. + /// Default enabled. + Builder* enable_statistics(const std::shared_ptr& path) { + return this->enable_statistics(path->ToDotString()); + } + + /// Define the sorting columns. + /// Default empty. + /// + /// If sorting columns are set, user should ensure that records + /// are sorted by sorting columns. Otherwise, the storing data + /// will be inconsistent with sorting_columns metadata. + Builder* set_sorting_columns(std::vector sorting_columns) { + sorting_columns_ = std::move(sorting_columns); + return this; + } + + /// Disable statistics for the column specified by `path`. + /// Default enabled. + Builder* disable_statistics(const std::string& path) { + statistics_enabled_[path] = false; + return this; + } + + /// Disable statistics for the column specified by `path`. + /// Default enabled. + Builder* disable_statistics(const std::shared_ptr& path) { + return this->disable_statistics(path->ToDotString()); + } + + /// Allow decimals with 1 <= precision <= 18 to be stored as integers. + /// + /// In Parquet, DECIMAL can be stored in any of the following physical types: + /// - int32: for 1 <= precision <= 9. + /// - int64: for 10 <= precision <= 18. + /// - fixed_len_byte_array: precision is limited by the array size. + /// Length n can store <= floor(log_10(2^(8*n - 1) - 1)) base-10 digits. + /// - binary: precision is unlimited. The minimum number of bytes to store + /// the unscaled value is used. + /// + /// By default, this is DISABLED and all decimal types annotate fixed_len_byte_array. 
+ /// + /// When enabled, the C++ writer will use following physical types to store decimals: + /// - int32: for 1 <= precision <= 9. + /// - int64: for 10 <= precision <= 18. + /// - fixed_len_byte_array: for precision > 18. + /// + /// As a consequence, decimal columns stored in integer types are more compact. + Builder* enable_store_decimal_as_integer() { + store_decimal_as_integer_ = true; + return this; + } + + /// Disable decimal logical type with 1 <= precision <= 18 to be stored + /// as integer physical type. + /// + /// Default disabled. + Builder* disable_store_decimal_as_integer() { + store_decimal_as_integer_ = false; + return this; + } + + /// Enable writing page index in general for all columns. Default disabled. + /// + /// Writing statistics to the page index disables the old method of writing + /// statistics to each data page header. + /// The page index makes filtering more efficient than the page header, as + /// it gathers all the statistics for a Parquet file in a single place, + /// avoiding scattered I/O. + /// + /// Please check the link below for more details: + /// https://github.com/apache/parquet-format/blob/master/PageIndex.md + Builder* enable_write_page_index() { + default_column_properties_.set_page_index_enabled(true); + return this; + } + + /// Disable writing page index in general for all columns. Default disabled. + Builder* disable_write_page_index() { + default_column_properties_.set_page_index_enabled(false); + return this; + } + + /// Enable writing page index for column specified by `path`. Default disabled. + Builder* enable_write_page_index(const std::string& path) { + page_index_enabled_[path] = true; + return this; + } + + /// Enable writing page index for column specified by `path`. Default disabled. + Builder* enable_write_page_index(const std::shared_ptr& path) { + return this->enable_write_page_index(path->ToDotString()); + } + + /// Disable writing page index for column specified by `path`. Default disabled. + Builder* disable_write_page_index(const std::string& path) { + page_index_enabled_[path] = false; + return this; + } + + /// Disable writing page index for column specified by `path`. Default disabled. + Builder* disable_write_page_index(const std::shared_ptr& path) { + return this->disable_write_page_index(path->ToDotString()); + } + + /// \brief Build the WriterProperties with the builder parameters. + /// \return The WriterProperties defined by the builder. 
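+
+    // Usage sketch (illustrative, not part of the upstream header): a typical
+    // end-to-end builder chain; the values are assumptions.
+    //
+    //   std::shared_ptr<parquet::WriterProperties> props =
+    //       parquet::WriterProperties::Builder()
+    //           .compression(parquet::Compression::SNAPPY)
+    //           ->enable_write_page_index()
+    //           ->max_row_group_length(512 * 1024)
+    //           ->build();
+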
+ std::shared_ptr build() { + std::unordered_map column_properties; + auto get = [&](const std::string& key) -> ColumnProperties& { + auto it = column_properties.find(key); + if (it == column_properties.end()) + return column_properties[key] = default_column_properties_; + else + return it->second; + }; + + for (const auto& item : encodings_) get(item.first).set_encoding(item.second); + for (const auto& item : codecs_) get(item.first).set_compression(item.second); + for (const auto& item : codec_options_) + get(item.first).set_codec_options(item.second); + for (const auto& item : dictionary_enabled_) + get(item.first).set_dictionary_enabled(item.second); + for (const auto& item : statistics_enabled_) + get(item.first).set_statistics_enabled(item.second); + for (const auto& item : page_index_enabled_) + get(item.first).set_page_index_enabled(item.second); + + return std::shared_ptr(new WriterProperties( + pool_, dictionary_pagesize_limit_, write_batch_size_, max_row_group_length_, + pagesize_, version_, created_by_, page_checksum_enabled_, + std::move(file_encryption_properties_), default_column_properties_, + column_properties, data_page_version_, store_decimal_as_integer_, + std::move(sorting_columns_))); + } + + private: + MemoryPool* pool_; + int64_t dictionary_pagesize_limit_; + int64_t write_batch_size_; + int64_t max_row_group_length_; + int64_t pagesize_; + ParquetVersion::type version_; + ParquetDataPageVersion data_page_version_; + std::string created_by_; + bool store_decimal_as_integer_; + bool page_checksum_enabled_; + + std::shared_ptr file_encryption_properties_; + + // If empty, there is no sorting columns. + std::vector sorting_columns_; + + // Settings used for each column unless overridden in any of the maps below + ColumnProperties default_column_properties_; + std::unordered_map encodings_; + std::unordered_map codecs_; + std::unordered_map> codec_options_; + std::unordered_map dictionary_enabled_; + std::unordered_map statistics_enabled_; + std::unordered_map page_index_enabled_; + }; + + inline MemoryPool* memory_pool() const { return pool_; } + + inline int64_t dictionary_pagesize_limit() const { return dictionary_pagesize_limit_; } + + inline int64_t write_batch_size() const { return write_batch_size_; } + + inline int64_t max_row_group_length() const { return max_row_group_length_; } + + inline int64_t data_pagesize() const { return pagesize_; } + + inline ParquetDataPageVersion data_page_version() const { + return parquet_data_page_version_; + } + + inline ParquetVersion::type version() const { return parquet_version_; } + + inline std::string created_by() const { return parquet_created_by_; } + + inline bool store_decimal_as_integer() const { return store_decimal_as_integer_; } + + inline bool page_checksum_enabled() const { return page_checksum_enabled_; } + + inline Encoding::type dictionary_index_encoding() const { + if (parquet_version_ == ParquetVersion::PARQUET_1_0) { + return Encoding::PLAIN_DICTIONARY; + } else { + return Encoding::RLE_DICTIONARY; + } + } + + inline Encoding::type dictionary_page_encoding() const { + if (parquet_version_ == ParquetVersion::PARQUET_1_0) { + return Encoding::PLAIN_DICTIONARY; + } else { + return Encoding::PLAIN; + } + } + + const ColumnProperties& column_properties( + const std::shared_ptr& path) const { + auto it = column_properties_.find(path->ToDotString()); + if (it != column_properties_.end()) return it->second; + return default_column_properties_; + } + + Encoding::type encoding(const std::shared_ptr& path) const { + 
return column_properties(path).encoding(); + } + + Compression::type compression(const std::shared_ptr& path) const { + return column_properties(path).compression(); + } + + int compression_level(const std::shared_ptr& path) const { + return column_properties(path).compression_level(); + } + + const std::shared_ptr codec_options( + const std::shared_ptr& path) const { + return column_properties(path).codec_options(); + } + + bool dictionary_enabled(const std::shared_ptr& path) const { + return column_properties(path).dictionary_enabled(); + } + + const std::vector& sorting_columns() const { return sorting_columns_; } + + bool statistics_enabled(const std::shared_ptr& path) const { + return column_properties(path).statistics_enabled(); + } + + size_t max_statistics_size(const std::shared_ptr& path) const { + return column_properties(path).max_statistics_size(); + } + + bool page_index_enabled(const std::shared_ptr& path) const { + return column_properties(path).page_index_enabled(); + } + + bool page_index_enabled() const { + if (default_column_properties_.page_index_enabled()) { + return true; + } + for (const auto& item : column_properties_) { + if (item.second.page_index_enabled()) { + return true; + } + } + return false; + } + + inline FileEncryptionProperties* file_encryption_properties() const { + return file_encryption_properties_.get(); + } + + std::shared_ptr column_encryption_properties( + const std::string& path) const { + if (file_encryption_properties_) { + return file_encryption_properties_->column_encryption_properties(path); + } else { + return NULLPTR; + } + } + + // \brief Return the default column properties + const ColumnProperties& default_column_properties() const { + return default_column_properties_; + } + + private: + explicit WriterProperties( + MemoryPool* pool, int64_t dictionary_pagesize_limit, int64_t write_batch_size, + int64_t max_row_group_length, int64_t pagesize, ParquetVersion::type version, + const std::string& created_by, bool page_write_checksum_enabled, + std::shared_ptr file_encryption_properties, + const ColumnProperties& default_column_properties, + const std::unordered_map& column_properties, + ParquetDataPageVersion data_page_version, bool store_short_decimal_as_integer, + std::vector sorting_columns) + : pool_(pool), + dictionary_pagesize_limit_(dictionary_pagesize_limit), + write_batch_size_(write_batch_size), + max_row_group_length_(max_row_group_length), + pagesize_(pagesize), + parquet_data_page_version_(data_page_version), + parquet_version_(version), + parquet_created_by_(created_by), + store_decimal_as_integer_(store_short_decimal_as_integer), + page_checksum_enabled_(page_write_checksum_enabled), + file_encryption_properties_(file_encryption_properties), + sorting_columns_(std::move(sorting_columns)), + default_column_properties_(default_column_properties), + column_properties_(column_properties) {} + + MemoryPool* pool_; + int64_t dictionary_pagesize_limit_; + int64_t write_batch_size_; + int64_t max_row_group_length_; + int64_t pagesize_; + ParquetDataPageVersion parquet_data_page_version_; + ParquetVersion::type parquet_version_; + std::string parquet_created_by_; + bool store_decimal_as_integer_; + bool page_checksum_enabled_; + + std::shared_ptr file_encryption_properties_; + + std::vector sorting_columns_; + + ColumnProperties default_column_properties_; + std::unordered_map column_properties_; +}; + +PARQUET_EXPORT const std::shared_ptr& default_writer_properties(); + +// 
---------------------------------------------------------------------- +// Properties specific to Apache Arrow columnar read and write + +static constexpr bool kArrowDefaultUseThreads = false; + +// Default number of rows to read when using ::arrow::RecordBatchReader +static constexpr int64_t kArrowDefaultBatchSize = 64 * 1024; + +/// EXPERIMENTAL: Properties for configuring FileReader behavior. +class PARQUET_EXPORT ArrowReaderProperties { + public: + explicit ArrowReaderProperties(bool use_threads = kArrowDefaultUseThreads) + : use_threads_(use_threads), + read_dict_indices_(), + batch_size_(kArrowDefaultBatchSize), + pre_buffer_(true), + cache_options_(::arrow::io::CacheOptions::LazyDefaults()), + coerce_int96_timestamp_unit_(::arrow::TimeUnit::NANO) {} + + /// \brief Set whether to use the IO thread pool to parse columns in parallel. + /// + /// Default is false. + void set_use_threads(bool use_threads) { use_threads_ = use_threads; } + /// Return whether will use multiple threads. + bool use_threads() const { return use_threads_; } + + /// \brief Set whether to read a particular column as dictionary encoded. + /// + /// If the file metadata contains a serialized Arrow schema, then ... + //// + /// This is only supported for columns with a Parquet physical type of + /// BYTE_ARRAY, such as string or binary types. + void set_read_dictionary(int column_index, bool read_dict) { + if (read_dict) { + read_dict_indices_.insert(column_index); + } else { + read_dict_indices_.erase(column_index); + } + } + /// Return whether the column at the index will be read as dictionary. + bool read_dictionary(int column_index) const { + if (read_dict_indices_.find(column_index) != read_dict_indices_.end()) { + return true; + } else { + return false; + } + } + + /// \brief Set the maximum number of rows to read into a record batch. + /// + /// Will only be fewer rows when there are no more rows in the file. + /// Note that some APIs such as ReadTable may ignore this setting. + void set_batch_size(int64_t batch_size) { batch_size_ = batch_size; } + /// Return the batch size in rows. + /// + /// Note that some APIs such as ReadTable may ignore this setting. + int64_t batch_size() const { return batch_size_; } + + /// Enable read coalescing (default false). + /// + /// When enabled, the Arrow reader will pre-buffer necessary regions + /// of the file in-memory. This is intended to improve performance on + /// high-latency filesystems (e.g. Amazon S3). + void set_pre_buffer(bool pre_buffer) { pre_buffer_ = pre_buffer; } + /// Return whether read coalescing is enabled. + bool pre_buffer() const { return pre_buffer_; } + + /// Set options for read coalescing. This can be used to tune the + /// implementation for characteristics of different filesystems. + void set_cache_options(::arrow::io::CacheOptions options) { cache_options_ = options; } + /// Return the options for read coalescing. + const ::arrow::io::CacheOptions& cache_options() const { return cache_options_; } + + /// Set execution context for read coalescing. + void set_io_context(const ::arrow::io::IOContext& ctx) { io_context_ = ctx; } + /// Return the execution context used for read coalescing. + const ::arrow::io::IOContext& io_context() const { return io_context_; } + + /// Set timestamp unit to use for deprecated INT96-encoded timestamps + /// (default is NANO). 
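+
+  // Usage sketch (illustrative, not part of the upstream header): tuning the
+  // Arrow reader for a high-latency filesystem; the values are assumptions.
+  //
+  //   parquet::ArrowReaderProperties props;
+  //   props.set_use_threads(true);
+  //   props.set_batch_size(128 * 1024);
+  //   props.set_pre_buffer(true);  // coalesce reads, e.g. for S3
+  //   props.set_coerce_int96_timestamp_unit(::arrow::TimeUnit::MICRO);
+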
+ void set_coerce_int96_timestamp_unit(::arrow::TimeUnit::type unit) { + coerce_int96_timestamp_unit_ = unit; + } + + ::arrow::TimeUnit::type coerce_int96_timestamp_unit() const { + return coerce_int96_timestamp_unit_; + } + + private: + bool use_threads_; + std::unordered_set read_dict_indices_; + int64_t batch_size_; + bool pre_buffer_; + ::arrow::io::IOContext io_context_; + ::arrow::io::CacheOptions cache_options_; + ::arrow::TimeUnit::type coerce_int96_timestamp_unit_; +}; + +/// EXPERIMENTAL: Constructs the default ArrowReaderProperties +PARQUET_EXPORT +ArrowReaderProperties default_arrow_reader_properties(); + +class PARQUET_EXPORT ArrowWriterProperties { + public: + enum EngineVersion { + V1, // Supports only nested lists. + V2 // Full support for all nesting combinations + }; + class Builder { + public: + Builder() + : write_timestamps_as_int96_(false), + coerce_timestamps_enabled_(false), + coerce_timestamps_unit_(::arrow::TimeUnit::SECOND), + truncated_timestamps_allowed_(false), + store_schema_(false), + compliant_nested_types_(true), + engine_version_(V2), + use_threads_(kArrowDefaultUseThreads), + executor_(NULLPTR) {} + virtual ~Builder() = default; + + /// \brief Disable writing legacy int96 timestamps (default disabled). + Builder* disable_deprecated_int96_timestamps() { + write_timestamps_as_int96_ = false; + return this; + } + + /// \brief Enable writing legacy int96 timestamps (default disabled). + /// + /// May be turned on to write timestamps compatible with older Parquet writers. + /// This takes precedent over coerce_timestamps. + Builder* enable_deprecated_int96_timestamps() { + write_timestamps_as_int96_ = true; + return this; + } + + /// \brief Coerce all timestamps to the specified time unit. + /// \param unit time unit to truncate to. + /// For Parquet versions 1.0 and 2.4, nanoseconds are casted to microseconds. + Builder* coerce_timestamps(::arrow::TimeUnit::type unit) { + coerce_timestamps_enabled_ = true; + coerce_timestamps_unit_ = unit; + return this; + } + + /// \brief Allow loss of data when truncating timestamps. + /// + /// This is disallowed by default and an error will be returned. + Builder* allow_truncated_timestamps() { + truncated_timestamps_allowed_ = true; + return this; + } + + /// \brief Disallow loss of data when truncating timestamps (default). + Builder* disallow_truncated_timestamps() { + truncated_timestamps_allowed_ = false; + return this; + } + + /// \brief EXPERIMENTAL: Write binary serialized Arrow schema to the file, + /// to enable certain read options (like "read_dictionary") to be set + /// automatically + Builder* store_schema() { + store_schema_ = true; + return this; + } + + /// \brief When enabled, will not preserve Arrow field names for list types. + /// + /// Instead of using the field names Arrow uses for the values array of + /// list types (default "item"), will use "element", as is specified in + /// the Parquet spec. + /// + /// This is enabled by default. + Builder* enable_compliant_nested_types() { + compliant_nested_types_ = true; + return this; + } + + /// Preserve Arrow list field name. + Builder* disable_compliant_nested_types() { + compliant_nested_types_ = false; + return this; + } + + /// Set the version of the Parquet writer engine. + Builder* set_engine_version(EngineVersion version) { + engine_version_ = version; + return this; + } + + /// \brief Set whether to use multiple threads to write columns + /// in parallel in the buffered row group mode. 
+ /// + /// WARNING: If writing multiple files in parallel in the same + /// executor, deadlock may occur if use_threads is true. Please + /// disable it in this case. + /// + /// Default is false. + Builder* set_use_threads(bool use_threads) { + use_threads_ = use_threads; + return this; + } + + /// \brief Set the executor to write columns in parallel in the + /// buffered row group mode. + /// + /// Default is nullptr and the default cpu executor will be used. + Builder* set_executor(::arrow::internal::Executor* executor) { + executor_ = executor; + return this; + } + + /// Create the final properties. + std::shared_ptr build() { + return std::shared_ptr(new ArrowWriterProperties( + write_timestamps_as_int96_, coerce_timestamps_enabled_, coerce_timestamps_unit_, + truncated_timestamps_allowed_, store_schema_, compliant_nested_types_, + engine_version_, use_threads_, executor_)); + } + + private: + bool write_timestamps_as_int96_; + + bool coerce_timestamps_enabled_; + ::arrow::TimeUnit::type coerce_timestamps_unit_; + bool truncated_timestamps_allowed_; + + bool store_schema_; + bool compliant_nested_types_; + EngineVersion engine_version_; + + bool use_threads_; + ::arrow::internal::Executor* executor_; + }; + + bool support_deprecated_int96_timestamps() const { return write_timestamps_as_int96_; } + + bool coerce_timestamps_enabled() const { return coerce_timestamps_enabled_; } + ::arrow::TimeUnit::type coerce_timestamps_unit() const { + return coerce_timestamps_unit_; + } + + bool truncated_timestamps_allowed() const { return truncated_timestamps_allowed_; } + + bool store_schema() const { return store_schema_; } + + /// \brief Enable nested type naming according to the parquet specification. + /// + /// Older versions of arrow wrote out field names for nested lists based on the name + /// of the field. According to the parquet specification they should always be + /// "element". + bool compliant_nested_types() const { return compliant_nested_types_; } + + /// \brief The underlying engine version to use when writing Arrow data. + /// + /// V2 is currently the latest V1 is considered deprecated but left in + /// place in case there are bugs detected in V2. + EngineVersion engine_version() const { return engine_version_; } + + /// \brief Returns whether the writer will use multiple threads + /// to write columns in parallel in the buffered row group mode. + bool use_threads() const { return use_threads_; } + + /// \brief Returns the executor used to write columns in parallel. 
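+
+  // Usage sketch (illustrative, not part of the upstream header): embedding
+  // the Arrow schema and coercing timestamps on write.
+  //
+  //   std::shared_ptr<parquet::ArrowWriterProperties> arrow_props =
+  //       parquet::ArrowWriterProperties::Builder()
+  //           .store_schema()
+  //           ->coerce_timestamps(::arrow::TimeUnit::MILLI)
+  //           ->allow_truncated_timestamps()
+  //           ->build();
+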
+ ::arrow::internal::Executor* executor() const; + + private: + explicit ArrowWriterProperties(bool write_nanos_as_int96, + bool coerce_timestamps_enabled, + ::arrow::TimeUnit::type coerce_timestamps_unit, + bool truncated_timestamps_allowed, bool store_schema, + bool compliant_nested_types, + EngineVersion engine_version, bool use_threads, + ::arrow::internal::Executor* executor) + : write_timestamps_as_int96_(write_nanos_as_int96), + coerce_timestamps_enabled_(coerce_timestamps_enabled), + coerce_timestamps_unit_(coerce_timestamps_unit), + truncated_timestamps_allowed_(truncated_timestamps_allowed), + store_schema_(store_schema), + compliant_nested_types_(compliant_nested_types), + engine_version_(engine_version), + use_threads_(use_threads), + executor_(executor) {} + + const bool write_timestamps_as_int96_; + const bool coerce_timestamps_enabled_; + const ::arrow::TimeUnit::type coerce_timestamps_unit_; + const bool truncated_timestamps_allowed_; + const bool store_schema_; + const bool compliant_nested_types_; + const EngineVersion engine_version_; + const bool use_threads_; + ::arrow::internal::Executor* executor_; +}; + +/// \brief State object used for writing Arrow data directly to a Parquet +/// column chunk. API possibly not stable +struct ArrowWriteContext { + ArrowWriteContext(MemoryPool* memory_pool, ArrowWriterProperties* properties) + : memory_pool(memory_pool), + properties(properties), + data_buffer(AllocateBuffer(memory_pool)), + def_levels_buffer(AllocateBuffer(memory_pool)) {} + + template + ::arrow::Status GetScratchData(const int64_t num_values, T** out) { + ARROW_RETURN_NOT_OK(this->data_buffer->Resize(num_values * sizeof(T), false)); + *out = reinterpret_cast(this->data_buffer->mutable_data()); + return ::arrow::Status::OK(); + } + + MemoryPool* memory_pool; + const ArrowWriterProperties* properties; + + // Buffer used for storing the data of an array converted to the physical type + // as expected by parquet-cpp. + std::shared_ptr data_buffer; + + // We use the shared ownership of this buffer + std::shared_ptr def_levels_buffer; +}; + +PARQUET_EXPORT +std::shared_ptr default_arrow_writer_properties(); + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..896ec1e47968d5cb8a4f8df8fd097b035075a4f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h @@ -0,0 +1,492 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// This module contains the logical parquet-cpp types (independent of Thrift +// structures), schema nodes, and related type tools + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/platform.h" +#include "parquet/types.h" +#include "parquet/windows_fixup.h" // for OPTIONAL + +namespace parquet { + +class SchemaDescriptor; + +namespace schema { + +class Node; + +// List encodings: using the terminology from Impala to define different styles +// of representing logical lists (a.k.a. ARRAY types) in Parquet schemas. Since +// the converted type named in the Parquet metadata is ConvertedType::LIST we +// use that terminology here. It also helps distinguish from the *_ARRAY +// primitive types. +// +// One-level encoding: Only allows required lists with required cells +// repeated value_type name +// +// Two-level encoding: Enables optional lists with only required cells +// group list +// repeated value_type item +// +// Three-level encoding: Enables optional lists with optional cells +// group bag +// repeated group list +// value_type item +// +// 2- and 1-level encoding are respectively equivalent to 3-level encoding with +// the non-repeated nodes set to required. +// +// The "official" encoding recommended in the Parquet spec is the 3-level, and +// we use that as the default when creating list types. For semantic completeness +// we allow the other two. Since all types of encodings will occur "in the +// wild" we need to be able to interpret the associated definition levels in +// the context of the actual encoding used in the file. +// +// NB: Some Parquet writers may not set ConvertedType::LIST on the repeated +// SchemaElement, which could make things challenging if we are trying to infer +// that a sequence of nodes semantically represents an array according to one +// of these encodings (versus a struct containing an array). We should refuse +// the temptation to guess, as they say. +struct ListEncoding { + enum type { ONE_LEVEL, TWO_LEVEL, THREE_LEVEL }; +}; + +class PARQUET_EXPORT ColumnPath { + public: + ColumnPath() : path_() {} + explicit ColumnPath(const std::vector& path) : path_(path) {} + explicit ColumnPath(std::vector&& path) : path_(std::move(path)) {} + + static std::shared_ptr FromDotString(const std::string& dotstring); + static std::shared_ptr FromNode(const Node& node); + + std::shared_ptr extend(const std::string& node_name) const; + std::string ToDotString() const; + const std::vector& ToDotVector() const; + + protected: + std::vector path_; +}; + +// Base class for logical schema types. 
A type has a name, repetition level, +// and optionally a logical type (ConvertedType in Parquet metadata parlance) +class PARQUET_EXPORT Node { + public: + enum type { PRIMITIVE, GROUP }; + + virtual ~Node() {} + + bool is_primitive() const { return type_ == Node::PRIMITIVE; } + + bool is_group() const { return type_ == Node::GROUP; } + + bool is_optional() const { return repetition_ == Repetition::OPTIONAL; } + + bool is_repeated() const { return repetition_ == Repetition::REPEATED; } + + bool is_required() const { return repetition_ == Repetition::REQUIRED; } + + virtual bool Equals(const Node* other) const = 0; + + const std::string& name() const { return name_; } + + Node::type node_type() const { return type_; } + + Repetition::type repetition() const { return repetition_; } + + ConvertedType::type converted_type() const { return converted_type_; } + + const std::shared_ptr& logical_type() const { return logical_type_; } + + /// \brief The field_id value for the serialized SchemaElement. If the + /// field_id is less than 0 (e.g. -1), it will not be set when serialized to + /// Thrift. + int field_id() const { return field_id_; } + + const Node* parent() const { return parent_; } + + const std::shared_ptr path() const; + + virtual void ToParquet(void* element) const = 0; + + // Node::Visitor abstract class for walking schemas with the visitor pattern + class Visitor { + public: + virtual ~Visitor() {} + + virtual void Visit(Node* node) = 0; + }; + class ConstVisitor { + public: + virtual ~ConstVisitor() {} + + virtual void Visit(const Node* node) = 0; + }; + + virtual void Visit(Visitor* visitor) = 0; + virtual void VisitConst(ConstVisitor* visitor) const = 0; + + protected: + friend class GroupNode; + + Node(Node::type type, const std::string& name, Repetition::type repetition, + ConvertedType::type converted_type = ConvertedType::NONE, int field_id = -1) + : type_(type), + name_(name), + repetition_(repetition), + converted_type_(converted_type), + field_id_(field_id), + parent_(NULLPTR) {} + + Node(Node::type type, const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, int field_id = -1) + : type_(type), + name_(name), + repetition_(repetition), + logical_type_(std::move(logical_type)), + field_id_(field_id), + parent_(NULLPTR) {} + + Node::type type_; + std::string name_; + Repetition::type repetition_; + ConvertedType::type converted_type_; + std::shared_ptr logical_type_; + int field_id_; + // Nodes should not be shared, they have a single parent. + const Node* parent_; + + bool EqualsInternal(const Node* other) const; + void SetParent(const Node* p_parent); + + private: + PARQUET_DISALLOW_COPY_AND_ASSIGN(Node); +}; + +// Save our breath all over the place with these typedefs +using NodePtr = std::shared_ptr; +using NodeVector = std::vector; + +// A type that is one of the primitive Parquet storage types. 
In addition to +// the other type metadata (name, repetition level, logical type), also has the +// physical storage type and their type-specific metadata (byte width, decimal +// parameters) +class PARQUET_EXPORT PrimitiveNode : public Node { + public: + static std::unique_ptr FromParquet(const void* opaque_element); + + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + Type::type type, + ConvertedType::type converted_type = ConvertedType::NONE, + int length = -1, int precision = -1, int scale = -1, + int field_id = -1) { + return NodePtr(new PrimitiveNode(name, repetition, type, converted_type, length, + precision, scale, field_id)); + } + + // If no logical type, pass LogicalType::None() or nullptr + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, + Type::type primitive_type, int primitive_length = -1, + int field_id = -1) { + return NodePtr(new PrimitiveNode(name, repetition, std::move(logical_type), + primitive_type, primitive_length, field_id)); + } + + bool Equals(const Node* other) const override; + + Type::type physical_type() const { return physical_type_; } + + ColumnOrder column_order() const { return column_order_; } + + void SetColumnOrder(ColumnOrder column_order) { column_order_ = column_order; } + + int32_t type_length() const { return type_length_; } + + const DecimalMetadata& decimal_metadata() const { return decimal_metadata_; } + + void ToParquet(void* element) const override; + void Visit(Visitor* visitor) override; + void VisitConst(ConstVisitor* visitor) const override; + + private: + PrimitiveNode(const std::string& name, Repetition::type repetition, Type::type type, + ConvertedType::type converted_type = ConvertedType::NONE, int length = -1, + int precision = -1, int scale = -1, int field_id = -1); + + PrimitiveNode(const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, + Type::type primitive_type, int primitive_length = -1, int field_id = -1); + + Type::type physical_type_; + int32_t type_length_; + DecimalMetadata decimal_metadata_; + ColumnOrder column_order_; + + // For FIXED_LEN_BYTE_ARRAY + void SetTypeLength(int32_t length) { type_length_ = length; } + + bool EqualsInternal(const PrimitiveNode* other) const; + + FRIEND_TEST(TestPrimitiveNode, Attrs); + FRIEND_TEST(TestPrimitiveNode, Equals); + FRIEND_TEST(TestPrimitiveNode, PhysicalLogicalMapping); + FRIEND_TEST(TestPrimitiveNode, FromParquet); +}; + +class PARQUET_EXPORT GroupNode : public Node { + public: + static std::unique_ptr FromParquet(const void* opaque_element, + NodeVector fields = {}); + + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + ConvertedType::type converted_type = ConvertedType::NONE, + int field_id = -1) { + return NodePtr(new GroupNode(name, repetition, fields, converted_type, field_id)); + } + + // If no logical type, pass nullptr + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + std::shared_ptr logical_type, + int field_id = -1) { + return NodePtr(new GroupNode(name, repetition, fields, logical_type, field_id)); + } + 
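The factory functions above are all that is needed to assemble the three-level LIST encoding described at the top of this header. A minimal sketch, with illustrative names (MakeInt32List, bag, list, and item are not part of this header):

#include "parquet/schema.h"

using parquet::schema::GroupNode;
using parquet::schema::NodePtr;
using parquet::schema::PrimitiveNode;

// Three-level encoding: an optional list of optional int32 cells.
NodePtr MakeInt32List() {
  NodePtr item = PrimitiveNode::Make("item", parquet::Repetition::OPTIONAL,
                                     parquet::Type::INT32);
  NodePtr list = GroupNode::Make("list", parquet::Repetition::REPEATED, {item});
  // Annotate the outer group with ConvertedType::LIST so that readers can
  // recognize the encoding rather than guessing.
  return GroupNode::Make("bag", parquet::Repetition::OPTIONAL, {list},
                         parquet::ConvertedType::LIST);
}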
+ bool Equals(const Node* other) const override; + + const NodePtr& field(int i) const { return fields_[i]; } + // Get the index of a field by its name, or negative value if not found. + // If several fields share the same name, it is unspecified which one + // is returned. + int FieldIndex(const std::string& name) const; + // Get the index of a field by its node, or negative value if not found. + int FieldIndex(const Node& node) const; + + int field_count() const { return static_cast(fields_.size()); } + + void ToParquet(void* element) const override; + void Visit(Visitor* visitor) override; + void VisitConst(ConstVisitor* visitor) const override; + + /// \brief Return true if this node or any child node has REPEATED repetition + /// type + bool HasRepeatedFields() const; + + private: + GroupNode(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + ConvertedType::type converted_type = ConvertedType::NONE, int field_id = -1); + + GroupNode(const std::string& name, Repetition::type repetition, + const NodeVector& fields, std::shared_ptr logical_type, + int field_id = -1); + + NodeVector fields_; + bool EqualsInternal(const GroupNode* other) const; + + // Mapping between field name to the field index + std::unordered_multimap field_name_to_idx_; + + FRIEND_TEST(TestGroupNode, Attrs); + FRIEND_TEST(TestGroupNode, Equals); + FRIEND_TEST(TestGroupNode, FieldIndex); + FRIEND_TEST(TestGroupNode, FieldIndexDuplicateName); +}; + +// ---------------------------------------------------------------------- +// Convenience primitive type factory functions + +#define PRIMITIVE_FACTORY(FuncName, TYPE) \ + static inline NodePtr FuncName(const std::string& name, \ + Repetition::type repetition = Repetition::OPTIONAL, \ + int field_id = -1) { \ + return PrimitiveNode::Make(name, repetition, Type::TYPE, ConvertedType::NONE, \ + /*length=*/-1, /*precision=*/-1, /*scale=*/-1, field_id); \ + } + +PRIMITIVE_FACTORY(Boolean, BOOLEAN) +PRIMITIVE_FACTORY(Int32, INT32) +PRIMITIVE_FACTORY(Int64, INT64) +PRIMITIVE_FACTORY(Int96, INT96) +PRIMITIVE_FACTORY(Float, FLOAT) +PRIMITIVE_FACTORY(Double, DOUBLE) +PRIMITIVE_FACTORY(ByteArray, BYTE_ARRAY) + +void PARQUET_EXPORT PrintSchema(const schema::Node* schema, std::ostream& stream, + int indent_width = 2); + +} // namespace schema + +// The ColumnDescriptor encapsulates information necessary to interpret +// primitive column data in the context of a particular schema. We have to +// examine the node structure of a column's path to the root in the schema tree +// to be able to reassemble the nested structure from the repetition and +// definition levels. +class PARQUET_EXPORT ColumnDescriptor { + public: + ColumnDescriptor(schema::NodePtr node, int16_t max_definition_level, + int16_t max_repetition_level, + const SchemaDescriptor* schema_descr = NULLPTR); + + bool Equals(const ColumnDescriptor& other) const; + + int16_t max_definition_level() const { return max_definition_level_; } + + int16_t max_repetition_level() const { return max_repetition_level_; } + + Type::type physical_type() const { return primitive_node_->physical_type(); } + + ConvertedType::type converted_type() const { return primitive_node_->converted_type(); } + + const std::shared_ptr& logical_type() const { + return primitive_node_->logical_type(); + } + + ColumnOrder column_order() const { return primitive_node_->column_order(); } + + SortOrder::type sort_order() const { + auto la = logical_type(); + auto pt = physical_type(); + return la ? 
GetSortOrder(la, pt) : GetSortOrder(converted_type(), pt); + } + + const std::string& name() const { return primitive_node_->name(); } + + const std::shared_ptr path() const; + + const schema::NodePtr& schema_node() const { return node_; } + + std::string ToString() const; + + int type_length() const; + + int type_precision() const; + + int type_scale() const; + + private: + schema::NodePtr node_; + const schema::PrimitiveNode* primitive_node_; + + int16_t max_definition_level_; + int16_t max_repetition_level_; +}; + +// Container for the converted Parquet schema with a computed information from +// the schema analysis needed for file reading +// +// * Column index to Node +// * Max repetition / definition levels for each primitive node +// +// The ColumnDescriptor objects produced by this class can be used to assist in +// the reconstruction of fully materialized data structures from the +// repetition-definition level encoding of nested data +// +// TODO(wesm): this object can be recomputed from a Schema +class PARQUET_EXPORT SchemaDescriptor { + public: + SchemaDescriptor() {} + ~SchemaDescriptor() {} + + // Analyze the schema + void Init(std::unique_ptr schema); + void Init(schema::NodePtr schema); + + const ColumnDescriptor* Column(int i) const; + + // Get the index of a column by its dotstring path, or negative value if not found. + // If several columns share the same dotstring path, it is unspecified which one + // is returned. + int ColumnIndex(const std::string& node_path) const; + // Get the index of a column by its node, or negative value if not found. + int ColumnIndex(const schema::Node& node) const; + + bool Equals(const SchemaDescriptor& other, std::ostream* diff_output = NULLPTR) const; + + // The number of physical columns appearing in the file + int num_columns() const { return static_cast(leaves_.size()); } + + const schema::NodePtr& schema_root() const { return schema_; } + + const schema::GroupNode* group_node() const { return group_node_; } + + // Returns the root (child of the schema root) node of the leaf(column) node + const schema::Node* GetColumnRoot(int i) const; + + const std::string& name() const { return group_node_->name(); } + + std::string ToString() const; + + void updateColumnOrders(const std::vector& column_orders); + + /// \brief Return column index corresponding to a particular + /// PrimitiveNode. 
Returns -1 if not found + int GetColumnIndex(const schema::PrimitiveNode& node) const; + + /// \brief Return true if any field or their children have REPEATED repetition + /// type + bool HasRepeatedFields() const; + + private: + friend class ColumnDescriptor; + + // Root Node + schema::NodePtr schema_; + // Root Node + const schema::GroupNode* group_node_; + + void BuildTree(const schema::NodePtr& node, int16_t max_def_level, + int16_t max_rep_level, const schema::NodePtr& base); + + // Result of leaf node / tree analysis + std::vector leaves_; + + std::unordered_map node_to_leaf_index_; + + // Mapping between leaf nodes and root group of leaf (first node + // below the schema's root group) + // + // For example, the leaf `a.b.c.d` would have a link back to `a` + // + // -- a <------ + // -- -- b | + // -- -- -- c | + // -- -- -- -- d + std::unordered_map leaf_to_base_; + + // Mapping between ColumnPath DotString to the leaf index + std::unordered_multimap leaf_to_idx_; +}; + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h new file mode 100644 index 0000000000000000000000000000000000000000..0d6ea9898f7ba2f9349b88ad7395aed85da6e8e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h @@ -0,0 +1,382 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "parquet/platform.h" +#include "parquet/types.h" + +namespace arrow { + +class Array; +class BinaryArray; + +} // namespace arrow + +namespace parquet { + +class ColumnDescriptor; + +// ---------------------------------------------------------------------- +// Value comparator interfaces + +/// \brief Base class for value comparators. Generally used with +/// TypedComparator +class PARQUET_EXPORT Comparator { + public: + virtual ~Comparator() {} + + /// \brief Create a comparator explicitly from physical type and + /// sort order + /// \param[in] physical_type the physical type for the typed + /// comparator + /// \param[in] sort_order either SortOrder::SIGNED or + /// SortOrder::UNSIGNED + /// \param[in] type_length for FIXED_LEN_BYTE_ARRAY only + static std::shared_ptr Make(Type::type physical_type, + SortOrder::type sort_order, + int type_length = -1); + + /// \brief Create typed comparator inferring default sort order from + /// ColumnDescriptor + /// \param[in] descr the Parquet column schema + static std::shared_ptr Make(const ColumnDescriptor* descr); +}; + +/// \brief Interface for comparison of physical types according to the +/// semantics of a particular logical type. 
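As a usage sketch for these comparator factories (the typed MakeComparator helper is declared just below; the variable names and values are illustrative):

#include <utility>

#include "parquet/statistics.h"
#include "parquet/types.h"

void ComparatorSketch() {
  // Signed ordering for INT32; string-like BYTE_ARRAY columns would
  // typically use SortOrder::UNSIGNED instead.
  auto comparator = parquet::MakeComparator<parquet::Int32Type>(
      parquet::Type::INT32, parquet::SortOrder::SIGNED);
  bool less = comparator->Compare(3, 7);  // true: 3 < 7 under signed order

  int32_t values[] = {5, -1, 9};
  std::pair<int32_t, int32_t> min_max = comparator->GetMinMax(values, 3);
  // min_max.first == -1, min_max.second == 9
  (void)less;
  (void)min_max;
}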
+template +class TypedComparator : public Comparator { + public: + using T = typename DType::c_type; + + /// \brief Scalar comparison of two elements, return true if first + /// is strictly less than the second + virtual bool Compare(const T& a, const T& b) const = 0; + + /// \brief Compute maximum and minimum elements in a batch of + /// elements without any nulls + virtual std::pair GetMinMax(const T* values, int64_t length) const = 0; + + /// \brief Compute minimum and maximum elements from an Arrow array. Only + /// valid for certain Parquet Type / Arrow Type combinations, like BYTE_ARRAY + /// / arrow::BinaryArray + virtual std::pair GetMinMax(const ::arrow::Array& values) const = 0; + + /// \brief Compute maximum and minimum elements in a batch of + /// elements with accompanying bitmap indicating which elements are + /// included (bit set) and excluded (bit not set) + /// + /// \param[in] values the sequence of values + /// \param[in] length the length of the sequence + /// \param[in] valid_bits a bitmap indicating which elements are + /// included (1) or excluded (0) + /// \param[in] valid_bits_offset the bit offset into the bitmap of + /// the first element in the sequence + virtual std::pair GetMinMaxSpaced(const T* values, int64_t length, + const uint8_t* valid_bits, + int64_t valid_bits_offset) const = 0; +}; + +/// \brief Typed version of Comparator::Make +template +std::shared_ptr> MakeComparator(Type::type physical_type, + SortOrder::type sort_order, + int type_length = -1) { + return std::static_pointer_cast>( + Comparator::Make(physical_type, sort_order, type_length)); +} + +/// \brief Typed version of Comparator::Make +template +std::shared_ptr> MakeComparator(const ColumnDescriptor* descr) { + return std::static_pointer_cast>(Comparator::Make(descr)); +} + +// ---------------------------------------------------------------------- + +/// \brief Structure represented encoded statistics to be written to +/// and read from Parquet serialized metadata. +class PARQUET_EXPORT EncodedStatistics { + std::string max_, min_; + bool is_signed_ = false; + + public: + EncodedStatistics() = default; + + const std::string& max() const { return max_; } + const std::string& min() const { return min_; } + + int64_t null_count = 0; + int64_t distinct_count = 0; + + bool has_min = false; + bool has_max = false; + bool has_null_count = false; + bool has_distinct_count = false; + + // When all values in the statistics are null, it is set to true. + // Otherwise, at least one value is not null, or we are not sure at all. + // Page index requires this information to decide whether a data page + // is a null page or not. + bool all_null_value = false; + + // From parquet-mr + // Don't write stats larger than the max size rather than truncating. The + // rationale is that some engines may use the minimum value in the page as + // the true minimum for aggregations and there is no way to mark that a + // value has been truncated and is a lower bound and not in the page. 
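To make the drop-rather-than-truncate rule concrete, a small sketch of the method declared just below (the string values are illustrative):

void StatSizeLimitSketch() {
  parquet::EncodedStatistics stats;
  stats.set_min("apple").set_max("zebra");  // both encoded values are 5 bytes
  stats.ApplyStatSizeLimits(4);             // limit is below either value's size
  // Instead of truncating (which could turn a true bound into a non-bound),
  // both values are dropped: stats.has_min and stats.has_max are now false.
}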
+ void ApplyStatSizeLimits(size_t length) { + if (max_.length() > length) { + has_max = false; + max_.clear(); + } + if (min_.length() > length) { + has_min = false; + min_.clear(); + } + } + + bool is_set() const { + return has_min || has_max || has_null_count || has_distinct_count; + } + + bool is_signed() const { return is_signed_; } + + void set_is_signed(bool is_signed) { is_signed_ = is_signed; } + + EncodedStatistics& set_max(std::string value) { + max_ = std::move(value); + has_max = true; + return *this; + } + + EncodedStatistics& set_min(std::string value) { + min_ = std::move(value); + has_min = true; + return *this; + } + + EncodedStatistics& set_null_count(int64_t value) { + null_count = value; + has_null_count = true; + return *this; + } + + EncodedStatistics& set_distinct_count(int64_t value) { + distinct_count = value; + has_distinct_count = true; + return *this; + } +}; + +/// \brief Base type for computing column statistics while writing a file +class PARQUET_EXPORT Statistics { + public: + virtual ~Statistics() {} + + /// \brief Create a new statistics instance given a column schema + /// definition + /// \param[in] descr the column schema + /// \param[in] pool a memory pool to use for any memory allocations, optional + static std::shared_ptr Make( + const ColumnDescriptor* descr, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + + /// \brief Create a new statistics instance given a column schema + /// definition and preexisting state + /// \param[in] descr the column schema + /// \param[in] encoded_min the encoded minimum value + /// \param[in] encoded_max the encoded maximum value + /// \param[in] num_values total number of values + /// \param[in] null_count number of null values + /// \param[in] distinct_count number of distinct values + /// \param[in] has_min_max whether the min/max statistics are set + /// \param[in] has_null_count whether the null_count statistics are set + /// \param[in] has_distinct_count whether the distinct_count statistics are set + /// \param[in] pool a memory pool to use for any memory allocations, optional + static std::shared_ptr Make( + const ColumnDescriptor* descr, const std::string& encoded_min, + const std::string& encoded_max, int64_t num_values, int64_t null_count, + int64_t distinct_count, bool has_min_max, bool has_null_count, + bool has_distinct_count, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + + // Helper function to convert EncodedStatistics to Statistics. + // EncodedStatistics does not contain number of non-null values, and it can be + // passed using the num_values parameter. + static std::shared_ptr Make( + const ColumnDescriptor* descr, const EncodedStatistics* encoded_statistics, + int64_t num_values = -1, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + + /// \brief Return true if the count of null values is set + virtual bool HasNullCount() const = 0; + + /// \brief The number of null values, may not be set + virtual int64_t null_count() const = 0; + + /// \brief Return true if the count of distinct values is set + virtual bool HasDistinctCount() const = 0; + + /// \brief The number of distinct values, may not be set + virtual int64_t distinct_count() const = 0; + + /// \brief The number of non-null values in the column + virtual int64_t num_values() const = 0; + + /// \brief Return true if the min and max statistics are set. 
Obtain + /// with TypedStatistics::min and max + virtual bool HasMinMax() const = 0; + + /// \brief Reset state of object to initial (no data observed) state + virtual void Reset() = 0; + + /// \brief Plain-encoded minimum value + virtual std::string EncodeMin() const = 0; + + /// \brief Plain-encoded maximum value + virtual std::string EncodeMax() const = 0; + + /// \brief The finalized encoded form of the statistics for transport + virtual EncodedStatistics Encode() = 0; + + /// \brief The physical type of the column schema + virtual Type::type physical_type() const = 0; + + /// \brief The full type descriptor from the column schema + virtual const ColumnDescriptor* descr() const = 0; + + /// \brief Check two Statistics for equality + virtual bool Equals(const Statistics& other) const = 0; + + protected: + static std::shared_ptr Make(Type::type physical_type, const void* min, + const void* max, int64_t num_values, + int64_t null_count, int64_t distinct_count); +}; + +/// \brief A typed implementation of Statistics +template +class TypedStatistics : public Statistics { + public: + using T = typename DType::c_type; + + /// \brief The current minimum value + virtual const T& min() const = 0; + + /// \brief The current maximum value + virtual const T& max() const = 0; + + /// \brief Update state with state of another Statistics object + virtual void Merge(const TypedStatistics& other) = 0; + + /// \brief Batch statistics update + virtual void Update(const T* values, int64_t num_values, int64_t null_count) = 0; + + /// \brief Batch statistics update with supplied validity bitmap + /// \param[in] values pointer to column values + /// \param[in] valid_bits Pointer to bitmap representing if values are non-null. + /// \param[in] valid_bits_offset Offset offset into valid_bits where the slice of + /// data begins. + /// \param[in] num_spaced_values The length of values in values/valid_bits to inspect + /// when calculating statistics. This can be smaller than + /// num_values+null_count as null_count can include nulls + /// from parents while num_spaced_values does not. + /// \param[in] num_values Number of values that are not null. + /// \param[in] null_count Number of values that are null. + virtual void UpdateSpaced(const T* values, const uint8_t* valid_bits, + int64_t valid_bits_offset, int64_t num_spaced_values, + int64_t num_values, int64_t null_count) = 0; + + /// \brief EXPERIMENTAL: Update statistics with an Arrow array without + /// conversion to a primitive Parquet C type. Only implemented for certain + /// Parquet type / Arrow type combinations like BYTE_ARRAY / + /// arrow::BinaryArray + /// + /// If update_counts is true then the null_count and num_values will be updated + /// based on the null_count of values. Set to false if these are updated + /// elsewhere (e.g. when updating a dictionary where the counts are taken from + /// the indices and not the values) + virtual void Update(const ::arrow::Array& values, bool update_counts = true) = 0; + + /// \brief Set min and max values to particular values + virtual void SetMinMax(const T& min, const T& max) = 0; + + /// \brief Increments the null count directly + /// Use Update to extract the null count from data. Use this if you determine + /// the null count through some other means (e.g. 
dictionary arrays where the + /// null count is determined from the indices) + virtual void IncrementNullCount(int64_t n) = 0; + + /// \brief Increments the number of values directly + /// The same note on IncrementNullCount applies here + virtual void IncrementNumValues(int64_t n) = 0; +}; + +using BoolStatistics = TypedStatistics; +using Int32Statistics = TypedStatistics; +using Int64Statistics = TypedStatistics; +using FloatStatistics = TypedStatistics; +using DoubleStatistics = TypedStatistics; +using ByteArrayStatistics = TypedStatistics; +using FLBAStatistics = TypedStatistics; + +/// \brief Typed version of Statistics::Make +template +std::shared_ptr> MakeStatistics( + const ColumnDescriptor* descr, + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) { + return std::static_pointer_cast>(Statistics::Make(descr, pool)); +} + +/// \brief Create Statistics initialized to a particular state +/// \param[in] min the minimum value +/// \param[in] max the minimum value +/// \param[in] num_values number of values +/// \param[in] null_count number of null values +/// \param[in] distinct_count number of distinct values +template +std::shared_ptr> MakeStatistics(const typename DType::c_type& min, + const typename DType::c_type& max, + int64_t num_values, + int64_t null_count, + int64_t distinct_count) { + return std::static_pointer_cast>(Statistics::Make( + DType::type_num, &min, &max, num_values, null_count, distinct_count)); +} + +/// \brief Typed version of Statistics::Make +template +std::shared_ptr> MakeStatistics( + const ColumnDescriptor* descr, const std::string& encoded_min, + const std::string& encoded_max, int64_t num_values, int64_t null_count, + int64_t distinct_count, bool has_min_max, bool has_null_count, + bool has_distinct_count, ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) { + return std::static_pointer_cast>(Statistics::Make( + descr, encoded_min, encoded_max, num_values, null_count, distinct_count, + has_min_max, has_null_count, has_distinct_count, pool)); +} + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..a7dadac92c89277a104e3acc4149a77258177c8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h @@ -0,0 +1,303 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
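Before the stream reader header begins, a usage sketch tying together the statistics factories declared above (the descr argument is assumed to come from a SchemaDescriptor and is not constructed here):

#include "parquet/statistics.h"

void StatisticsSketch(const parquet::ColumnDescriptor* descr) {
  // descr is assumed to describe an INT64 column.
  auto stats = parquet::MakeStatistics<parquet::Int64Type>(descr);
  int64_t values[] = {10, 3, 42};
  stats->Update(values, /*num_values=*/3, /*null_count=*/0);
  // stats->min() == 3, stats->max() == 42, stats->num_values() == 3
  parquet::EncodedStatistics encoded = stats->Encode();  // ready for metadata
  (void)encoded;
}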
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/column_reader.h" +#include "parquet/file_reader.h" +#include "parquet/stream_writer.h" + +namespace parquet { + +/// \brief A class for reading Parquet files using an output stream type API. +/// +/// The values given must be of the correct type i.e. the type must +/// match the file schema exactly otherwise a ParquetException will be +/// thrown. +/// +/// The user must explicitly advance to the next row using the +/// EndRow() function or EndRow input manipulator. +/// +/// Required and optional fields are supported: +/// - Required fields are read using operator>>(T) +/// - Optional fields are read with +/// operator>>(std::optional) +/// +/// Note that operator>>(std::optional) can be used to read +/// required fields. +/// +/// Similarly operator>>(T) can be used to read optional fields. +/// However, if the value is not present then a ParquetException will +/// be raised. +/// +/// Currently there is no support for repeated fields. +/// +class PARQUET_EXPORT StreamReader { + public: + template + using optional = ::std::optional; + + // N.B. Default constructed objects are not usable. This + // constructor is provided so that the object may be move + // assigned afterwards. + StreamReader() = default; + + explicit StreamReader(std::unique_ptr reader); + + ~StreamReader() = default; + + bool eof() const { return eof_; } + + int current_column() const { return column_index_; } + + int64_t current_row() const { return current_row_; } + + int num_columns() const; + + int64_t num_rows() const; + + // Moving is possible. + StreamReader(StreamReader&&) = default; + StreamReader& operator=(StreamReader&&) = default; + + // Copying is not allowed. + StreamReader(const StreamReader&) = delete; + StreamReader& operator=(const StreamReader&) = delete; + + StreamReader& operator>>(bool& v); + + StreamReader& operator>>(int8_t& v); + + StreamReader& operator>>(uint8_t& v); + + StreamReader& operator>>(int16_t& v); + + StreamReader& operator>>(uint16_t& v); + + StreamReader& operator>>(int32_t& v); + + StreamReader& operator>>(uint32_t& v); + + StreamReader& operator>>(int64_t& v); + + StreamReader& operator>>(uint64_t& v); + + StreamReader& operator>>(std::chrono::milliseconds& v); + + StreamReader& operator>>(std::chrono::microseconds& v); + + StreamReader& operator>>(float& v); + + StreamReader& operator>>(double& v); + + StreamReader& operator>>(char& v); + + template + StreamReader& operator>>(char (&v)[N]) { + ReadFixedLength(v, N); + return *this; + } + + template + StreamReader& operator>>(std::array& v) { + ReadFixedLength(v.data(), static_cast(N)); + return *this; + } + + // N.B. Cannot allow for reading to a arbitrary char pointer as the + // length cannot be verified. Also it would overshadow the + // char[N] input operator. + // StreamReader& operator>>(char * v); + + StreamReader& operator>>(std::string& v); + + StreamReader& operator>>(::arrow::Decimal128& v); + + // Input operators for optional fields. 
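A compact reading sketch for the API documented above; the file name "data.parquet" and the two-column layout (a required string followed by an optional int32) are assumptions for illustration:

#include <optional>
#include <string>

#include "arrow/io/file.h"
#include "parquet/exception.h"
#include "parquet/stream_reader.h"

void ReadSketch() {
  std::shared_ptr<arrow::io::ReadableFile> infile;
  PARQUET_ASSIGN_OR_THROW(infile, arrow::io::ReadableFile::Open("data.parquet"));
  parquet::StreamReader reader{parquet::ParquetFileReader::Open(infile)};

  std::string name;            // required BYTE_ARRAY / UTF8 column
  std::optional<int32_t> age;  // optional INT32 column
  while (!reader.eof()) {
    reader >> name >> age >> parquet::EndRow;  // EndRow advances to the next row
  }
}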
+ + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional& v); + + StreamReader& operator>>(optional<::arrow::Decimal128>& v); + + template + StreamReader& operator>>(optional>& v) { + CheckColumn(Type::FIXED_LEN_BYTE_ARRAY, ConvertedType::NONE, N); + FixedLenByteArray flba; + if (ReadOptional(&flba)) { + v = std::array{}; + std::memcpy(v->data(), flba.ptr, N); + } else { + v.reset(); + } + return *this; + } + + /// \brief Terminate current row and advance to next one. + /// \throws ParquetException if all columns in the row were not + /// read or skipped. + void EndRow(); + + /// \brief Skip the data in the next columns. + /// If the number of columns exceeds the columns remaining on the + /// current row then skipping is terminated - it does _not_ continue + /// skipping columns on the next row. + /// Skipping of columns still requires the use 'EndRow' even if all + /// remaining columns were skipped. + /// \return Number of columns actually skipped. + int64_t SkipColumns(int64_t num_columns_to_skip); + + /// \brief Skip the data in the next rows. + /// Skipping of rows is not allowed if reading of data for the + /// current row is not finished. + /// Skipping of rows will be terminated if the end of file is + /// reached. + /// \return Number of rows actually skipped. 
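A sketch of the skipping semantics (SkipColumns is declared above, SkipRows immediately below; reader is assumed to be an open StreamReader positioned mid-row):

void SkipSketch(parquet::StreamReader& reader) {
  // Skip whatever remains of the current row. The return value is the number
  // of columns actually skipped; EndRow is still required afterwards.
  reader.SkipColumns(reader.num_columns());
  reader >> parquet::EndRow;

  // Row skipping is only legal between rows and stops early at end of file.
  int64_t skipped = reader.SkipRows(100);
  (void)skipped;
}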
+ int64_t SkipRows(int64_t num_rows_to_skip); + + protected: + [[noreturn]] void ThrowReadFailedException( + const std::shared_ptr& node); + + template + void Read(T* v) { + const auto& node = nodes_[column_index_]; + auto reader = static_cast(column_readers_[column_index_++].get()); + int16_t def_level; + int16_t rep_level; + int64_t values_read; + + reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, v, &values_read); + + if (values_read != 1) { + ThrowReadFailedException(node); + } + } + + template + void Read(T* v) { + const auto& node = nodes_[column_index_]; + auto reader = static_cast(column_readers_[column_index_++].get()); + int16_t def_level; + int16_t rep_level; + ReadType tmp; + int64_t values_read; + + reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, &tmp, &values_read); + + if (values_read == 1) { + *v = tmp; + } else { + ThrowReadFailedException(node); + } + } + + template + void ReadOptional(optional* v) { + const auto& node = nodes_[column_index_]; + auto reader = static_cast(column_readers_[column_index_++].get()); + int16_t def_level; + int16_t rep_level; + ReadType tmp; + int64_t values_read; + + reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, &tmp, &values_read); + + if (values_read == 1) { + *v = T(tmp); + } else if ((values_read == 0) && (def_level == 0)) { + v->reset(); + } else { + ThrowReadFailedException(node); + } + } + + void ReadFixedLength(char* ptr, int len); + + void Read(ByteArray* v); + + void Read(FixedLenByteArray* v); + + bool ReadOptional(ByteArray* v); + + bool ReadOptional(FixedLenByteArray* v); + + void NextRowGroup(); + + void CheckColumn(Type::type physical_type, ConvertedType::type converted_type, + int length = 0); + + void SkipRowsInColumn(ColumnReader* reader, int64_t num_rows_to_skip); + + void SetEof(); + + private: + std::unique_ptr file_reader_; + std::shared_ptr file_metadata_; + std::shared_ptr row_group_reader_; + std::vector> column_readers_; + std::vector> nodes_; + + bool eof_{true}; + int row_group_index_{0}; + int column_index_{0}; + int64_t current_row_{0}; + int64_t row_group_row_offset_{0}; + + static constexpr int64_t kBatchSizeOne = 1; +}; // namespace parquet + +PARQUET_EXPORT +StreamReader& operator>>(StreamReader&, EndRowType); + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..da0d0f7bdee96c944410ed19d7f9c28fb48bea50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
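The Read/ReadOptional templates above reduce to single-value ReadBatch calls on a typed column reader. A sketch of that underlying pattern, assuming an INT64 column reader obtained elsewhere (Int64Reader and ReadBatch come from parquet/column_reader.h, not from this header):

#include <memory>

#include "parquet/column_reader.h"

void ReadOneValue(const std::shared_ptr<parquet::ColumnReader>& column_reader) {
  auto* typed = static_cast<parquet::Int64Reader*>(column_reader.get());
  int16_t def_level = 0;
  int16_t rep_level = 0;
  int64_t value = 0;
  int64_t values_read = 0;
  typed->ReadBatch(/*batch_size=*/1, &def_level, &rep_level, &value, &values_read);
  if (values_read == 1) {
    // A non-null value was materialized into `value`.
  } else if (values_read == 0 && def_level == 0) {
    // The cell is null (optional column, definition level 0).
  }
}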
+ +#pragma once + +namespace parquet { + +/// \brief Feature selection when writing Parquet files +/// +/// `ParquetVersion::type` governs which data types are allowed and how they +/// are represented. For example, uint32_t data will be written differently +/// depending on this value (as INT64 for PARQUET_1_0, as UINT32 for other +/// versions). +/// +/// However, some features - such as compression algorithms, encryption, +/// or the improved "v2" data page format - must be enabled separately in +/// ArrowWriterProperties. +struct ParquetVersion { + enum type : int { + /// Enable only pre-2.2 Parquet format features when writing + /// + /// This setting is useful for maximum compatibility with legacy readers. + /// Note that logical types may still be emitted, as long they have a + /// corresponding converted type. + PARQUET_1_0, + + /// DEPRECATED: Enable Parquet format 2.6 features + /// + /// This misleadingly named enum value is roughly similar to PARQUET_2_6. + PARQUET_2_0 ARROW_DEPRECATED_ENUM_VALUE("use PARQUET_2_4 or PARQUET_2_6 " + "for fine-grained feature selection"), + + /// Enable Parquet format 2.4 and earlier features when writing + /// + /// This enables UINT32 as well as logical types which don't have + /// a corresponding converted type. + /// + /// Note: Parquet format 2.4.0 was released in October 2017. + PARQUET_2_4, + + /// Enable Parquet format 2.6 and earlier features when writing + /// + /// This enables the NANOS time unit in addition to the PARQUET_2_4 + /// features. + /// + /// Note: Parquet format 2.6.0 was released in September 2018. + PARQUET_2_6, + + /// Enable latest Parquet format 2.x features + /// + /// This value is equal to the greatest 2.x version supported by + /// this library. + PARQUET_2_LATEST = PARQUET_2_6 + }; +}; + +class FileMetaData; +class RowGroupMetaData; + +class ColumnDescriptor; +class SchemaDescriptor; + +class ReaderProperties; +class ArrowReaderProperties; + +class WriterProperties; +class WriterPropertiesBuilder; +class ArrowWriterProperties; +class ArrowWriterPropertiesBuilder; + +namespace arrow { + +class FileWriter; +class FileReader; + +} // namespace arrow +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/types.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/types.h new file mode 100644 index 0000000000000000000000000000000000000000..76dd0efc7cb4aca8e6b0cedb08d1298d2124efb0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/types.h @@ -0,0 +1,812 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
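ParquetVersion is normally consumed through the writer-properties builder, which is only forward-declared in the header above and lives in parquet/properties.h. A sketch, assuming that builder API:

#include <memory>

#include "parquet/properties.h"

// Request Parquet format 2.6 features (e.g. NANOS timestamps) when writing.
std::shared_ptr<parquet::WriterProperties> MakeProps() {
  parquet::WriterProperties::Builder builder;
  builder.version(parquet::ParquetVersion::PARQUET_2_6);
  return builder.build();
}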
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/platform.h" +#include "parquet/type_fwd.h" +#include "parquet/windows_fixup.h" // for OPTIONAL + +namespace arrow::util { + +class Codec; + +} // namespace arrow::util + +namespace parquet { + +// ---------------------------------------------------------------------- +// Metadata enums to match Thrift metadata +// +// The reason we maintain our own enums is to avoid transitive dependency on +// the compiled Thrift headers (and thus thrift/Thrift.h) for users of the +// public API. After building parquet-cpp, you should not need to include +// Thrift headers in your application. This means some boilerplate to convert +// between our types and Parquet's Thrift types. +// +// We can also add special values like NONE to distinguish between metadata +// values being set and not set. As an example consider ConvertedType and +// CompressionCodec + +// Mirrors parquet::Type +struct Type { + enum type { + BOOLEAN = 0, + INT32 = 1, + INT64 = 2, + INT96 = 3, + FLOAT = 4, + DOUBLE = 5, + BYTE_ARRAY = 6, + FIXED_LEN_BYTE_ARRAY = 7, + // Should always be last element. + UNDEFINED = 8 + }; +}; + +// Mirrors parquet::ConvertedType +struct ConvertedType { + enum type { + NONE, // Not a real converted type, but means no converted type is specified + UTF8, + MAP, + MAP_KEY_VALUE, + LIST, + ENUM, + DECIMAL, + DATE, + TIME_MILLIS, + TIME_MICROS, + TIMESTAMP_MILLIS, + TIMESTAMP_MICROS, + UINT_8, + UINT_16, + UINT_32, + UINT_64, + INT_8, + INT_16, + INT_32, + INT_64, + JSON, + BSON, + INTERVAL, + // DEPRECATED INVALID ConvertedType for all-null data. + // Only useful for reading legacy files written out by interim Parquet C++ releases. + // For writing, always emit LogicalType::Null instead. + // See PARQUET-1990. + NA = 25, + UNDEFINED = 26 // Not a real converted type; should always be last element + }; +}; + +// forward declaration +namespace format { + +class LogicalType; + +} + +// Mirrors parquet::FieldRepetitionType +struct Repetition { + enum type { REQUIRED = 0, OPTIONAL = 1, REPEATED = 2, /*Always last*/ UNDEFINED = 3 }; +}; + +// Reference: +// parquet-mr/parquet-hadoop/src/main/java/org/apache/parquet/ +// format/converter/ParquetMetadataConverter.java +// Sort order for page and column statistics. Types are associated with sort +// orders (e.g., UTF8 columns should use UNSIGNED) and column stats are +// aggregated using a sort order. As of parquet-format version 2.3.1, the +// order used to aggregate stats is always SIGNED and is not stored in the +// Parquet file. These stats are discarded for types that need unsigned. +// See PARQUET-686. +struct SortOrder { + enum type { SIGNED, UNSIGNED, UNKNOWN }; +}; + +namespace schema { + +struct DecimalMetadata { + bool isset; + int32_t scale; + int32_t precision; +}; + +} // namespace schema + +/// \brief Implementation of parquet.thrift LogicalType types. 
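The bridging between legacy converted types and logical types is done by the LogicalType class declared just below. For example, a legacy DECIMAL converted type maps to an equivalent logical type (a sketch; the scale and precision values are illustrative):

#include "parquet/types.h"

void ConvertedToLogicalSketch() {
  // DecimalMetadata fields are {isset, scale, precision}.
  parquet::schema::DecimalMetadata meta{true, /*scale=*/2, /*precision=*/10};
  auto logical = parquet::LogicalType::FromConvertedType(
      parquet::ConvertedType::DECIMAL, meta);
  // logical->is_decimal() == true, and ToConvertedType() round-trips back to
  // ConvertedType::DECIMAL with the same decimal metadata.
}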
+class PARQUET_EXPORT LogicalType { + public: + struct Type { + enum type { + UNDEFINED = 0, // Not a real logical type + STRING = 1, + MAP, + LIST, + ENUM, + DECIMAL, + DATE, + TIME, + TIMESTAMP, + INTERVAL, + INT, + NIL, // Thrift NullType: annotates data that is always null + JSON, + BSON, + UUID, + FLOAT16, + NONE // Not a real logical type; should always be last element + }; + }; + + struct TimeUnit { + enum unit { UNKNOWN = 0, MILLIS = 1, MICROS, NANOS }; + }; + + /// \brief If possible, return a logical type equivalent to the given legacy + /// converted type (and decimal metadata if applicable). + static std::shared_ptr FromConvertedType( + const parquet::ConvertedType::type converted_type, + const parquet::schema::DecimalMetadata converted_decimal_metadata = {false, -1, + -1}); + + /// \brief Return the logical type represented by the Thrift intermediary object. + static std::shared_ptr FromThrift( + const parquet::format::LogicalType& thrift_logical_type); + + /// \brief Return the explicitly requested logical type. + static std::shared_ptr String(); + static std::shared_ptr Map(); + static std::shared_ptr List(); + static std::shared_ptr Enum(); + static std::shared_ptr Decimal(int32_t precision, int32_t scale = 0); + static std::shared_ptr Date(); + static std::shared_ptr Time(bool is_adjusted_to_utc, + LogicalType::TimeUnit::unit time_unit); + + /// \brief Create a Timestamp logical type + /// \param[in] is_adjusted_to_utc set true if the data is UTC-normalized + /// \param[in] time_unit the resolution of the timestamp + /// \param[in] is_from_converted_type if true, the timestamp was generated + /// by translating a legacy converted type of TIMESTAMP_MILLIS or + /// TIMESTAMP_MICROS. Default is false. + /// \param[in] force_set_converted_type if true, always set the + /// legacy ConvertedType TIMESTAMP_MICROS and TIMESTAMP_MILLIS + /// metadata. Default is false + static std::shared_ptr Timestamp( + bool is_adjusted_to_utc, LogicalType::TimeUnit::unit time_unit, + bool is_from_converted_type = false, bool force_set_converted_type = false); + + static std::shared_ptr Interval(); + static std::shared_ptr Int(int bit_width, bool is_signed); + + /// \brief Create a logical type for data that's always null + /// + /// Any physical type can be annotated with this logical type. + static std::shared_ptr Null(); + + static std::shared_ptr JSON(); + static std::shared_ptr BSON(); + static std::shared_ptr UUID(); + static std::shared_ptr Float16(); + + /// \brief Create a placeholder for when no logical type is specified + static std::shared_ptr None(); + + /// \brief Return true if this logical type is consistent with the given underlying + /// physical type. + bool is_applicable(parquet::Type::type primitive_type, + int32_t primitive_length = -1) const; + + /// \brief Return true if this logical type is equivalent to the given legacy converted + /// type (and decimal metadata if applicable). + bool is_compatible(parquet::ConvertedType::type converted_type, + parquet::schema::DecimalMetadata converted_decimal_metadata = { + false, -1, -1}) const; + + /// \brief If possible, return the legacy converted type (and decimal metadata if + /// applicable) equivalent to this logical type. + parquet::ConvertedType::type ToConvertedType( + parquet::schema::DecimalMetadata* out_decimal_metadata) const; + + /// \brief Return a printable representation of this logical type. + std::string ToString() const; + + /// \brief Return a JSON representation of this logical type. 
+ std::string ToJSON() const; + + /// \brief Return a serializable Thrift object for this logical type. + parquet::format::LogicalType ToThrift() const; + + /// \brief Return true if the given logical type is equivalent to this logical type. + bool Equals(const LogicalType& other) const; + + /// \brief Return the enumerated type of this logical type. + LogicalType::Type::type type() const; + + /// \brief Return the appropriate sort order for this logical type. + SortOrder::type sort_order() const; + + // Type checks ... + bool is_string() const; + bool is_map() const; + bool is_list() const; + bool is_enum() const; + bool is_decimal() const; + bool is_date() const; + bool is_time() const; + bool is_timestamp() const; + bool is_interval() const; + bool is_int() const; + bool is_null() const; + bool is_JSON() const; + bool is_BSON() const; + bool is_UUID() const; + bool is_float16() const; + bool is_none() const; + /// \brief Return true if this logical type is of a known type. + bool is_valid() const; + bool is_invalid() const; + /// \brief Return true if this logical type is suitable for a schema GroupNode. + bool is_nested() const; + bool is_nonnested() const; + /// \brief Return true if this logical type is included in the Thrift output for its + /// node. + bool is_serialized() const; + + LogicalType(const LogicalType&) = delete; + LogicalType& operator=(const LogicalType&) = delete; + virtual ~LogicalType() noexcept; + + protected: + LogicalType(); + + class Impl; + std::unique_ptr impl_; +}; + +/// \brief Allowed for physical type BYTE_ARRAY, must be encoded as UTF-8. +class PARQUET_EXPORT StringLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + StringLogicalType() = default; +}; + +/// \brief Allowed for group nodes only. +class PARQUET_EXPORT MapLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + MapLogicalType() = default; +}; + +/// \brief Allowed for group nodes only. +class PARQUET_EXPORT ListLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + ListLogicalType() = default; +}; + +/// \brief Allowed for physical type BYTE_ARRAY, must be encoded as UTF-8. +class PARQUET_EXPORT EnumLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + EnumLogicalType() = default; +}; + +/// \brief Allowed for physical type INT32, INT64, FIXED_LEN_BYTE_ARRAY, or BYTE_ARRAY, +/// depending on the precision. +class PARQUET_EXPORT DecimalLogicalType : public LogicalType { + public: + static std::shared_ptr Make(int32_t precision, int32_t scale = 0); + int32_t precision() const; + int32_t scale() const; + + private: + DecimalLogicalType() = default; +}; + +/// \brief Allowed for physical type INT32. +class PARQUET_EXPORT DateLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + DateLogicalType() = default; +}; + +/// \brief Allowed for physical type INT32 (for MILLIS) or INT64 (for MICROS and NANOS). +class PARQUET_EXPORT TimeLogicalType : public LogicalType { + public: + static std::shared_ptr Make(bool is_adjusted_to_utc, + LogicalType::TimeUnit::unit time_unit); + bool is_adjusted_to_utc() const; + LogicalType::TimeUnit::unit time_unit() const; + + private: + TimeLogicalType() = default; +}; + +/// \brief Allowed for physical type INT64. 
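The TimestampLogicalType declared next is normally obtained through the LogicalType::Timestamp factory above. A sketch of its physical-type constraint:

void TimestampSketch() {
  auto ts = parquet::LogicalType::Timestamp(
      /*is_adjusted_to_utc=*/true, parquet::LogicalType::TimeUnit::MICROS);
  bool ok = ts->is_applicable(parquet::Type::INT64);   // true
  bool bad = ts->is_applicable(parquet::Type::INT32);  // false: wrong physical type
  (void)ok;
  (void)bad;
}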
+class PARQUET_EXPORT TimestampLogicalType : public LogicalType { + public: + static std::shared_ptr Make(bool is_adjusted_to_utc, + LogicalType::TimeUnit::unit time_unit, + bool is_from_converted_type = false, + bool force_set_converted_type = false); + bool is_adjusted_to_utc() const; + LogicalType::TimeUnit::unit time_unit() const; + + /// \brief If true, will not set LogicalType in Thrift metadata + bool is_from_converted_type() const; + + /// \brief If true, will set ConvertedType for micros and millis + /// resolution in legacy ConvertedType Thrift metadata + bool force_set_converted_type() const; + + private: + TimestampLogicalType() = default; +}; + +/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 12 +class PARQUET_EXPORT IntervalLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + IntervalLogicalType() = default; +}; + +/// \brief Allowed for physical type INT32 (for bit widths 8, 16, and 32) and INT64 +/// (for bit width 64). +class PARQUET_EXPORT IntLogicalType : public LogicalType { + public: + static std::shared_ptr Make(int bit_width, bool is_signed); + int bit_width() const; + bool is_signed() const; + + private: + IntLogicalType() = default; +}; + +/// \brief Allowed for any physical type. +class PARQUET_EXPORT NullLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + NullLogicalType() = default; +}; + +/// \brief Allowed for physical type BYTE_ARRAY. +class PARQUET_EXPORT JSONLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + JSONLogicalType() = default; +}; + +/// \brief Allowed for physical type BYTE_ARRAY. +class PARQUET_EXPORT BSONLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + BSONLogicalType() = default; +}; + +/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 16, +/// must encode raw UUID bytes. +class PARQUET_EXPORT UUIDLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + UUIDLogicalType() = default; +}; + +/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 2, +/// must encode raw FLOAT16 bytes. +class PARQUET_EXPORT Float16LogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + Float16LogicalType() = default; +}; + +/// \brief Allowed for any physical type. +class PARQUET_EXPORT NoLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + NoLogicalType() = default; +}; + +// Internal API, for unrecognized logical types +class PARQUET_EXPORT UndefinedLogicalType : public LogicalType { + public: + static std::shared_ptr Make(); + + private: + UndefinedLogicalType() = default; +}; + +// Data encodings. Mirrors parquet::Encoding +struct Encoding { + enum type { + PLAIN = 0, + PLAIN_DICTIONARY = 2, + RLE = 3, + BIT_PACKED = 4, + DELTA_BINARY_PACKED = 5, + DELTA_LENGTH_BYTE_ARRAY = 6, + DELTA_BYTE_ARRAY = 7, + RLE_DICTIONARY = 8, + BYTE_STREAM_SPLIT = 9, + // Should always be last element (except UNKNOWN) + UNDEFINED = 10, + UNKNOWN = 999 + }; +}; + +// Exposed data encodings. It is the encoding of the data read from the file, +// rather than the encoding of the data in the file. E.g., the data encoded as +// RLE_DICTIONARY in the file can be read as dictionary indices by RLE +// decoding, in which case the data read from the file is DICTIONARY encoded. +enum class ExposedEncoding { + NO_ENCODING = 0, // data is not encoded, i.e. 
already decoded during reading + DICTIONARY = 1 +}; + +/// \brief Return true if Parquet supports indicated compression type +PARQUET_EXPORT +bool IsCodecSupported(Compression::type codec); + +PARQUET_EXPORT +std::unique_ptr GetCodec(Compression::type codec); + +PARQUET_EXPORT +std::unique_ptr GetCodec(Compression::type codec, + const CodecOptions& codec_options); + +PARQUET_EXPORT +std::unique_ptr GetCodec(Compression::type codec, int compression_level); + +struct ParquetCipher { + enum type { AES_GCM_V1 = 0, AES_GCM_CTR_V1 = 1 }; +}; + +struct AadMetadata { + std::string aad_prefix; + std::string aad_file_unique; + bool supply_aad_prefix; +}; + +struct EncryptionAlgorithm { + ParquetCipher::type algorithm; + AadMetadata aad; +}; + +// parquet::PageType +struct PageType { + enum type { + DATA_PAGE, + INDEX_PAGE, + DICTIONARY_PAGE, + DATA_PAGE_V2, + // Should always be last element + UNDEFINED + }; +}; + +bool PageCanUseChecksum(PageType::type pageType); + +class ColumnOrder { + public: + enum type { UNDEFINED, TYPE_DEFINED_ORDER }; + explicit ColumnOrder(ColumnOrder::type column_order) : column_order_(column_order) {} + // Default to Type Defined Order + ColumnOrder() : column_order_(type::TYPE_DEFINED_ORDER) {} + ColumnOrder::type get_order() { return column_order_; } + + static ColumnOrder undefined_; + static ColumnOrder type_defined_; + + private: + ColumnOrder::type column_order_; +}; + +/// \brief BoundaryOrder is a proxy around format::BoundaryOrder. +struct BoundaryOrder { + enum type { + Unordered = 0, + Ascending = 1, + Descending = 2, + // Should always be last element + UNDEFINED = 3 + }; +}; + +/// \brief SortingColumn is a proxy around format::SortingColumn. +struct PARQUET_EXPORT SortingColumn { + // The column index (in this row group) + int32_t column_idx; + + // If true, indicates this column is sorted in descending order. + bool descending; + + // If true, nulls will come before non-null values, otherwise, nulls go at the end. + bool nulls_first; +}; + +inline bool operator==(const SortingColumn& left, const SortingColumn& right) { + return left.nulls_first == right.nulls_first && left.descending == right.descending && + left.column_idx == right.column_idx; +} + +inline bool operator!=(const SortingColumn& left, const SortingColumn& right) { + return !(left == right); +} + +// ---------------------------------------------------------------------- + +struct ByteArray { + ByteArray() : len(0), ptr(NULLPTR) {} + ByteArray(uint32_t len, const uint8_t* ptr) : len(len), ptr(ptr) {} + + ByteArray(::std::string_view view) // NOLINT implicit conversion + : ByteArray(static_cast(view.size()), + reinterpret_cast(view.data())) {} + + explicit operator std::string_view() const { + return std::string_view{reinterpret_cast(ptr), len}; + } + + uint32_t len; + const uint8_t* ptr; +}; + +inline bool operator==(const ByteArray& left, const ByteArray& right) { + return left.len == right.len && + (left.len == 0 || std::memcmp(left.ptr, right.ptr, left.len) == 0); +} + +inline bool operator!=(const ByteArray& left, const ByteArray& right) { + return !(left == right); +} + +struct FixedLenByteArray { + FixedLenByteArray() : ptr(NULLPTR) {} + explicit FixedLenByteArray(const uint8_t* ptr) : ptr(ptr) {} + const uint8_t* ptr; +}; + +using FLBA = FixedLenByteArray; + +// Julian day at unix epoch. 
+// +// The Julian Day Number (JDN) is the integer assigned to a whole solar day in +// the Julian day count starting from noon Universal time, with Julian day +// number 0 assigned to the day starting at noon on Monday, January 1, 4713 BC, +// proleptic Julian calendar (November 24, 4714 BC, in the proleptic Gregorian +// calendar), +constexpr int64_t kJulianToUnixEpochDays = INT64_C(2440588); +constexpr int64_t kSecondsPerDay = INT64_C(60 * 60 * 24); +constexpr int64_t kMillisecondsPerDay = kSecondsPerDay * INT64_C(1000); +constexpr int64_t kMicrosecondsPerDay = kMillisecondsPerDay * INT64_C(1000); +constexpr int64_t kNanosecondsPerDay = kMicrosecondsPerDay * INT64_C(1000); + +MANUALLY_ALIGNED_STRUCT(1) Int96 { uint32_t value[3]; }; +STRUCT_END(Int96, 12); + +inline bool operator==(const Int96& left, const Int96& right) { + return std::equal(left.value, left.value + 3, right.value); +} + +inline bool operator!=(const Int96& left, const Int96& right) { return !(left == right); } + +static inline std::string ByteArrayToString(const ByteArray& a) { + return std::string(reinterpret_cast(a.ptr), a.len); +} + +static inline void Int96SetNanoSeconds(parquet::Int96& i96, int64_t nanoseconds) { + std::memcpy(&i96.value, &nanoseconds, sizeof(nanoseconds)); +} + +struct DecodedInt96 { + uint64_t days_since_epoch; + uint64_t nanoseconds; +}; + +static inline DecodedInt96 DecodeInt96Timestamp(const parquet::Int96& i96) { + // We do the computations in the unsigned domain to avoid unsigned behaviour + // on overflow. + DecodedInt96 result; + result.days_since_epoch = i96.value[2] - static_cast(kJulianToUnixEpochDays); + result.nanoseconds = 0; + + memcpy(&result.nanoseconds, &i96.value, sizeof(uint64_t)); + return result; +} + +static inline int64_t Int96GetNanoSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + return static_cast(decoded.days_since_epoch * kNanosecondsPerDay + + decoded.nanoseconds); +} + +static inline int64_t Int96GetMicroSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t microseconds = decoded.nanoseconds / static_cast(1000); + return static_cast(decoded.days_since_epoch * kMicrosecondsPerDay + + microseconds); +} + +static inline int64_t Int96GetMilliSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t milliseconds = decoded.nanoseconds / static_cast(1000000); + return static_cast(decoded.days_since_epoch * kMillisecondsPerDay + + milliseconds); +} + +static inline int64_t Int96GetSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t seconds = decoded.nanoseconds / static_cast(1000000000); + return static_cast(decoded.days_since_epoch * kSecondsPerDay + seconds); +} + +static inline std::string Int96ToString(const Int96& a) { + std::ostringstream result; + std::copy(a.value, a.value + 3, std::ostream_iterator(result, " ")); + return result.str(); +} + +static inline std::string FixedLenByteArrayToString(const FixedLenByteArray& a, int len) { + std::ostringstream result; + std::copy(a.ptr, a.ptr + len, std::ostream_iterator(result, " ")); + return result.str(); +} + +template +struct type_traits {}; + +template <> +struct type_traits { + using value_type = bool; + + static constexpr int value_byte_size = 1; + static constexpr const char* printf_code = "d"; +}; + +template <> +struct type_traits { + using value_type = int32_t; + + static constexpr int value_byte_size = 4; + static constexpr const char* 
printf_code = "d"; +}; + +template <> +struct type_traits { + using value_type = int64_t; + + static constexpr int value_byte_size = 8; + static constexpr const char* printf_code = + (sizeof(long) == 64) ? "ld" : "lld"; // NOLINT: runtime/int +}; + +template <> +struct type_traits { + using value_type = Int96; + + static constexpr int value_byte_size = 12; + static constexpr const char* printf_code = "s"; +}; + +template <> +struct type_traits { + using value_type = float; + + static constexpr int value_byte_size = 4; + static constexpr const char* printf_code = "f"; +}; + +template <> +struct type_traits { + using value_type = double; + + static constexpr int value_byte_size = 8; + static constexpr const char* printf_code = "lf"; +}; + +template <> +struct type_traits { + using value_type = ByteArray; + + static constexpr int value_byte_size = sizeof(ByteArray); + static constexpr const char* printf_code = "s"; +}; + +template <> +struct type_traits { + using value_type = FixedLenByteArray; + + static constexpr int value_byte_size = sizeof(FixedLenByteArray); + static constexpr const char* printf_code = "s"; +}; + +template +struct PhysicalType { + using c_type = typename type_traits::value_type; + static constexpr Type::type type_num = TYPE; +}; + +using BooleanType = PhysicalType; +using Int32Type = PhysicalType; +using Int64Type = PhysicalType; +using Int96Type = PhysicalType; +using FloatType = PhysicalType; +using DoubleType = PhysicalType; +using ByteArrayType = PhysicalType; +using FLBAType = PhysicalType; + +template +inline std::string format_fwf(int width) { + std::stringstream ss; + ss << "%-" << width << type_traits::printf_code; + return ss.str(); +} + +PARQUET_EXPORT std::string EncodingToString(Encoding::type t); + +PARQUET_EXPORT std::string ConvertedTypeToString(ConvertedType::type t); + +PARQUET_EXPORT std::string TypeToString(Type::type t); + +PARQUET_EXPORT std::string FormatStatValue(Type::type parquet_type, + ::std::string_view val); + +PARQUET_EXPORT int GetTypeByteSize(Type::type t); + +PARQUET_EXPORT SortOrder::type DefaultSortOrder(Type::type primitive); + +PARQUET_EXPORT SortOrder::type GetSortOrder(ConvertedType::type converted, + Type::type primitive); + +PARQUET_EXPORT SortOrder::type GetSortOrder( + const std::shared_ptr& logical_type, Type::type primitive); + +} // namespace parquet diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h new file mode 100644 index 0000000000000000000000000000000000000000..ce44480c5732e760841c2c29b0bede4159b6e33e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce44480c5732e760841c2c29b0bede4159b6e33e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This header needs to be included multiple times.
+
+#include "arrow/util/windows_fixup.h"
+
+#ifdef _WIN32
+
+// parquet.thrift's OPTIONAL RepetitionType conflicts with a Windows #define
+#ifdef OPTIONAL
+#undef OPTIONAL
+#endif
+
+#endif  // _WIN32
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h
new file mode 100644
index 0000000000000000000000000000000000000000..a54f287883e006e9cd6d9aeeb2efeb1d6f9db2df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "parquet/hasher.h"
+#include "parquet/platform.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class PARQUET_EXPORT XxHasher : public Hasher {
+ public:
+  uint64_t Hash(int32_t value) const override;
+  uint64_t Hash(int64_t value) const override;
+  uint64_t Hash(float value) const override;
+  uint64_t Hash(double value) const override;
+  uint64_t Hash(const Int96* value) const override;
+  uint64_t Hash(const ByteArray* value) const override;
+  uint64_t Hash(const FLBA* val, uint32_t len) const override;
+
+  void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const float* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const double* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const FLBA* values, uint32_t type_len, int num_values,
+              uint64_t* hashes) const override;
+
+  static constexpr int kParquetBloomXxHashSeed = 0;
+};
+
+}  // namespace parquet
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1500 b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1500
new file mode 100644
index 0000000000000000000000000000000000000000..43f535a0a3c80a924a751c8380b127ac44e9c666
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1500
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c0dbb2d92e77d7f78f1fd8859e5af2f1405e89f49c68cee356b9865669068bd
+size 19347672
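XxHasher implements the parquet::Hasher interface used by Parquet Bloom filters; the scalar Hash and batched Hashes overloads must agree, and kParquetBloomXxHashSeed pins the seed the format specifies. A minimal sketch of scalar use (function name illustrative):

#include <cstdint>

uint64_t HashOneValue(int64_t v) {
  parquet::XxHasher hasher;
  return hasher.Hash(v);  // dispatched through the Hasher vtable
}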
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.cc b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1df3a94cef225f44de87c241bddded527f66804f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.cc
@@ -0,0 +1,62 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "csv.h"
+
+#include <memory>
+
+#include "arrow/python/common.h"
+
+namespace arrow {
+
+using csv::InvalidRow;
+using csv::InvalidRowHandler;
+using csv::InvalidRowResult;
+
+namespace py {
+namespace csv {
+
+InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback cb, PyObject* py_handler) {
+  if (cb == nullptr) {
+    return InvalidRowHandler{};
+  }
+
+  struct Handler {
+    PyInvalidRowCallback cb;
+    std::shared_ptr<OwnedRefNoGIL> handler_ref;
+
+    InvalidRowResult operator()(const InvalidRow& invalid_row) {
+      InvalidRowResult result;
+      auto st = SafeCallIntoPython([&]() -> Status {
+        result = cb(handler_ref->obj(), invalid_row);
+        if (PyErr_Occurred()) {
+          PyErr_WriteUnraisable(handler_ref->obj());
+        }
+        return Status::OK();
+      });
+      ARROW_UNUSED(st);
+      return result;
+    }
+  };
+
+  Py_INCREF(py_handler);
+  return Handler{cb, std::make_shared<OwnedRefNoGIL>(py_handler)};
+}
+
+}  // namespace csv
+}  // namespace py
+}  // namespace arrow
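MakeInvalidRowHandler wraps a C callback plus a Python handler object into an arrow::csv::InvalidRowHandler, keeping the Python object alive through a shared OwnedRefNoGIL and re-entering the interpreter safely. A hedged sketch of calling it, where the captureless lambda is a hypothetical stand-in for the Cython-generated PyInvalidRowCallback and py_handler is assumed to be a valid Python object:

arrow::csv::InvalidRowHandler MakeSkippingHandler(PyObject* py_handler) {
  auto cb = [](PyObject* /*handler*/, const arrow::csv::InvalidRow&) {
    return arrow::csv::InvalidRowResult::Skip;  // skip every invalid row
  };
  return arrow::py::csv::MakeInvalidRowHandler(cb, py_handler);
}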
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..1187037aed29e2cc5910e156c260fc9d9d81bff5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.h
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+class Decimal128;
+class Decimal256;
+
+namespace py {
+
+class OwnedRef;
+
+//
+// Python Decimal support
+//
+
+namespace internal {
+
+// \brief Import the Python Decimal type
+ARROW_PYTHON_EXPORT
+Status ImportDecimalType(OwnedRef* decimal_type);
+
+// \brief Convert a Python Decimal object to a C++ string
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[out] out The string representation of the Python Decimal instance
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
+
+// \brief Convert a C++ std::string to a Python Decimal instance
+// \param[in] decimal_constructor The decimal type object
+// \param[in] decimal_string A decimal string
+// \return An instance of decimal.Decimal
+ARROW_PYTHON_EXPORT
+PyObject* DecimalFromString(PyObject* decimal_constructor,
+                            const std::string& decimal_string);
+
+// \brief Convert a Python decimal to an Arrow Decimal128 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal128* out);
+
+// \brief Convert a Python object to an Arrow Decimal128 object
+// \param[in] obj A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out);
+
+// \brief Convert a Python decimal to an Arrow Decimal256 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal256* out);
+
+// \brief Convert a Python object to an Arrow Decimal256 object
+// \param[in] obj A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
+
+// \brief Check whether obj is an instance of Decimal
+ARROW_PYTHON_EXPORT
+bool PyDecimal_Check(PyObject* obj);
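A sketch of the Decimal128 path, assuming the GIL is held and using the public arrow::decimal128 factory to obtain a DecimalType (the precision and scale here are arbitrary, and the function name is illustrative):

arrow::Status ToDecimal128(PyObject* py_decimal, arrow::Decimal128* out) {
  std::shared_ptr<arrow::DataType> type = arrow::decimal128(/*precision=*/38, /*scale=*/9);
  const auto& decimal_type = static_cast<const arrow::DecimalType&>(*type);
  return arrow::py::internal::DecimalFromPythonDecimal(py_decimal, decimal_type, out);
}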
+
+// \brief Check whether obj is nan. This function will abort the program if the
+// argument is not a Decimal instance
+ARROW_PYTHON_EXPORT
+bool PyDecimal_ISNAN(PyObject* obj);
+
+// \brief Helper class to track and update the precision and scale of a decimal
+class ARROW_PYTHON_EXPORT DecimalMetadata {
+ public:
+  DecimalMetadata();
+  DecimalMetadata(int32_t precision, int32_t scale);
+
+  // \brief Adjust the precision and scale of a decimal type given a new precision
+  // and a new scale
+  // \param[in] suggested_precision A candidate precision
+  // \param[in] suggested_scale A candidate scale
+  // \return The status of the operation
+  Status Update(int32_t suggested_precision, int32_t suggested_scale);
+
+  // \brief A convenient interface for updating the precision and scale based on a
+  // Python Decimal object
+  // \param object A Python Decimal object
+  // \return The status of the operation
+  Status Update(PyObject* object);
+
+  int32_t precision() const { return precision_; }
+  int32_t scale() const { return scale_; }
+
+ private:
+  int32_t precision_;
+  int32_t scale_;
+};
+
+}  // namespace internal
+}  // namespace py
+}  // namespace arrow
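DecimalMetadata accumulates the widest precision and scale seen so far, which is how type inference settles on a decimal type for a column of Python values. A sketch under the assumption that `values` is a hypothetical array of borrowed references to Python Decimal objects and the GIL is held:

arrow::Status InferDecimalType(PyObject** values, int64_t n,
                               std::shared_ptr<arrow::DataType>* out) {
  arrow::py::internal::DecimalMetadata metadata;
  for (int64_t i = 0; i < n; ++i) {
    ARROW_RETURN_NOT_OK(metadata.Update(values[i]));  // widens precision/scale
  }
  *out = arrow::decimal128(metadata.precision(), metadata.scale());
  return arrow::Status::OK();
}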
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.h
new file mode 100644
index 0000000000000000000000000000000000000000..003fd5cb80551b4e54225327d56acff7172a754d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.h
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/filesystem/filesystem.h"
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+namespace fs {
+
+class ARROW_PYTHON_EXPORT PyFileSystemVtable {
+ public:
+  std::function<void(PyObject*, std::string* out)> get_type_name;
+  std::function<bool(PyObject*, const arrow::fs::FileSystem& other)> equals;
+
+  std::function<void(PyObject*, const std::string& path, arrow::fs::FileInfo* out)>
+      get_file_info;
+  std::function<void(PyObject*, const std::vector<std::string>& paths,
+                     std::vector<arrow::fs::FileInfo>* out)>
+      get_file_info_vector;
+  std::function<void(PyObject*, const arrow::fs::FileSelector&,
+                     std::vector<arrow::fs::FileInfo>* out)>
+      get_file_info_selector;
+
+  std::function<void(PyObject*, const std::string& path, bool)> create_dir;
+  std::function<void(PyObject*, const std::string& path)> delete_dir;
+  std::function<void(PyObject*, const std::string& path, bool)> delete_dir_contents;
+  std::function<void(PyObject*)> delete_root_dir_contents;
+  std::function<void(PyObject*, const std::string& path)> delete_file;
+  std::function<void(PyObject*, const std::string& src, const std::string& dest)> move;
+  std::function<void(PyObject*, const std::string& src, const std::string& dest)>
+      copy_file;
+
+  std::function<void(PyObject*, const std::string& path,
+                     std::shared_ptr<io::InputStream>* out)>
+      open_input_stream;
+  std::function<void(PyObject*, const std::string& path,
+                     std::shared_ptr<io::RandomAccessFile>* out)>
+      open_input_file;
+  std::function<void(PyObject*, const std::string& path,
+                     const std::shared_ptr<const KeyValueMetadata>&,
+                     std::shared_ptr<io::OutputStream>* out)>
+      open_output_stream;
+  std::function<void(PyObject*, const std::string& path,
+                     const std::shared_ptr<const KeyValueMetadata>&,
+                     std::shared_ptr<io::OutputStream>* out)>
+      open_append_stream;
+
+  std::function<void(PyObject*, const std::string& path, std::string* out)>
+      normalize_path;
+};
+
+class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem {
+ public:
+  PyFileSystem(PyObject* handler, PyFileSystemVtable vtable);
+  ~PyFileSystem() override;
+
+  static std::shared_ptr<PyFileSystem> Make(PyObject* handler, PyFileSystemVtable vtable);
+
+  std::string type_name() const override;
+
+  bool Equals(const FileSystem& other) const override;
+
+  Result<arrow::fs::FileInfo> GetFileInfo(const std::string& path) override;
+  Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
+      const std::vector<std::string>& paths) override;
+  Result<std::vector<arrow::fs::FileInfo>> GetFileInfo(
+      const arrow::fs::FileSelector& select) override;
+
+  Status CreateDir(const std::string& path, bool recursive = true) override;
+
+  Status DeleteDir(const std::string& path) override;
+  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
+  Status DeleteRootDirContents() override;
+
+  Status DeleteFile(const std::string& path) override;
+
+  Status Move(const std::string& src, const std::string& dest) override;
+
+  Status CopyFile(const std::string& src, const std::string& dest) override;
+
+  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+      const std::string& path) override;
+  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+      const std::string& path) override;
+  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+      const std::string& path,
+      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
+  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+      const std::string& path,
+      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
+
+  Result<std::string> NormalizePath(std::string path) override;
+
+  PyObject* handler() const { return handler_.obj(); }
+
+ private:
+  OwnedRefNoGIL handler_;
+  PyFileSystemVtable vtable_;
+};
+
+}  // namespace fs
+}  // namespace py
+}  // namespace arrow
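PyFileSystem forwards each FileSystem method to the matching vtable slot with the Python handler as the first argument; the out-parameter convention keeps the slots exception-free at the C++ boundary. In pyarrow the slots are filled by generated trampolines; the lambda below is a hypothetical stand-in showing the calling convention for one slot, under the signatures reconstructed above:

std::shared_ptr<arrow::py::fs::PyFileSystem> MakeExampleFs(PyObject* handler) {
  arrow::py::fs::PyFileSystemVtable vtable;
  vtable.get_type_name = [](PyObject*, std::string* out) { *out = "example"; };
  // ... the remaining slots would be populated the same way ...
  return arrow::py::fs::PyFileSystem::Make(handler, std::move(vtable));
}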
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ddcbb51f6e0b70c1b16dc9a9ce6caf79fb2369e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.h
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+namespace gdb {
+
+ARROW_PYTHON_EXPORT
+void TestSession();
+
+}  // namespace gdb
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/platform.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/platform.h
new file mode 100644
index 0000000000000000000000000000000000000000..e71c7ac85399e4e3f7c93d4814fd7fdad774dc13
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/platform.h
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between pandas's NumPy-based data representation
+// and Arrow data structures
+
+#pragma once
+
+// If PY_SSIZE_T_CLEAN is defined, argument parsing functions treat #-specifier
+// to mean Py_ssize_t (defining this to suppress deprecation warning)
+#define PY_SSIZE_T_CLEAN
+
+#include <Python.h>  // IWYU pragma: export
+#include <datetime.h>
+
+// Work around C2528 error
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+#undef timezone
+#endif
+
+// https://bugs.python.org/issue36020
+// TODO(wjones127): Can remove once we drop support for CPython 3.9
+#ifdef snprintf
+#undef snprintf
+#endif
+#endif