diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h
new file mode 100644
index 0000000000000000000000000000000000000000..6411aebf804426cce5916410124f5f64a3865f94
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_base.h
@@ -0,0 +1,301 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <iosfwd>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+#include "arrow/visitor.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// User array accessor types
+
+/// \brief Array base type
+/// Immutable data array with some logical type and some length.
+///
+/// Any memory is owned by the respective Buffer instance (or its parents).
+///
+/// The base class is only required to have a null bitmap buffer if the null
+/// count is greater than 0
+///
+/// If known, the null count can be provided in the base Array constructor. If
+/// the null count is not known, pass -1 to indicate that the null count is to
+/// be computed on the first call to null_count()
+class ARROW_EXPORT Array {
+ public:
+  virtual ~Array() = default;
+
+  /// \brief Return true if value at index is null. Does not boundscheck
+  bool IsNull(int64_t i) const { return !IsValid(i); }
+
+  /// \brief Return true if value at index is valid (not null). Does not
+  /// boundscheck
+  bool IsValid(int64_t i) const {
+    if (null_bitmap_data_ != NULLPTR) {
+      return bit_util::GetBit(null_bitmap_data_, i + data_->offset);
+    }
+    // Dispatching with a few conditionals like this makes IsNull more
+    // efficient for how it is used in practice. Making IsNull virtual
+    // would add a vtable lookup to every call and prevent inlining +
+    // a potential inner-branch removal.
+    if (type_id() == Type::SPARSE_UNION) {
+      return !internal::IsNullSparseUnion(*data_, i);
+    }
+    if (type_id() == Type::DENSE_UNION) {
+      return !internal::IsNullDenseUnion(*data_, i);
+    }
+    if (type_id() == Type::RUN_END_ENCODED) {
+      return !internal::IsNullRunEndEncoded(*data_, i);
+    }
+    return data_->null_count != data_->length;
+  }
+
+  /// \brief Return a Scalar containing the value of this array at i
+  Result<std::shared_ptr<Scalar>> GetScalar(int64_t i) const;
+
+  /// Size in the number of elements this array contains.
+  int64_t length() const { return data_->length; }
+
+  /// A relative position into another array's data, to enable zero-copy
+  /// slicing. This value defaults to zero
+  int64_t offset() const { return data_->offset; }
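+
+  // A minimal usage sketch (not part of the upstream header): given any
+  // pre-built array, e.g. an assumed Int32 array `arr` holding [1, null, 3],
+  // the accessors above behave as follows:
+  //
+  //   assert(arr->length() == 3);
+  //   assert(arr->IsValid(0) && arr->IsNull(1));
+  //   assert(arr->null_count() == 1);  // computed lazily if unknown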
+
+  /// The number of null entries in the array. If the null count was not known
+  /// at time of construction (and set to a negative value), then the null
+  /// count will be computed and cached on the first invocation of this
+  /// function
+  int64_t null_count() const;
+
+  /// \brief Computes the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// null_count(). For types that have no validity bitmap, this function will
+  /// recompute the null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  const std::shared_ptr<DataType>& type() const { return data_->type; }
+  Type::type type_id() const { return data_->type->id(); }
+
+  /// Buffer for the validity (null) bitmap, if any. Note that Union types
+  /// never have a null bitmap.
+  ///
+  /// Note that for `null_count == 0` or for null type, this will be null.
+  /// This buffer does not account for any slice offset
+  const std::shared_ptr<Buffer>& null_bitmap() const { return data_->buffers[0]; }
+
+  /// Raw pointer to the null bitmap.
+  ///
+  /// Note that for `null_count == 0` or for null type, this will be null.
+  /// This buffer does not account for any slice offset
+  const uint8_t* null_bitmap_data() const { return null_bitmap_data_; }
+
+  /// Equality comparison with another array
+  bool Equals(const Array& arr, const EqualOptions& = EqualOptions::Defaults()) const;
+  bool Equals(const std::shared_ptr<Array>& arr,
+              const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \brief Return the formatted unified diff of arrow::Diff between this
+  /// Array and another Array
+  std::string Diff(const Array& other) const;
+
+  /// Approximate equality comparison with another array
+  ///
+  /// epsilon is only used if this is FloatArray or DoubleArray
+  bool ApproxEquals(const std::shared_ptr<Array>& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+  bool ApproxEquals(const Array& arr,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// Compare if the range of slots specified are equal for the given array and
+  /// this array. end_idx exclusive. This method does not bounds check.
+  bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx,
+                   const Array& other,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(int64_t start_idx, int64_t end_idx, int64_t other_start_idx,
+                   const std::shared_ptr<Array>& other,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(const Array& other, int64_t start_idx, int64_t end_idx,
+                   int64_t other_start_idx,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+  bool RangeEquals(const std::shared_ptr<Array>& other, int64_t start_idx,
+                   int64_t end_idx, int64_t other_start_idx,
+                   const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \brief Apply the ArrayVisitor::Visit() method specialized to the array type
+  Status Accept(ArrayVisitor* visitor) const;
+
+  /// Construct a zero-copy view of this array with the given type.
+  ///
+  /// This method checks if the types are layout-compatible.
+  /// Nested types are traversed in depth-first order. Data buffers must have
+  /// the same item sizes, even though the logical types may be different.
+  /// An error is returned if the types are not layout-compatible.
+  Result<std::shared_ptr<Array>> View(const std::shared_ptr<DataType>& type) const;
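+
+  // A minimal usage sketch (not part of the upstream header), assuming `arr`
+  // is a pre-built Int32 array: int32 and uint32 share the same layout, so
+  // View() can reinterpret the buffers without copying.
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Array> as_uint32,
+  //                         arr->View(arrow::uint32()));
+  //   // as_uint32 shares arr's buffers; only the logical type differs.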
+
+  /// \brief Construct a copy of the array with all buffers on destination
+  /// Memory Manager
+  ///
+  /// This method recursively copies the array's buffers and those of its children
+  /// onto the destination MemoryManager device and returns the new Array.
+  Result<std::shared_ptr<Array>> CopyTo(const std::shared_ptr<MemoryManager>& to) const;
+
+  /// \brief Construct a new array attempting to zero-copy view if possible.
+  ///
+  /// Like CopyTo this method recursively goes through all of the array's buffers
+  /// and those of its children and first attempts to create zero-copy
+  /// views on the destination MemoryManager device. If it can't, it falls back
+  /// to performing a copy. See Buffer::ViewOrCopy.
+  Result<std::shared_ptr<Array>> ViewOrCopyTo(
+      const std::shared_ptr<MemoryManager>& to) const;
+
+  /// Construct a zero-copy slice of the array with the indicated offset and
+  /// length
+  ///
+  /// \param[in] offset the position of the first element in the constructed
+  /// slice
+  /// \param[in] length the length of the slice. If there are not enough
+  /// elements in the array, the length will be adjusted accordingly
+  ///
+  /// \return a new object wrapped in std::shared_ptr<Array>
+  std::shared_ptr<Array> Slice(int64_t offset, int64_t length) const;
+
+  /// Slice from offset until end of the array
+  std::shared_ptr<Array> Slice(int64_t offset) const;
+
+  /// Input-checking variant of Array::Slice
+  Result<std::shared_ptr<Array>> SliceSafe(int64_t offset, int64_t length) const;
+  /// Input-checking variant of Array::Slice
+  Result<std::shared_ptr<Array>> SliceSafe(int64_t offset) const;
+
+  const std::shared_ptr<ArrayData>& data() const { return data_; }
+
+  int num_fields() const { return static_cast<int>(data_->child_data.size()); }
+
+  /// \return PrettyPrint representation of array suitable for debugging
+  std::string ToString() const;
+
+  /// \brief Perform cheap validation checks to determine obvious inconsistencies
+  /// within the array's internal data.
+  ///
+  /// This is O(k) where k is the number of descendants.
+  ///
+  /// \return Status
+  Status Validate() const;
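+
+  // A minimal usage sketch (not part of the upstream header), assuming `arr`
+  // is a pre-built array of length 5. Slice() is zero-copy: it adjusts
+  // offset/length metadata and shares the underlying buffers.
+  //
+  //   std::shared_ptr<Array> mid = arr->Slice(/*offset=*/1, /*length=*/3);
+  //   std::shared_ptr<Array> tail = arr->Slice(/*offset=*/4);  // length 1
+  //   // SliceSafe returns an error instead of adjusting bad input:
+  //   ARROW_ASSIGN_OR_RAISE(auto checked, arr->SliceSafe(1, 3));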
+
+  /// \brief Perform extensive validation checks to determine inconsistencies
+  /// within the array's internal data.
+  ///
+  /// This is potentially O(k*n) where k is the number of descendants and n
+  /// is the array length.
+  ///
+  /// \return Status
+  Status ValidateFull() const;
+
+ protected:
+  Array() = default;
+  ARROW_DEFAULT_MOVE_AND_ASSIGN(Array);
+
+  std::shared_ptr<ArrayData> data_;
+  const uint8_t* null_bitmap_data_ = NULLPTR;
+
+  /// Protected method for constructors
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    if (data->buffers.size() > 0) {
+      null_bitmap_data_ = data->GetValuesSafe<uint8_t>(0, /*offset=*/0);
+    } else {
+      null_bitmap_data_ = NULLPTR;
+    }
+    data_ = data;
+  }
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Array);
+
+  ARROW_FRIEND_EXPORT friend void PrintTo(const Array& x, std::ostream* os);
+};
+
+static inline std::ostream& operator<<(std::ostream& os, const Array& x) {
+  os << x.ToString();
+  return os;
+}
+
+/// Base class for non-nested arrays
+class ARROW_EXPORT FlatArray : public Array {
+ protected:
+  using Array::Array;
+};
+
+/// Base class for arrays of fixed-size logical types
+class ARROW_EXPORT PrimitiveArray : public FlatArray {
+ public:
+  PrimitiveArray(const std::shared_ptr<DataType>& type, int64_t length,
+                 const std::shared_ptr<Buffer>& data,
+                 const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                 int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// Does not account for any slice offset
+  const std::shared_ptr<Buffer>& values() const { return data_->buffers[1]; }
+
+ protected:
+  PrimitiveArray() : raw_values_(NULLPTR) {}
+
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->Array::SetData(data);
+    raw_values_ = data->GetValuesSafe<uint8_t>(1, /*offset=*/0);
+  }
+
+  explicit PrimitiveArray(const std::shared_ptr<ArrayData>& data) { SetData(data); }
+
+  const uint8_t* raw_values_;
+};
+
+/// Degenerate null type Array
+class ARROW_EXPORT NullArray : public FlatArray {
+ public:
+  using TypeClass = NullType;
+
+  explicit NullArray(const std::shared_ptr<ArrayData>& data) { SetData(data); }
+  explicit NullArray(int64_t length);
+
+ private:
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    null_bitmap_data_ = NULLPTR;
+    data->null_count = data->length;
+    data_ = data;
+  }
+};
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h
new file mode 100644
index 0000000000000000000000000000000000000000..19fdee61243d17de729e795d18102ece6cbf7bd8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h
@@ -0,0 +1,329 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor classes for Binary, LargeBinary, String, LargeString,
+// FixedSizeBinary
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/stl_iterator.h"
+#include "arrow/type.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup binary-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// Binary and String
+
+/// Base class for variable-sized binary arrays, regardless of offset size
+/// and logical interpretation.
+template <typename TYPE>
+class BaseBinaryArray : public FlatArray {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+  using IteratorType = stl::ArrayIterator<BaseBinaryArray<TYPE>>;
+
+  /// Return the pointer to the given element's bytes
+  // XXX should GetValue(int64_t i) return a string_view?
+  const uint8_t* GetValue(int64_t i, offset_type* out_length) const {
+    // Account for base offset
+    i += data_->offset;
+    const offset_type pos = raw_value_offsets_[i];
+    *out_length = raw_value_offsets_[i + 1] - pos;
+    return raw_data_ + pos;
+  }
+
+  /// \brief Get binary value as a string_view
+  ///
+  /// \param i the value index
+  /// \return the view over the selected value
+  std::string_view GetView(int64_t i) const {
+    // Account for base offset
+    i += data_->offset;
+    const offset_type pos = raw_value_offsets_[i];
+    return std::string_view(reinterpret_cast<const char*>(raw_data_ + pos),
+                            raw_value_offsets_[i + 1] - pos);
+  }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  /// \brief Get binary value as a string_view
+  /// Provided for consistency with other arrays.
+  ///
+  /// \param i the value index
+  /// \return the view over the selected value
+  std::string_view Value(int64_t i) const { return GetView(i); }
+
+  /// \brief Get binary value as a std::string
+  ///
+  /// \param i the value index
+  /// \return the value copied into a std::string
+  std::string GetString(int64_t i) const { return std::string(GetView(i)); }
+
+  /// Note that this buffer does not account for any slice offset
+  std::shared_ptr<Buffer> value_offsets() const { return data_->buffers[1]; }
+
+  /// Note that this buffer does not account for any slice offset
+  std::shared_ptr<Buffer> value_data() const { return data_->buffers[2]; }
+
+  const offset_type* raw_value_offsets() const {
+    return raw_value_offsets_ + data_->offset;
+  }
+
+  const uint8_t* raw_data() const { return raw_data_; }
+
+  /// \brief Return the data buffer absolute offset of the data for the value
+  /// at the passed index.
+  ///
+  /// Does not perform boundschecking
+  offset_type value_offset(int64_t i) const {
+    return raw_value_offsets_[i + data_->offset];
+  }
+
+  /// \brief Return the length of the data for the value at the passed index.
+  ///
+  /// Does not perform boundschecking
+  offset_type value_length(int64_t i) const {
+    i += data_->offset;
+    return raw_value_offsets_[i + 1] - raw_value_offsets_[i];
+  }
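+
+  // A minimal usage sketch (not part of the upstream header), assuming `strs`
+  // is a pre-built StringArray holding ["foo", "barbaz"]:
+  //
+  //   std::string_view v = strs.GetView(1);  // "barbaz", no copy
+  //   std::string s = strs.GetString(0);     // "foo", copied
+  //   assert(strs.value_length(1) == 6);
+  //   assert(strs.value_offset(1) == 3);     // bytes into the data buffer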
+
+  /// \brief Return the total length of the memory in the data buffer
+  /// referenced by this array. If the array has been sliced then this may be
+  /// less than the size of the data buffer (data_->buffers[2]).
+  offset_type total_values_length() const {
+    if (data_->length > 0) {
+      return raw_value_offsets_[data_->length + data_->offset] -
+             raw_value_offsets_[data_->offset];
+    } else {
+      return 0;
+    }
+  }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  // For subclasses
+  BaseBinaryArray() = default;
+
+  // Protected method for constructors
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->Array::SetData(data);
+    raw_value_offsets_ = data->GetValuesSafe<offset_type>(1, /*offset=*/0);
+    raw_data_ = data->GetValuesSafe<uint8_t>(2, /*offset=*/0);
+  }
+
+  const offset_type* raw_value_offsets_ = NULLPTR;
+  const uint8_t* raw_data_ = NULLPTR;
+};
+
+/// Concrete Array class for variable-size binary data
+class ARROW_EXPORT BinaryArray : public BaseBinaryArray<BinaryType> {
+ public:
+  explicit BinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  BinaryArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+              const std::shared_ptr<Buffer>& data,
+              const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+              int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+ protected:
+  // For subclasses such as StringArray
+  BinaryArray() : BaseBinaryArray() {}
+};
+
+/// Concrete Array class for variable-size string (utf-8) data
+class ARROW_EXPORT StringArray : public BinaryArray {
+ public:
+  using TypeClass = StringType;
+
+  explicit StringArray(const std::shared_ptr<ArrayData>& data);
+
+  StringArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+              const std::shared_ptr<Buffer>& data,
+              const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+              int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
+
+/// Concrete Array class for large variable-size binary data
+class ARROW_EXPORT LargeBinaryArray : public BaseBinaryArray<LargeBinaryType> {
+ public:
+  explicit LargeBinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  LargeBinaryArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+                   const std::shared_ptr<Buffer>& data,
+                   const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                   int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+ protected:
+  // For subclasses such as LargeStringArray
+  LargeBinaryArray() : BaseBinaryArray() {}
+};
+
+/// Concrete Array class for large variable-size string (utf-8) data
+class ARROW_EXPORT LargeStringArray : public LargeBinaryArray {
+ public:
+  using TypeClass = LargeStringType;
+
+  explicit LargeStringArray(const std::shared_ptr<ArrayData>& data);
+
+  LargeStringArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+                   const std::shared_ptr<Buffer>& data,
+                   const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                   int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
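+
+// A minimal construction sketch (not part of the upstream header), assuming
+// the builder API from arrow/builder.h; one way such an array is built:
+//
+//   arrow::StringBuilder builder;
+//   ARROW_RETURN_NOT_OK(builder.Append("foo"));
+//   ARROW_RETURN_NOT_OK(builder.AppendNull());
+//   ARROW_RETURN_NOT_OK(builder.Append("barbaz"));
+//   std::shared_ptr<arrow::Array> out;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&out));
+//   auto strs = std::static_pointer_cast<arrow::StringArray>(out);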
+
+// ----------------------------------------------------------------------
+// BinaryView and StringView
+
+/// Concrete Array class for variable-size binary view data using the
+/// BinaryViewType::c_type struct to reference in-line or out-of-line string values
class ARROW_EXPORT BinaryViewArray : public FlatArray {
+ public:
+  using TypeClass = BinaryViewType;
+  using IteratorType = stl::ArrayIterator<BinaryViewArray>;
+  using c_type = BinaryViewType::c_type;
+
+  explicit BinaryViewArray(std::shared_ptr<ArrayData> data);
+
+  BinaryViewArray(std::shared_ptr<DataType> type, int64_t length,
+                  std::shared_ptr<Buffer> views, BufferVector data_buffers,
+                  std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+                  int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  // For API compatibility with BinaryArray etc.
+  std::string_view GetView(int64_t i) const;
+  std::string GetString(int64_t i) const { return std::string{GetView(i)}; }
+
+  const auto& values() const { return data_->buffers[1]; }
+  const c_type* raw_values() const { return raw_values_; }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  IteratorType begin() const { return IteratorType(*this); }
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  using FlatArray::FlatArray;
+
+  void SetData(std::shared_ptr<ArrayData> data) {
+    FlatArray::SetData(std::move(data));
+    raw_values_ = data_->GetValuesSafe<c_type>(1);
+  }
+
+  const c_type* raw_values_;
+};
+
+/// Concrete Array class for variable-size string view (utf-8) data using
+/// BinaryViewType::c_type to reference in-line or out-of-line string values
+class ARROW_EXPORT StringViewArray : public BinaryViewArray {
+ public:
+  using TypeClass = StringViewType;
+
+  explicit StringViewArray(std::shared_ptr<ArrayData> data);
+
+  using BinaryViewArray::BinaryViewArray;
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
+
+// ----------------------------------------------------------------------
+// Fixed width binary
+
+/// Concrete Array class for fixed-size binary data
+class ARROW_EXPORT FixedSizeBinaryArray : public PrimitiveArray {
+ public:
+  using TypeClass = FixedSizeBinaryType;
+  using IteratorType = stl::ArrayIterator<FixedSizeBinaryArray>;
+
+  explicit FixedSizeBinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  FixedSizeBinaryArray(const std::shared_ptr<DataType>& type, int64_t length,
+                       const std::shared_ptr<Buffer>& data,
+                       const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                       int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  const uint8_t* GetValue(int64_t i) const;
+  const uint8_t* Value(int64_t i) const { return GetValue(i); }
+
+  std::string_view GetView(int64_t i) const {
+    return std::string_view(reinterpret_cast<const char*>(GetValue(i)), byte_width());
+  }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  std::string GetString(int64_t i) const { return std::string(GetView(i)); }
+
+  int32_t byte_width() const { return byte_width_; }
+
+  const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width_; }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->PrimitiveArray::SetData(data);
+    byte_width_ =
+        internal::checked_cast<const FixedSizeBinaryType&>(*type()).byte_width();
+  }
+
+  int32_t byte_width_;
+};
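+
+// A minimal usage sketch (not part of the upstream header): for an assumed
+// fixed_size_binary(16) array `ids` (e.g. raw UUIDs), every value has the
+// same width, so access needs no offsets buffer:
+//
+//   assert(ids.byte_width() == 16);
+//   std::string_view first = ids.GetView(0);  // exactly 16 bytes
+//   const uint8_t* raw = ids.GetValue(0);     // same bytes, raw pointer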
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..f14812549089ad9c2f6d0e80c8cc17e05d6252c6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "arrow/array/array_binary.h"
+#include "arrow/array/data.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup numeric-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// Decimal128Array
+
+/// Concrete Array class for 128-bit decimal data
+class ARROW_EXPORT Decimal128Array : public FixedSizeBinaryArray {
+ public:
+  using TypeClass = Decimal128Type;
+
+  using FixedSizeBinaryArray::FixedSizeBinaryArray;
+
+  /// \brief Construct Decimal128Array from ArrayData instance
+  explicit Decimal128Array(const std::shared_ptr<ArrayData>& data);
+
+  std::string FormatValue(int64_t i) const;
+};
+
+// Backward compatibility
+using DecimalArray = Decimal128Array;
+
+// ----------------------------------------------------------------------
+// Decimal256Array
+
+/// Concrete Array class for 256-bit decimal data
+class ARROW_EXPORT Decimal256Array : public FixedSizeBinaryArray {
+ public:
+  using TypeClass = Decimal256Type;
+
+  using FixedSizeBinaryArray::FixedSizeBinaryArray;
+
+  /// \brief Construct Decimal256Array from ArrayData instance
+  explicit Decimal256Array(const std::shared_ptr<ArrayData>& data);
+
+  std::string FormatValue(int64_t i) const;
+};
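+
+// A minimal usage sketch (not part of the upstream header): decimals are
+// stored as fixed-width binary, and FormatValue() renders slot i using the
+// type's scale. `arr` is an assumed Decimal128Array of type decimal128(10, 2):
+//
+//   std::string text = arr.FormatValue(0);  // e.g. "123.45"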
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf376b51f8c9470d2b4e4c7ed950c9a513fddc9b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h
@@ -0,0 +1,182 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// DictionaryArray
+
+/// \brief Array type for dictionary-encoded data with a
+/// data-dependent dictionary
+///
+/// A dictionary array contains an array of non-negative integers (the
+/// "dictionary indices") along with a data type containing a "dictionary"
+/// corresponding to the distinct values represented in the data.
+///
+/// For example, the array
+///
+///   ["foo", "bar", "foo", "bar", "foo", "bar"]
+///
+/// with dictionary ["bar", "foo"], would have dictionary array representation
+///
+///   indices: [1, 0, 1, 0, 1, 0]
+///   dictionary: ["bar", "foo"]
+///
+/// The indices in principle may be any integer type.
+class ARROW_EXPORT DictionaryArray : public Array {
+ public:
+  using TypeClass = DictionaryType;
+
+  explicit DictionaryArray(const std::shared_ptr<ArrayData>& data);
+
+  DictionaryArray(const std::shared_ptr<DataType>& type,
+                  const std::shared_ptr<Array>& indices,
+                  const std::shared_ptr<Array>& dictionary);
+
+  /// \brief Construct DictionaryArray from dictionary and indices
+  /// array and validate
+  ///
+  /// This function does the validation of the indices and input type. It checks if
+  /// all indices are non-negative and smaller than the size of the dictionary.
+  ///
+  /// \param[in] type a dictionary type
+  /// \param[in] dictionary the dictionary with same value type as the
+  /// type object
+  /// \param[in] indices an array of non-negative integers smaller than the
+  /// size of the dictionary
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<DataType>& type, const std::shared_ptr<Array>& indices,
+      const std::shared_ptr<Array>& dictionary);
+
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<Array>& indices, const std::shared_ptr<Array>& dictionary) {
+    return FromArrays(::arrow::dictionary(indices->type(), dictionary->type()), indices,
+                      dictionary);
+  }
+
+  /// \brief Transpose this DictionaryArray
+  ///
+  /// This method constructs a new dictionary array with the given dictionary
+  /// type, transposing indices using the transpose map. The type and the
+  /// transpose map are typically computed using DictionaryUnifier.
+  ///
+  /// \param[in] type the new type object
+  /// \param[in] dictionary the new dictionary
+  /// \param[in] transpose_map transposition array of this array's indices
+  /// into the target array's indices
+  /// \param[in] pool a pool to allocate the array data from
+  Result<std::shared_ptr<Array>> Transpose(
+      const std::shared_ptr<DataType>& type, const std::shared_ptr<Array>& dictionary,
+      const int32_t* transpose_map, MemoryPool* pool = default_memory_pool()) const;
+
+  Result<std::shared_ptr<Array>> Compact(MemoryPool* pool = default_memory_pool()) const;
+
+  /// \brief Determine whether dictionary arrays may be compared without unification
+  bool CanCompareIndices(const DictionaryArray& other) const;
+
+  /// \brief Return the dictionary for this array, which is stored as
+  /// a member of the ArrayData internal structure
+  const std::shared_ptr<Array>& dictionary() const;
+  const std::shared_ptr<Array>& indices() const;
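+
+  // A minimal usage sketch (not part of the upstream header), assuming
+  // `indices` is a pre-built Int8Array [1, 0, 1] and `dict` a StringArray
+  // ["bar", "foo"]:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Array> encoded,
+  //                         DictionaryArray::FromArrays(indices, dict));
+  //   // `encoded` logically represents ["foo", "bar", "foo"].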
+
+  /// \brief Return the ith value of indices, cast to int64_t. Not recommended
+  /// for use in performance-sensitive code. Does not validate whether the
+  /// value is null or out-of-bounds.
+  int64_t GetValueIndex(int64_t i) const;
+
+  const DictionaryType* dict_type() const { return dict_type_; }
+
+ private:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+  const DictionaryType* dict_type_;
+  std::shared_ptr<Array> indices_;
+
+  // Lazily initialized when invoking dictionary()
+  mutable std::shared_ptr<Array> dictionary_;
+};
+
+/// \brief Helper class for incremental dictionary unification
+class ARROW_EXPORT DictionaryUnifier {
+ public:
+  virtual ~DictionaryUnifier() = default;
+
+  /// \brief Construct a DictionaryUnifier
+  /// \param[in] value_type the data type of the dictionaries
+  /// \param[in] pool MemoryPool to use for memory allocations
+  static Result<std::unique_ptr<DictionaryUnifier>> Make(
+      std::shared_ptr<DataType> value_type, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Unify dictionaries across array chunks
+  ///
+  /// The dictionaries in the array chunks will be unified, their indices
+  /// accordingly transposed.
+  ///
+  /// Only dictionaries with a primitive value type are currently supported.
+  /// However, dictionaries nested inside a more complex type are correctly unified.
+  static Result<std::shared_ptr<ChunkedArray>> UnifyChunkedArray(
+      const std::shared_ptr<ChunkedArray>& array,
+      MemoryPool* pool = default_memory_pool());
+
+  /// \brief Unify dictionaries across the chunks of each table column
+  ///
+  /// The dictionaries in each table column will be unified, their indices
+  /// accordingly transposed.
+  ///
+  /// Only dictionaries with a primitive value type are currently supported.
+  /// However, dictionaries nested inside a more complex type are correctly unified.
+  static Result<std::shared_ptr<Table>> UnifyTable(
+      const Table& table, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Append dictionary to the internal memo
+  virtual Status Unify(const Array& dictionary) = 0;
+
+  /// \brief Append dictionary and compute transpose indices
+  /// \param[in] dictionary the dictionary values to unify
+  /// \param[out] out_transpose a Buffer containing computed transpose indices
+  /// as int32_t values equal in length to the passed dictionary. The value in
+  /// each slot corresponds to the new index value for each original index
+  /// for a DictionaryArray with the old dictionary
+  virtual Status Unify(const Array& dictionary,
+                       std::shared_ptr<Buffer>* out_transpose) = 0;
+
+  /// \brief Return a result DictionaryType with the smallest possible index
+  /// type to accommodate the unified dictionary. The unifier cannot be used
+  /// after this is called
+  virtual Status GetResult(std::shared_ptr<DataType>* out_type,
+                           std::shared_ptr<Array>* out_dict) = 0;
+
+  /// \brief Return a unified dictionary with the given index type. If
+  /// the index type is not large enough then an invalid status will be returned.
+  /// The unifier cannot be used after this is called
+  virtual Status GetResultWithIndexType(const std::shared_ptr<DataType>& index_type,
+                                        std::shared_ptr<Array>* out_dict) = 0;
+};
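+
+// A minimal usage sketch (not part of the upstream header), assuming `dict_a`
+// and `dict_b` are pre-built utf8 dictionaries to be merged:
+//
+//   ARROW_ASSIGN_OR_RAISE(auto unifier, DictionaryUnifier::Make(arrow::utf8()));
+//   std::shared_ptr<Buffer> transpose_a, transpose_b;
+//   ARROW_RETURN_NOT_OK(unifier->Unify(*dict_a, &transpose_a));
+//   ARROW_RETURN_NOT_OK(unifier->Unify(*dict_b, &transpose_b));
+//   std::shared_ptr<DataType> out_type;
+//   std::shared_ptr<Array> out_dict;
+//   ARROW_RETURN_NOT_OK(unifier->GetResult(&out_type, &out_dict));
+//   // transpose_a/b then feed DictionaryArray::Transpose() on each input.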
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h
new file mode 100644
index 0000000000000000000000000000000000000000..768a630e0af54da969c1dd9a00de75e7fada8b3c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_nested.h
@@ -0,0 +1,863 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor classes for List, LargeList, ListView, LargeListView, FixedSizeList,
+// Map, Struct, and Union
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// VarLengthListLikeArray
+
+template <typename TYPE>
+class VarLengthListLikeArray;
+
+namespace internal {
+
+// Private helper for [Large]List[View]Array::SetData.
+// Unfortunately, trying to define VarLengthListLikeArray::SetData outside of this header
+// doesn't play well with MSVC.
+template <typename TYPE>
+void SetListData(VarLengthListLikeArray<TYPE>* self,
+                 const std::shared_ptr<ArrayData>& data,
+                 Type::type expected_type_id = TYPE::type_id);
+
+}  // namespace internal
+
+/// Base class for variable-sized list and list-view arrays, regardless of offset size.
+template <typename TYPE>
+class VarLengthListLikeArray : public Array {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+
+  const TypeClass* var_length_list_like_type() const { return this->list_type_; }
+
+  /// \brief Return array object containing the list's values
+  ///
+  /// Note that this buffer does not account for any slice offset or length.
+  const std::shared_ptr<Array>& values() const { return values_; }
+
+  /// Note that this buffer does not account for any slice offset or length.
+  const std::shared_ptr<Buffer>& value_offsets() const { return data_->buffers[1]; }
+
+  const std::shared_ptr<DataType>& value_type() const { return list_type_->value_type(); }
+
+  /// Return pointer to raw value offsets accounting for any slice offset
+  const offset_type* raw_value_offsets() const {
+    return raw_value_offsets_ + data_->offset;
+  }
+
+  // The following functions will not perform boundschecking
+
+  offset_type value_offset(int64_t i) const {
+    return raw_value_offsets_[i + data_->offset];
+  }
+
+  /// \brief Return the size of the value at a particular index
+  ///
+  /// Since non-empty null lists and list-views are possible, avoid calling this
+  /// function when the list at slot i is null.
+  ///
+  /// \pre IsValid(i)
+  virtual offset_type value_length(int64_t i) const = 0;
+
+  /// \pre IsValid(i)
+  std::shared_ptr<Array> value_slice(int64_t i) const {
+    return values_->Slice(value_offset(i), value_length(i));
+  }
+
+ protected:
+  friend void internal::SetListData<TYPE>(VarLengthListLikeArray<TYPE>* self,
+                                          const std::shared_ptr<ArrayData>& data,
+                                          Type::type expected_type_id);
+
+  const TypeClass* list_type_ = NULLPTR;
+  std::shared_ptr<Array> values_;
+  const offset_type* raw_value_offsets_ = NULLPTR;
+};
+
+// ----------------------------------------------------------------------
+// ListArray / LargeListArray
+
+template <typename TYPE>
+class BaseListArray : public VarLengthListLikeArray<TYPE> {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TYPE::offset_type;
+
+  const TypeClass* list_type() const { return this->var_length_list_like_type(); }
+
+  /// \brief Return the size of the value at a particular index
+  ///
+  /// Since non-empty null lists are possible, avoid calling this
+  /// function when the list at slot i is null.
+  ///
+  /// \pre IsValid(i)
+  offset_type value_length(int64_t i) const final {
+    i += this->data_->offset;
+    return this->raw_value_offsets_[i + 1] - this->raw_value_offsets_[i];
+  }
+};
+
+/// Concrete Array class for list data
+class ARROW_EXPORT ListArray : public BaseListArray<ListType> {
+ public:
+  explicit ListArray(std::shared_ptr<ArrayData> data);
+
+  ListArray(std::shared_ptr<DataType> type, int64_t length,
+            std::shared_ptr<Buffer> value_offsets, std::shared_ptr<Array> values,
+            std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+            int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct ListArray from array of offsets and child value array
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types, and will allocate a new offsets array if necessary (i.e. if
+  /// the offsets contain any nulls). If the offsets do not have nulls, they
+  /// are assumed to be well-formed.
+  ///
+  /// If a null_bitmap is not provided, the nulls will be inferred from the offsets'
+  /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls.
+  ///
+  /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an
+  /// array with offset() > 0).
+  ///
+  /// \param[in] offsets Array containing n + 1 offsets encoding length and
+  /// size. Must be of int32 type
+  /// \param[in] values Array containing list values
+  /// \param[in] pool MemoryPool in case new offsets array needs to be
+  /// allocated because of null values
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  static Result<std::shared_ptr<ListArray>> FromArrays(
+      const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  static Result<std::shared_ptr<ListArray>> FromArrays(
+      std::shared_ptr<DataType> type, const Array& offsets, const Array& values,
+      MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Build a ListArray from a ListViewArray
+  static Result<std::shared_ptr<ListArray>> FromListView(const ListViewArray& source,
+                                                         MemoryPool* pool);
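+
+  // A minimal usage sketch (not part of the upstream header), assuming
+  // `offsets` is a pre-built Int32Array [0, 2, 2, 5] and `values` an Int32
+  // array of length 5:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<ListArray> lists,
+  //                         ListArray::FromArrays(*offsets, *values));
+  //   // lists has 3 slots: values [0,2), [2,2) (empty), and [2,5).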
+
+  /// \brief Return an Array that is a concatenation of the lists in this array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration this array's offsets as well as null elements backed
+  /// by non-empty lists (they are skipped, thus copying may be needed).
+  Result<std::shared_ptr<Array>> Flatten(
+      MemoryPool* memory_pool = default_memory_pool()) const;
+
+  /// \brief Return list offsets as an Int32Array
+  ///
+  /// The returned array will not have a validity bitmap, so you cannot expect
+  /// to pass it to ListArray::FromArrays() and get back the same list array
+  /// if the original one has nulls.
+  std::shared_ptr<Int32Array> offsets() const;
+
+ protected:
+  // This constructor defers SetData to a derived array class
+  ListArray() = default;
+
+  void SetData(const std::shared_ptr<ArrayData>& data);
+};
+
+/// Concrete Array class for large list data (with 64-bit offsets)
+class ARROW_EXPORT LargeListArray : public BaseListArray<LargeListType> {
+ public:
+  explicit LargeListArray(const std::shared_ptr<ArrayData>& data);
+
+  LargeListArray(const std::shared_ptr<DataType>& type, int64_t length,
+                 const std::shared_ptr<Buffer>& value_offsets,
+                 const std::shared_ptr<Array>& values,
+                 const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                 int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct LargeListArray from array of offsets and child value array
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types, and will allocate a new offsets array if necessary (i.e. if
+  /// the offsets contain any nulls). If the offsets do not have nulls, they
+  /// are assumed to be well-formed.
+  ///
+  /// If a null_bitmap is not provided, the nulls will be inferred from the offsets'
+  /// null bitmap. But if a null_bitmap is provided, the offsets array can't have nulls.
+  ///
+  /// And when a null_bitmap is provided, the offsets array cannot be a slice (i.e. an
+  /// array with offset() > 0).
+  ///
+  /// \param[in] offsets Array containing n + 1 offsets encoding length and
+  /// size. Must be of int64 type
+  /// \param[in] values Array containing list values
+  /// \param[in] pool MemoryPool in case new offsets array needs to be
+  /// allocated because of null values
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  static Result<std::shared_ptr<LargeListArray>> FromArrays(
+      const Array& offsets, const Array& values, MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  static Result<std::shared_ptr<LargeListArray>> FromArrays(
+      std::shared_ptr<DataType> type, const Array& offsets, const Array& values,
+      MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Build a LargeListArray from a LargeListViewArray
+  static Result<std::shared_ptr<LargeListArray>> FromListView(
+      const LargeListViewArray& source, MemoryPool* pool);
+
+  /// \brief Return an Array that is a concatenation of the lists in this array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration this array's offsets as well as null elements backed
+  /// by non-empty lists (they are skipped, thus copying may be needed).
+  Result<std::shared_ptr<Array>> Flatten(
+      MemoryPool* memory_pool = default_memory_pool()) const;
+
+  /// \brief Return list offsets as an Int64Array
+  std::shared_ptr<Int64Array> offsets() const;
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+};
+
+// ----------------------------------------------------------------------
+// ListViewArray / LargeListViewArray
+
+template <typename TYPE>
+class BaseListViewArray : public VarLengthListLikeArray<TYPE> {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TYPE::offset_type;
+
+  const TypeClass* list_view_type() const { return this->var_length_list_like_type(); }
+
+  /// \brief Note that this buffer does not account for any slice offset or length.
+  const std::shared_ptr<Buffer>& value_sizes() const { return this->data_->buffers[2]; }
+
+  /// \brief Return pointer to raw value sizes accounting for any slice offset
+  const offset_type* raw_value_sizes() const {
+    return raw_value_sizes_ + this->data_->offset;
+  }
+
+  /// \brief Return the size of the value at a particular index
+  ///
+  /// This should not be called if the list-view at slot i is null.
+  /// The returned size in those cases could be any value from 0 to the
+  /// length of the child values array.
+  ///
+  /// \pre IsValid(i)
+  offset_type value_length(int64_t i) const final {
+    return this->raw_value_sizes_[i + this->data_->offset];
+  }
+
+ protected:
+  const offset_type* raw_value_sizes_ = NULLPTR;
+};
+
+/// \brief Concrete Array class for list-view data
+class ARROW_EXPORT ListViewArray : public BaseListViewArray<ListViewType> {
+ public:
+  explicit ListViewArray(std::shared_ptr<ArrayData> data);
+
+  ListViewArray(std::shared_ptr<DataType> type, int64_t length,
+                std::shared_ptr<Buffer> value_offsets,
+                std::shared_ptr<Buffer> value_sizes, std::shared_ptr<Array> values,
+                std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+                int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct ListViewArray from array of offsets, sizes, and child
+  /// value array
+  ///
+  /// Construct a ListViewArray using buffers from offsets and sizes arrays
+  /// that project views into the child values array.
+  ///
+  /// This function does the bare minimum of validation of the offsets/sizes and
+  /// input types. The offset and length of the offsets and sizes arrays must
+  /// match and that will be checked, but their contents will be assumed to be
+  /// well-formed.
+  ///
+  /// If a null_bitmap is not provided, the nulls will be inferred from the
+  /// offsets' null bitmap. But if a null_bitmap is provided, the offsets array
+  /// can't have nulls.
+  ///
+  /// And when a null_bitmap is provided, neither the offsets nor sizes array can be a
+  /// slice (i.e. an array with offset() > 0).
+  ///
+  /// \param[in] offsets An array of int32 offsets into the values array. NULL values are
+  /// supported if the corresponding value in sizes is NULL or 0.
+  /// \param[in] sizes An array containing the int32 sizes of every view. NULL values are
+  /// taken to represent a NULL list-view in the array being created.
+  /// \param[in] values Array containing list values
+  /// \param[in] pool MemoryPool
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  static Result<std::shared_ptr<ListViewArray>> FromArrays(
+      const Array& offsets, const Array& sizes, const Array& values,
+      MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  static Result<std::shared_ptr<ListViewArray>> FromArrays(
+      std::shared_ptr<DataType> type, const Array& offsets, const Array& sizes,
+      const Array& values, MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Build a ListViewArray from a ListArray
+  static Result<std::shared_ptr<ListViewArray>> FromList(const ListArray& list_array,
+                                                         MemoryPool* pool);
+
+  /// \brief Return an Array that is a concatenation of the list-views in this array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration this array's offsets (which can be in any order)
+  /// and sizes. Nulls are skipped.
+  ///
+  /// This function invokes Concatenate() if list-views are non-contiguous. It
+  /// will try to minimize the number of array slices passed to Concatenate() by
+  /// maximizing the size of each slice (containing as many contiguous
+  /// list-views as possible).
+  Result<std::shared_ptr<Array>> Flatten(
+      MemoryPool* memory_pool = default_memory_pool()) const;
+
+  /// \brief Return list-view offsets as an Int32Array
+  ///
+  /// The returned array will not have a validity bitmap, so you cannot expect
+  /// to pass it to ListArray::FromArrays() and get back the same list array
+  /// if the original one has nulls.
+  std::shared_ptr<Int32Array> offsets() const;
+
+  /// \brief Return list-view sizes as an Int32Array
+  ///
+  /// The returned array will not have a validity bitmap, so you cannot expect
+  /// to pass it to ListViewArray::FromArrays() and get back the same list
+  /// array if the original one has nulls.
+  std::shared_ptr<Int32Array> sizes() const;
+
+ protected:
+  // This constructor defers SetData to a derived array class
+  ListViewArray() = default;
+
+  void SetData(const std::shared_ptr<ArrayData>& data);
+};
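+
+// A minimal usage sketch (not part of the upstream header), assuming
+// pre-built Int32Arrays `offsets` = [2, 0, 0] and `sizes` = [3, 0, 2] over a
+// `values` array of length 5; list-views may overlap and appear in any order:
+//
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<ListViewArray> views,
+//                         ListViewArray::FromArrays(*offsets, *sizes, *values));
+//   // Slot 0 views values [2, 5), slot 1 is empty, slot 2 views [0, 2).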
+
+/// \brief Concrete Array class for large list-view data (with 64-bit offsets
+/// and sizes)
+class ARROW_EXPORT LargeListViewArray : public BaseListViewArray<LargeListViewType> {
+ public:
+  explicit LargeListViewArray(std::shared_ptr<ArrayData> data);
+
+  LargeListViewArray(std::shared_ptr<DataType> type, int64_t length,
+                     std::shared_ptr<Buffer> value_offsets,
+                     std::shared_ptr<Buffer> value_sizes, std::shared_ptr<Array> values,
+                     std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+                     int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct LargeListViewArray from array of offsets, sizes, and child
+  /// value array
+  ///
+  /// Construct a LargeListViewArray using buffers from offsets and sizes arrays
+  /// that project views into the values array.
+  ///
+  /// This function does the bare minimum of validation of the offsets/sizes and
+  /// input types. The offset and length of the offsets and sizes arrays must
+  /// match and that will be checked, but their contents will be assumed to be
+  /// well-formed.
+  ///
+  /// If a null_bitmap is not provided, the nulls will be inferred from the offsets' or
+  /// sizes' null bitmap. Only one of these two is allowed to have a null bitmap. But if a
+  /// null_bitmap is provided, the offsets array and the sizes array can't have nulls.
+  ///
+  /// And when a null_bitmap is provided, neither the offsets nor sizes array can be a
+  /// slice (i.e. an array with offset() > 0).
+  ///
+  /// \param[in] offsets An array of int64 offsets into the values array. NULL values are
+  /// supported if the corresponding value in sizes is NULL or 0.
+  /// \param[in] sizes An array containing the int64 sizes of every view. NULL values are
+  /// taken to represent a NULL list-view in the array being created.
+  /// \param[in] values Array containing list values
+  /// \param[in] pool MemoryPool
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  static Result<std::shared_ptr<LargeListViewArray>> FromArrays(
+      const Array& offsets, const Array& sizes, const Array& values,
+      MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  static Result<std::shared_ptr<LargeListViewArray>> FromArrays(
+      std::shared_ptr<DataType> type, const Array& offsets, const Array& sizes,
+      const Array& values, MemoryPool* pool = default_memory_pool(),
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Build a LargeListViewArray from a LargeListArray
+  static Result<std::shared_ptr<LargeListViewArray>> FromList(
+      const LargeListArray& list_array, MemoryPool* pool);
+
+  /// \brief Return an Array that is a concatenation of the large list-views in this
+  /// array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration this array's offsets (which can be in any order)
+  /// and sizes. Nulls are skipped.
+  Result<std::shared_ptr<Array>> Flatten(
+      MemoryPool* memory_pool = default_memory_pool()) const;
+
+  /// \brief Return list-view offsets as an Int64Array
+  ///
+  /// The returned array will not have a validity bitmap, so you cannot expect
+  /// to pass it to LargeListArray::FromArrays() and get back the same list array
+  /// if the original one has nulls.
+  std::shared_ptr<Int64Array> offsets() const;
+
+  /// \brief Return list-view sizes as an Int64Array
+  ///
+  /// The returned array will not have a validity bitmap, so you cannot expect
+  /// to pass it to LargeListViewArray::FromArrays() and get back the same list
+  /// array if the original one has nulls.
+  std::shared_ptr<Int64Array> sizes() const;
+
+ protected:
+  // This constructor defers SetData to a derived array class
+  LargeListViewArray() = default;
+
+  void SetData(const std::shared_ptr<ArrayData>& data);
+};
+
+// ----------------------------------------------------------------------
+// MapArray
+
+/// Concrete Array class for map data
+///
+/// NB: "value" in this context refers to a pair of a key and the corresponding item
+class ARROW_EXPORT MapArray : public ListArray {
+ public:
+  using TypeClass = MapType;
+
+  explicit MapArray(const std::shared_ptr<ArrayData>& data);
+
+  MapArray(const std::shared_ptr<DataType>& type, int64_t length,
+           const std::shared_ptr<Buffer>& value_offsets,
+           const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
+           const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+           int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  MapArray(const std::shared_ptr<DataType>& type, int64_t length, BufferVector buffers,
+           const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
+           int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  MapArray(const std::shared_ptr<DataType>& type, int64_t length,
+           const std::shared_ptr<Buffer>& value_offsets,
+           const std::shared_ptr<Array>& values,
+           const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+           int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Construct MapArray from array of offsets and child key, item arrays
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types, and will allocate a new offsets array if necessary (i.e. if
+  /// the offsets contain any nulls). If the offsets do not have nulls, they
+  /// are assumed to be well-formed
+  ///
+  /// \param[in] offsets Array containing n + 1 offsets encoding length and
+  /// size. Must be of int32 type
+  /// \param[in] keys Array containing key values
+  /// \param[in] items Array containing item values
+  /// \param[in] pool MemoryPool in case new offsets array needs to be
+  /// allocated because of null values
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<Array>& offsets, const std::shared_ptr<Array>& keys,
+      const std::shared_ptr<Array>& items, MemoryPool* pool = default_memory_pool());
+
+  static Result<std::shared_ptr<Array>> FromArrays(
+      std::shared_ptr<DataType> type, const std::shared_ptr<Array>& offsets,
+      const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
+      MemoryPool* pool = default_memory_pool());
+
+  const MapType* map_type() const { return map_type_; }
+
+  /// \brief Return array object containing all map keys
+  const std::shared_ptr<Array>& keys() const { return keys_; }
+
+  /// \brief Return array object containing all mapped items
+  const std::shared_ptr<Array>& items() const { return items_; }
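+
+  // A minimal usage sketch (not part of the upstream header), assuming
+  // pre-built arrays: `offsets` Int32Array [0, 2, 3], `keys` StringArray
+  // ["a", "b", "c"], and `items` Int32Array [1, 2, 3]:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Array> map_arr,
+  //                         MapArray::FromArrays(offsets, keys, items));
+  //   // Slot 0 holds {"a": 1, "b": 2}; slot 1 holds {"c": 3}.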
+
+  /// Validate child data before constructing the actual MapArray.
+  static Status ValidateChildData(
+      const std::vector<std::shared_ptr<ArrayData>>& child_data);
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+
+  static Result<std::shared_ptr<Array>> FromArraysInternal(
+      std::shared_ptr<DataType> type, const std::shared_ptr<Array>& offsets,
+      const std::shared_ptr<Array>& keys, const std::shared_ptr<Array>& items,
+      MemoryPool* pool);
+
+ private:
+  const MapType* map_type_;
+  std::shared_ptr<Array> keys_, items_;
+};
+
+// ----------------------------------------------------------------------
+// FixedSizeListArray
+
+/// Concrete Array class for fixed size list data
+class ARROW_EXPORT FixedSizeListArray : public Array {
+ public:
+  using TypeClass = FixedSizeListType;
+  using offset_type = TypeClass::offset_type;
+
+  explicit FixedSizeListArray(const std::shared_ptr<ArrayData>& data);
+
+  FixedSizeListArray(const std::shared_ptr<DataType>& type, int64_t length,
+                     const std::shared_ptr<Array>& values,
+                     const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                     int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  const FixedSizeListType* list_type() const;
+
+  /// \brief Return array object containing the list's values
+  const std::shared_ptr<Array>& values() const;
+
+  const std::shared_ptr<DataType>& value_type() const;
+
+  // The following functions will not perform boundschecking
+  int64_t value_offset(int64_t i) const {
+    i += data_->offset;
+    return list_size_ * i;
+  }
+  /// \brief Return the fixed-size of the values
+  ///
+  /// No matter the value of the index parameter, the result is the same.
+  /// So even when the value at slot i is null, this function will return a
+  /// non-zero size.
+  ///
+  /// \pre IsValid(i)
+  int32_t value_length(int64_t i = 0) const {
+    ARROW_UNUSED(i);
+    return list_size_;
+  }
+  /// \pre IsValid(i)
+  std::shared_ptr<Array> value_slice(int64_t i) const {
+    return values_->Slice(value_offset(i), value_length(i));
+  }
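+
+  // A minimal usage sketch (not part of the upstream header), assuming
+  // `values` is a pre-built Int32 array of length 6; each output slot spans
+  // list_size consecutive values:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Array> fsl,
+  //                         FixedSizeListArray::FromArrays(values, /*list_size=*/2));
+  //   // fsl->length() == 3; slot i views values [2*i, 2*i + 2).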
+
+  /// \brief Return an Array that is a concatenation of the lists in this array.
+  ///
+  /// Note that it's different from `values()` in that it takes into
+  /// consideration null elements (they are skipped, thus copying may be needed).
+  Result<std::shared_ptr<Array>> Flatten(
+      MemoryPool* memory_pool = default_memory_pool()) const;
+
+  /// \brief Construct FixedSizeListArray from child value array and value_length
+  ///
+  /// \param[in] values Array containing list values
+  /// \param[in] list_size The fixed length of each list
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  /// \return Will have length equal to values.length() / list_size
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<Array>& values, int32_t list_size,
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+  /// \brief Construct FixedSizeListArray from child value array and type
+  ///
+  /// \param[in] values Array containing list values
+  /// \param[in] type The fixed sized list type
+  /// \param[in] null_bitmap Optional validity bitmap
+  /// \param[in] null_count Optional null count in null_bitmap
+  /// \return Will have length equal to values.length() / type.list_size()
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<Array>& values, std::shared_ptr<DataType> type,
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount);
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+  int32_t list_size_;
+
+ private:
+  std::shared_ptr<Array> values_;
+};
+
+// ----------------------------------------------------------------------
+// Struct
+
+/// Concrete Array class for struct data
+class ARROW_EXPORT StructArray : public Array {
+ public:
+  using TypeClass = StructType;
+
+  explicit StructArray(const std::shared_ptr<ArrayData>& data);
+
+  StructArray(const std::shared_ptr<DataType>& type, int64_t length,
+              const std::vector<std::shared_ptr<Array>>& children,
+              std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+              int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Return a StructArray from child arrays and field names.
+  ///
+  /// The length and data type are automatically inferred from the arguments.
+  /// There should be at least one child array.
+  static Result<std::shared_ptr<StructArray>> Make(
+      const ArrayVector& children, const std::vector<std::string>& field_names,
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Return a StructArray from child arrays and fields.
+  ///
+  /// The length is automatically inferred from the arguments.
+  /// There should be at least one child array. This method does not
+  /// check that field types and child array types are consistent.
+  static Result<std::shared_ptr<StructArray>> Make(
+      const ArrayVector& children, const FieldVector& fields,
+      std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+      int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  const StructType* struct_type() const;
+
+  // Return a shared pointer in case the requestor desires to share ownership
+  // with this array. The returned array has its offset, length and null
+  // count adjusted.
+  const std::shared_ptr<Array>& field(int pos) const;
+
+  const ArrayVector& fields() const;
+
+  /// Returns null if name not found
+  std::shared_ptr<Array> GetFieldByName(const std::string& name) const;
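+
+  // A minimal usage sketch (not part of the upstream header), assuming
+  // pre-built child arrays `xs` (int32) and `ys` (utf8) of equal length:
+  //
+  //   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<StructArray> rec,
+  //                         StructArray::Make({xs, ys}, {"x", "y"}));
+  //   std::shared_ptr<Array> x_col = rec->GetFieldByName("x");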
+  Status CanReferenceFieldsByNames(const std::vector<std::string>& names) const;
+
+  /// \brief Flatten this array as a vector of arrays, one for each field
+  ///
+  /// \param[in] pool The pool to allocate null bitmaps from, if necessary
+  Result<ArrayVector> Flatten(MemoryPool* pool = default_memory_pool()) const;
+
+  /// \brief Get one of the child arrays, combining its null bitmap
+  /// with the parent struct array's bitmap.
+  ///
+  /// \param[in] index Which child array to get
+  /// \param[in] pool The pool to allocate null bitmaps from, if necessary
+  Result<std::shared_ptr<Array>> GetFlattenedField(
+      int index, MemoryPool* pool = default_memory_pool()) const;
+
+ private:
+  // For caching boxed child data
+  // XXX This is not handled in a thread-safe manner.
+  mutable ArrayVector boxed_fields_;
+};
+
+// ----------------------------------------------------------------------
+// Union
+
+/// Base class for SparseUnionArray and DenseUnionArray
+class ARROW_EXPORT UnionArray : public Array {
+ public:
+  using type_code_t = int8_t;
+
+  /// Note that this buffer does not account for any slice offset
+  const std::shared_ptr<Buffer>& type_codes() const { return data_->buffers[1]; }
+
+  const type_code_t* raw_type_codes() const { return raw_type_codes_ + data_->offset; }
+
+  /// The logical type code of the value at index.
+  type_code_t type_code(int64_t i) const { return raw_type_codes_[i + data_->offset]; }
+
+  /// The physical child id containing value at index.
+  int child_id(int64_t i) const {
+    return union_type_->child_ids()[raw_type_codes_[i + data_->offset]];
+  }
+
+  const UnionType* union_type() const { return union_type_; }
+
+  UnionMode::type mode() const { return union_type_->mode(); }
+
+  /// \brief Return the given field as an individual array.
+  ///
+  /// For sparse unions, the returned array has its offset, length and null
+  /// count adjusted.
+  std::shared_ptr<Array> field(int pos) const;
+
+ protected:
+  void SetData(std::shared_ptr<ArrayData> data);
+
+  const type_code_t* raw_type_codes_;
+  const UnionType* union_type_;
+
+  // For caching boxed child data
+  mutable std::vector<std::shared_ptr<Array>> boxed_fields_;
+};
+
+/// Concrete Array class for sparse union data
+class ARROW_EXPORT SparseUnionArray : public UnionArray {
+ public:
+  using TypeClass = SparseUnionType;
+
+  explicit SparseUnionArray(std::shared_ptr<ArrayData> data);
+
+  SparseUnionArray(std::shared_ptr<DataType> type, int64_t length, ArrayVector children,
+                   std::shared_ptr<Array> type_ids, int64_t offset = 0);
+
+  /// \brief Construct SparseUnionArray from type_ids and children
+  ///
+  /// This function does the bare minimum of validation of the input types.
+  ///
+  /// \param[in] type_ids An array of logical type ids for the union type
+  /// \param[in] children Vector of children Arrays containing the data for each type.
+  /// \param[in] type_codes Vector of type codes.
+  static Result<std::shared_ptr<Array>> Make(const Array& type_ids, ArrayVector children,
+                                             std::vector<type_code_t> type_codes) {
+    return Make(std::move(type_ids), std::move(children), std::vector<std::string>{},
+                std::move(type_codes));
+  }
+
+  /// \brief Construct SparseUnionArray with custom field names from type_ids and children
+  ///
+  /// This function does the bare minimum of validation of the input types.
+  ///
+  /// \param[in] type_ids An array of logical type ids for the union type
+  /// \param[in] children Vector of children Arrays containing the data for each type.
+  /// \param[in] field_names Vector of strings containing the name of each field.
+  /// \param[in] type_codes Vector of type codes.
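+  ///
+  /// A usage sketch (hedged; `type_ids` is an assumed Int8Array and `ints`,
+  /// `strs` are assumed child arrays, all illustrative names):
+  ///
+  /// \code{.cpp}
+  /// ARROW_ASSIGN_OR_RAISE(
+  ///     std::shared_ptr<arrow::Array> u,
+  ///     arrow::SparseUnionArray::Make(*type_ids, {ints, strs}, {"i", "s"}, {0, 1}));
+  /// \endcode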
+  static Result<std::shared_ptr<Array>> Make(const Array& type_ids, ArrayVector children,
+                                             std::vector<std::string> field_names = {},
+                                             std::vector<type_code_t> type_codes = {});
+
+  const SparseUnionType* union_type() const {
+    return internal::checked_cast<const SparseUnionType*>(union_type_);
+  }
+
+  /// \brief Get one of the child arrays, adjusting its null bitmap
+  /// where the union array type code does not match.
+  ///
+  /// \param[in] index Which child array to get (i.e. the physical index, not the type
+  /// code)
+  /// \param[in] pool The pool to allocate null bitmaps from, if necessary
+  Result<std::shared_ptr<Array>> GetFlattenedField(
+      int index, MemoryPool* pool = default_memory_pool()) const;
+
+ protected:
+  void SetData(std::shared_ptr<ArrayData> data);
+};
+
+/// \brief Concrete Array class for dense union data
+///
+/// Note that union types do not have a validity bitmap
+class ARROW_EXPORT DenseUnionArray : public UnionArray {
+ public:
+  using TypeClass = DenseUnionType;
+
+  explicit DenseUnionArray(const std::shared_ptr<ArrayData>& data);
+
+  DenseUnionArray(std::shared_ptr<DataType> type, int64_t length, ArrayVector children,
+                  std::shared_ptr<Array> type_ids,
+                  std::shared_ptr<Array> value_offsets = NULLPTR, int64_t offset = 0);
+
+  /// \brief Construct DenseUnionArray from type_ids, value_offsets, and children
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types.
+  ///
+  /// \param[in] type_ids An array of logical type ids for the union type
+  /// \param[in] value_offsets An array of signed int32 values indicating the
+  /// relative offset into the respective child array for the type in a given slot.
+  /// The respective offsets for each child value array must be in order / increasing.
+  /// \param[in] children Vector of children Arrays containing the data for each type.
+  /// \param[in] type_codes Vector of type codes.
+  static Result<std::shared_ptr<Array>> Make(const Array& type_ids,
+                                             const Array& value_offsets,
+                                             ArrayVector children,
+                                             std::vector<type_code_t> type_codes) {
+    return Make(type_ids, value_offsets, std::move(children), std::vector<std::string>{},
+                std::move(type_codes));
+  }
+
+  /// \brief Construct DenseUnionArray with custom field names from type_ids,
+  /// value_offsets, and children
+  ///
+  /// This function does the bare minimum of validation of the offsets and
+  /// input types.
+  ///
+  /// \param[in] type_ids An array of logical type ids for the union type
+  /// \param[in] value_offsets An array of signed int32 values indicating the
+  /// relative offset into the respective child array for the type in a given slot.
+  /// The respective offsets for each child value array must be in order / increasing.
+  /// \param[in] children Vector of children Arrays containing the data for each type.
+  /// \param[in] field_names Vector of strings containing the name of each field.
+  /// \param[in] type_codes Vector of type codes.
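+  ///
+  /// Sketch (hedged; `type_ids` is an assumed Int8Array, `value_offsets` an
+  /// assumed Int32Array of per-child offsets, and the rest illustrative):
+  ///
+  /// \code{.cpp}
+  /// ARROW_ASSIGN_OR_RAISE(
+  ///     std::shared_ptr<arrow::Array> u,
+  ///     arrow::DenseUnionArray::Make(*type_ids, *value_offsets, {ints, strs},
+  ///                                  {"i", "s"}, {0, 1}));
+  /// \endcode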
+  static Result<std::shared_ptr<Array>> Make(const Array& type_ids,
+                                             const Array& value_offsets,
+                                             ArrayVector children,
+                                             std::vector<std::string> field_names = {},
+                                             std::vector<type_code_t> type_codes = {});
+
+  const DenseUnionType* union_type() const {
+    return internal::checked_cast<const DenseUnionType*>(union_type_);
+  }
+
+  /// Note that this buffer does not account for any slice offset
+  const std::shared_ptr<Buffer>& value_offsets() const { return data_->buffers[2]; }
+
+  int32_t value_offset(int64_t i) const { return raw_value_offsets_[i + data_->offset]; }
+
+  const int32_t* raw_value_offsets() const { return raw_value_offsets_ + data_->offset; }
+
+ protected:
+  const int32_t* raw_value_offsets_;
+
+  void SetData(const std::shared_ptr<ArrayData>& data);
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6df92e3b788ceaf74d67219b4f88fd97d786724
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h
@@ -0,0 +1,202 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor types for primitive/C-type-based arrays, such as numbers,
+// boolean, and temporal types.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/stl_iterator.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"  // IWYU pragma: export
+#include "arrow/type_traits.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// Concrete Array class for boolean data
+class ARROW_EXPORT BooleanArray : public PrimitiveArray {
+ public:
+  using TypeClass = BooleanType;
+  using IteratorType = stl::ArrayIterator<BooleanArray>;
+
+  explicit BooleanArray(const std::shared_ptr<ArrayData>& data);
+
+  BooleanArray(int64_t length, const std::shared_ptr<Buffer>& data,
+               const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+               int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  bool Value(int64_t i) const {
+    return bit_util::GetBit(reinterpret_cast<const uint8_t*>(raw_values_),
+                            i + data_->offset);
+  }
+
+  bool GetView(int64_t i) const { return Value(i); }
+
+  std::optional<bool> operator[](int64_t i) const { return *IteratorType(*this, i); }
+
+  /// \brief Return the number of false (0) values among the valid
+  /// values. Result is not cached.
+  int64_t false_count() const;
+
+  /// \brief Return the number of true (1) values among the valid
+  /// values. Result is not cached.
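+  ///
+  /// Sketch (hedged; `flags` is an assumed BooleanArray):
+  ///
+  /// \code{.cpp}
+  /// // Recomputed on each call, so hoist it out of hot loops.
+  /// int64_t n_true = flags.true_count();
+  /// \endcode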
+ int64_t true_count() const; + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using PrimitiveArray::PrimitiveArray; +}; + +/// \addtogroup numeric-arrays +/// +/// @{ + +/// \brief Concrete Array class for numeric data with a corresponding C type +/// +/// This class is templated on the corresponding DataType subclass for the +/// given data, for example NumericArray or NumericArray. +/// +/// Note that convenience aliases are available for all accepted types +/// (for example Int8Array for NumericArray). +template +class NumericArray : public PrimitiveArray { + public: + using TypeClass = TYPE; + using value_type = typename TypeClass::c_type; + using IteratorType = stl::ArrayIterator>; + + explicit NumericArray(const std::shared_ptr& data) : PrimitiveArray(data) {} + + // Only enable this constructor without a type argument for types without additional + // metadata + template + NumericArray(enable_if_parameter_free length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : PrimitiveArray(TypeTraits::type_singleton(), length, data, null_bitmap, + null_count, offset) {} + + const value_type* raw_values() const { + return reinterpret_cast(raw_values_) + data_->offset; + } + + value_type Value(int64_t i) const { return raw_values()[i]; } + + // For API compatibility with BinaryArray etc. + value_type GetView(int64_t i) const { return Value(i); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + protected: + using PrimitiveArray::PrimitiveArray; +}; + +/// DayTimeArray +/// --------------------- +/// \brief Array of Day and Millisecond values. +class ARROW_EXPORT DayTimeIntervalArray : public PrimitiveArray { + public: + using TypeClass = DayTimeIntervalType; + using IteratorType = stl::ArrayIterator; + + explicit DayTimeIntervalArray(const std::shared_ptr& data); + + DayTimeIntervalArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + DayTimeIntervalArray(int64_t length, const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + TypeClass::DayMilliseconds GetValue(int64_t i) const; + TypeClass::DayMilliseconds Value(int64_t i) const { return GetValue(i); } + + // For compatibility with Take kernel. + TypeClass::DayMilliseconds GetView(int64_t i) const { return GetValue(i); } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + int32_t byte_width() const { return sizeof(TypeClass::DayMilliseconds); } + + const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); } +}; + +/// \brief Array of Month, Day and nanosecond values. 
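+///
+/// A reading sketch (hedged; `arr` is an assumed MonthDayNanoIntervalArray):
+///
+/// \code{.cpp}
+/// arrow::MonthDayNanoIntervalType::MonthDayNanos v = arr.GetValue(0);
+/// // v.months, v.days, v.nanoseconds
+/// \endcode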
+class ARROW_EXPORT MonthDayNanoIntervalArray : public PrimitiveArray { + public: + using TypeClass = MonthDayNanoIntervalType; + using IteratorType = stl::ArrayIterator; + + explicit MonthDayNanoIntervalArray(const std::shared_ptr& data); + + MonthDayNanoIntervalArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + MonthDayNanoIntervalArray(int64_t length, const std::shared_ptr& data, + const std::shared_ptr& null_bitmap = NULLPTR, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + TypeClass::MonthDayNanos GetValue(int64_t i) const; + TypeClass::MonthDayNanos Value(int64_t i) const { return GetValue(i); } + + // For compatibility with Take kernel. + TypeClass::MonthDayNanos GetView(int64_t i) const { return GetValue(i); } + + IteratorType begin() const { return IteratorType(*this); } + + IteratorType end() const { return IteratorType(*this, length()); } + + std::optional operator[](int64_t i) const { + return *IteratorType(*this, i); + } + + int32_t byte_width() const { return sizeof(TypeClass::MonthDayNanos); } + + const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); } +}; + +/// @} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h new file mode 100644 index 0000000000000000000000000000000000000000..b46b0855ab36776eec4e22cef1a35112e2d18fa8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_run_end.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Array accessor classes run-end encoded arrays + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-arrays +/// +/// @{ + +// ---------------------------------------------------------------------- +// RunEndEncoded + +/// \brief Array type for run-end encoded data +class ARROW_EXPORT RunEndEncodedArray : public Array { + private: + std::shared_ptr run_ends_array_; + std::shared_ptr values_array_; + + public: + using TypeClass = RunEndEncodedType; + + explicit RunEndEncodedArray(const std::shared_ptr& data); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. + RunEndEncodedArray(const std::shared_ptr& type, int64_t length, + const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t offset = 0); + + /// \brief Construct a RunEndEncodedArray from all parameters + /// + /// The length and offset parameters refer to the dimensions of the logical + /// array which is the array we would get after expanding all the runs into + /// repeated values. As such, length can be much greater than the length of + /// the child run_ends and values arrays. + static Result> Make( + const std::shared_ptr& type, int64_t logical_length, + const std::shared_ptr& run_ends, const std::shared_ptr& values, + int64_t logical_offset = 0); + + /// \brief Construct a RunEndEncodedArray from values and run ends arrays + /// + /// The data type is automatically inferred from the arguments. + /// The run_ends and values arrays must have the same length. + static Result> Make( + int64_t logical_length, const std::shared_ptr& run_ends, + const std::shared_ptr& values, int64_t logical_offset = 0); + + protected: + void SetData(const std::shared_ptr& data); + + public: + /// \brief Returns an array holding the logical indexes of each run-end + /// + /// The physical offset to the array is applied. + const std::shared_ptr& run_ends() const { return run_ends_array_; } + + /// \brief Returns an array holding the values of each run + /// + /// The physical offset to the array is applied. + const std::shared_ptr& values() const { return values_array_; } + + /// \brief Returns an array holding the logical indexes of each run end + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array and rewrites all the run end values to be relative to the logical + /// offset and cuts the end of the array to the logical length. + Result> LogicalRunEnds(MemoryPool* pool) const; + + /// \brief Returns an array holding the values of each run + /// + /// If a non-zero logical offset is set, this function allocates a new + /// array containing only the values within the logical range. + std::shared_ptr LogicalValues() const; + + /// \brief Find the physical offset of this REE array + /// + /// This function uses binary-search, so it has a O(log N) cost. 
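+  ///
+  /// Sketch (hedged; `ree` is an assumed RunEndEncodedArray, e.g. a slice):
+  ///
+  /// \code{.cpp}
+  /// // Physical index of the run containing the first logical element.
+  /// int64_t phys_offset = ree.FindPhysicalOffset();
+  /// \endcode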
+  int64_t FindPhysicalOffset() const;
+
+  /// \brief Find the physical length of this REE array
+  ///
+  /// The physical length of an REE is the number of physical values (and
+  /// run-ends) necessary to represent the logical range of values from offset
+  /// to length.
+  ///
+  /// Avoid calling this function if the physical length can be established in
+  /// some other way (e.g. when iterating over the runs sequentially until the
+  /// end). This function uses binary-search, so it has an O(log N) cost.
+  int64_t FindPhysicalLength() const;
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h
new file mode 100644
index 0000000000000000000000000000000000000000..11036797e014f499db6f34604fb3344bf8bb660d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_base.h
@@ -0,0 +1,370 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>  // IWYU pragma: keep
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/array_primitive.h"
+#include "arrow/buffer.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace internal {
+
+template <class Builder, class V>
+class ArrayBuilderExtraOps {
+ public:
+  /// \brief Append a value from an optional or null if it has no value.
+  Status AppendOrNull(const std::optional<V>& value) {
+    auto* self = static_cast<Builder*>(this);
+    return value.has_value() ? self->Append(*value) : self->AppendNull();
+  }
+
+  /// \brief Append a value from an optional or null if it has no value.
+  ///
+  /// Unsafe methods don't check existing size.
+  void UnsafeAppendOrNull(const std::optional<V>& value) {
+    auto* self = static_cast<Builder*>(this);
+    return value.has_value() ? self->UnsafeAppend(*value) : self->UnsafeAppendNull();
+  }
+};
+
+}  // namespace internal
+
+/// \defgroup numeric-builders Concrete builder subclasses for numeric types
+/// @{
+/// @}
+
+/// \defgroup temporal-builders Concrete builder subclasses for temporal types
+/// @{
+/// @}
+
+/// \defgroup binary-builders Concrete builder subclasses for binary types
+/// @{
+/// @}
+
+/// \defgroup nested-builders Concrete builder subclasses for nested types
+/// @{
+/// @}
+
+/// \defgroup dictionary-builders Concrete builder subclasses for dictionary types
+/// @{
+/// @}
+
+/// \defgroup run-end-encoded-builders Concrete builder subclasses for run-end encoded
+/// arrays
+/// @{
+/// @}
+
+constexpr int64_t kMinBuilderCapacity = 1 << 5;
+constexpr int64_t kListMaximumElements = std::numeric_limits<int32_t>::max() - 1;
+
+/// Base class for all data array builders.
+///
+/// This class provides facilities for incrementally building the null bitmap
+/// (see Append methods) and as a side effect the current number of slots and
+/// the null count.
+///
+/// \note Users are expected to use builders as one of the concrete types below.
+/// For example, ArrayBuilder* pointing to BinaryBuilder should be downcast before use.
+class ARROW_EXPORT ArrayBuilder {
+ public:
+  explicit ArrayBuilder(MemoryPool* pool, int64_t alignment = kDefaultBufferAlignment)
+      : pool_(pool), alignment_(alignment), null_bitmap_builder_(pool, alignment) {}
+
+  ARROW_DEFAULT_MOVE_AND_ASSIGN(ArrayBuilder);
+
+  virtual ~ArrayBuilder() = default;
+
+  /// For nested types. Since the objects are owned by this class instance, we
+  /// skip shared pointers and just return a raw pointer
+  ArrayBuilder* child(int i) { return children_[i].get(); }
+
+  const std::shared_ptr<ArrayBuilder>& child_builder(int i) const {
+    return children_[i];
+  }
+
+  int num_children() const { return static_cast<int>(children_.size()); }
+
+  virtual int64_t length() const { return length_; }
+  int64_t null_count() const { return null_count_; }
+  int64_t capacity() const { return capacity_; }
+
+  /// \brief Ensure that enough memory has been allocated to fit the indicated
+  /// number of total elements in the builder, including any that have already
+  /// been appended. Does not account for reallocations that may be due to
+  /// variable size data, like binary values. To make space for incremental
+  /// appends, use Reserve instead.
+  ///
+  /// \param[in] capacity the minimum number of total array values to
+  ///            accommodate. Must be greater than the current capacity.
+  /// \return Status
+  virtual Status Resize(int64_t capacity);
+
+  /// \brief Ensure that there is enough space allocated to append the indicated
+  /// number of elements without any further reallocation. Overallocation is
+  /// used in order to minimize the impact of incremental Reserve() calls.
+  /// Note that additional_capacity is relative to the current number of elements
+  /// rather than to the current capacity, so calls to Reserve() which are not
+  /// interspersed with addition of new elements may not increase the capacity.
+  ///
+  /// \param[in] additional_capacity the number of additional array values
+  /// \return Status
+  Status Reserve(int64_t additional_capacity) {
+    auto current_capacity = capacity();
+    auto min_capacity = length() + additional_capacity;
+    if (min_capacity <= current_capacity) return Status::OK();
+
+    // leave growth factor up to BufferBuilder
+    auto new_capacity = BufferBuilder::GrowByFactor(current_capacity, min_capacity);
+    return Resize(new_capacity);
+  }
+
+  /// Reset the builder.
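+  ///
+  /// A small sketch (hedged; applies to any concrete builder):
+  ///
+  /// \code{.cpp}
+  /// ARROW_RETURN_NOT_OK(builder.AppendNull());
+  /// builder.Reset();  // discard everything appended so far
+  /// \endcode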
+  virtual void Reset();
+
+  /// \brief Append a null value to builder
+  virtual Status AppendNull() = 0;
+  /// \brief Append a number of null values to builder
+  virtual Status AppendNulls(int64_t length) = 0;
+
+  /// \brief Append a non-null value to builder
+  ///
+  /// The appended value is an implementation detail, but the corresponding
+  /// memory slot is guaranteed to be initialized.
+  /// This method is useful when appending a null value to a parent nested type.
+  virtual Status AppendEmptyValue() = 0;
+
+  /// \brief Append a number of non-null values to builder
+  ///
+  /// The appended values are an implementation detail, but the corresponding
+  /// memory slot is guaranteed to be initialized.
+  /// This method is useful when appending null values to a parent nested type.
+  virtual Status AppendEmptyValues(int64_t length) = 0;
+
+  /// \brief Append a value from a scalar
+  Status AppendScalar(const Scalar& scalar) { return AppendScalar(scalar, 1); }
+  virtual Status AppendScalar(const Scalar& scalar, int64_t n_repeats);
+  virtual Status AppendScalars(const ScalarVector& scalars);
+
+  /// \brief Append a range of values from an array.
+  ///
+  /// The given array must be the same type as the builder.
+  virtual Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                                  int64_t length) {
+    return Status::NotImplemented("AppendArraySlice for builder for ", *type());
+  }
+
+  /// \brief Return result of builder as an internal generic ArrayData
+  /// object. Resets builder except for dictionary builder
+  ///
+  /// \param[out] out the finalized ArrayData object
+  /// \return Status
+  virtual Status FinishInternal(std::shared_ptr<ArrayData>* out) = 0;
+
+  /// \brief Return result of builder as an Array object.
+  ///
+  /// The builder is reset except for DictionaryBuilder.
+  ///
+  /// \param[out] out the finalized Array object
+  /// \return Status
+  Status Finish(std::shared_ptr<Array>* out);
+
+  /// \brief Return result of builder as an Array object.
+  ///
+  /// The builder is reset except for DictionaryBuilder.
+  ///
+  /// \return The finalized Array object
+  Result<std::shared_ptr<Array>> Finish();
+
+  /// \brief Return the type of the built Array
+  virtual std::shared_ptr<DataType> type() const = 0;
+
+ protected:
+  /// Append to null bitmap
+  Status AppendToBitmap(bool is_valid);
+
+  /// Vector append. Treat each zero byte as a null. If valid_bytes is null
+  /// assume all of length bits are valid.
+  Status AppendToBitmap(const uint8_t* valid_bytes, int64_t length);
+
+  /// Uniform append. Append N times the same validity bit.
+  Status AppendToBitmap(int64_t num_bits, bool value);
+
+  /// Set the next length bits to not null (i.e. valid).
+  Status SetNotNull(int64_t length);
+
+  // Unsafe operations (don't check capacity/don't resize)
+
+  void UnsafeAppendNull() { UnsafeAppendToBitmap(false); }
+
+  // Append to null bitmap, update the length
+  void UnsafeAppendToBitmap(bool is_valid) {
+    null_bitmap_builder_.UnsafeAppend(is_valid);
+    ++length_;
+    if (!is_valid) ++null_count_;
+  }
+
+  // Vector append. Treat each zero byte as a null. If valid_bytes is null
+  // assume all of length bits are valid.
+  void UnsafeAppendToBitmap(const uint8_t* valid_bytes, int64_t length) {
+    if (valid_bytes == NULLPTR) {
+      return UnsafeSetNotNull(length);
+    }
+    null_bitmap_builder_.UnsafeAppend(valid_bytes, length);
+    length_ += length;
+    null_count_ = null_bitmap_builder_.false_count();
+  }
+
+  // Vector append. Copy from a given bitmap. If bitmap is null assume
+  // all of length bits are valid.
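+  //
+  // Sketch of typical use from a derived builder (hedged; `validity` and
+  // `bitmap_offset` are illustrative names, not part of this header):
+  //
+  //   ARROW_RETURN_NOT_OK(Reserve(length));
+  //   UnsafeAppendToBitmap(validity, bitmap_offset, length);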
+ void UnsafeAppendToBitmap(const uint8_t* bitmap, int64_t offset, int64_t length) { + if (bitmap == NULLPTR) { + return UnsafeSetNotNull(length); + } + null_bitmap_builder_.UnsafeAppend(bitmap, offset, length); + length_ += length; + null_count_ = null_bitmap_builder_.false_count(); + } + + // Append the same validity value a given number of times. + void UnsafeAppendToBitmap(const int64_t num_bits, bool value) { + if (value) { + UnsafeSetNotNull(num_bits); + } else { + UnsafeSetNull(num_bits); + } + } + + void UnsafeAppendToBitmap(const std::vector& is_valid); + + // Set the next validity bits to not null (i.e. valid). + void UnsafeSetNotNull(int64_t length); + + // Set the next validity bits to null (i.e. invalid). + void UnsafeSetNull(int64_t length); + + static Status TrimBuffer(const int64_t bytes_filled, ResizableBuffer* buffer); + + /// \brief Finish to an array of the specified ArrayType + template + Status FinishTyped(std::shared_ptr* out) { + std::shared_ptr out_untyped; + ARROW_RETURN_NOT_OK(Finish(&out_untyped)); + *out = std::static_pointer_cast(std::move(out_untyped)); + return Status::OK(); + } + + // Check the requested capacity for validity + Status CheckCapacity(int64_t new_capacity) { + if (ARROW_PREDICT_FALSE(new_capacity < 0)) { + return Status::Invalid( + "Resize capacity must be positive (requested: ", new_capacity, ")"); + } + + if (ARROW_PREDICT_FALSE(new_capacity < length_)) { + return Status::Invalid("Resize cannot downsize (requested: ", new_capacity, + ", current length: ", length_, ")"); + } + + return Status::OK(); + } + + // Check for array type + Status CheckArrayType(const std::shared_ptr& expected_type, + const Array& array, const char* message); + Status CheckArrayType(Type::type expected_type, const Array& array, + const char* message); + + MemoryPool* pool_; + int64_t alignment_; + + TypedBufferBuilder null_bitmap_builder_; + int64_t null_count_ = 0; + + // Array length, so far. Also, the index of the next element to be added + int64_t length_ = 0; + int64_t capacity_ = 0; + + // Child value array builders. These are owned by this class + std::vector> children_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(ArrayBuilder); +}; + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the data type to create the builder for +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeBuilder(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilder( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilder(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty ArrayBuilder corresponding to the data +/// type, where any top-level or nested dictionary builders return the +/// exact index type specified by the type. 
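+///
+/// A usage sketch (hedged; the dictionary type chosen here is illustrative):
+///
+/// \code{.cpp}
+/// ARROW_ASSIGN_OR_RAISE(
+///     std::unique_ptr<arrow::ArrayBuilder> builder,
+///     arrow::MakeBuilderExactIndex(arrow::dictionary(arrow::int16(), arrow::utf8())));
+/// \endcode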
+ARROW_EXPORT +Status MakeBuilderExactIndex(MemoryPool* pool, const std::shared_ptr& type, + std::unique_ptr* out); + +inline Result> MakeBuilderExactIndex( + const std::shared_ptr& type, MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeBuilderExactIndex(pool, type, &out)); + return std::move(out); +} + +/// \brief Construct an empty DictionaryBuilder initialized optionally +/// with a preexisting dictionary +/// \param[in] pool the MemoryPool to use for allocations +/// \param[in] type the dictionary type to create the builder for +/// \param[in] dictionary the initial dictionary, if any. May be nullptr +/// \param[out] out the created ArrayBuilder +ARROW_EXPORT +Status MakeDictionaryBuilder(MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& dictionary, + std::unique_ptr* out); + +inline Result> MakeDictionaryBuilder( + const std::shared_ptr& type, const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) { + std::unique_ptr out; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, type, dictionary, &out)); + return std::move(out); +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h new file mode 100644 index 0000000000000000000000000000000000000000..d825f7d32520a309905871155a81b9adccff3c07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h @@ -0,0 +1,971 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer.h" +#include "arrow/buffer_builder.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/binary_view_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup binary-builders +/// +/// @{ + +// ---------------------------------------------------------------------- +// Binary and String + +template +class BaseBinaryBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, std::string_view> { + public: + using TypeClass = TYPE; + using offset_type = typename TypeClass::offset_type; + + explicit BaseBinaryBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + offsets_builder_(pool, alignment), + value_data_builder_(pool, alignment) {} + + BaseBinaryBuilder(const std::shared_ptr& type, MemoryPool* pool) + : BaseBinaryBuilder(pool) {} + + Status Append(const uint8_t* value, offset_type length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status Append(const char* value, offset_type length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// Extend the last appended value by appending more data at the end + /// + /// Unlike Append, this does not create a new offset. + Status ExtendCurrent(const uint8_t* value, offset_type length) { + // Safety check for UBSAN. + if (ARROW_PREDICT_TRUE(length > 0)) { + ARROW_RETURN_NOT_OK(ValidateOverflow(length)); + ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length)); + } + return Status::OK(); + } + + Status ExtendCurrent(std::string_view value) { + return ExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + Status AppendNulls(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, false); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNextOffset(); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + const int64_t num_bytes = value_data_builder_.length(); + ARROW_RETURN_NOT_OK(Reserve(length)); + for (int64_t i = 0; i < length; ++i) { + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } + UnsafeAppendToBitmap(length, true); + return Status::OK(); + } + + /// \brief Append without checking capacity + /// + /// Offsets and data should have been presized using Reserve() and + /// ReserveData(), respectively. 
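+  ///
+  /// A sketch of the presize-then-append pattern (hedged; names illustrative):
+  ///
+  /// \code{.cpp}
+  /// ARROW_RETURN_NOT_OK(builder.Reserve(num_values));
+  /// ARROW_RETURN_NOT_OK(builder.ReserveData(total_bytes));
+  /// for (const auto& s : strings) builder.UnsafeAppend(s);
+  /// \endcode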
+ void UnsafeAppend(const uint8_t* value, offset_type length) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(value, length); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppend(const char* value, offset_type length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// Like ExtendCurrent, but do not check capacity + void UnsafeExtendCurrent(const uint8_t* value, offset_type length) { + value_data_builder_.UnsafeAppend(value, length); + } + + void UnsafeExtendCurrent(std::string_view value) { + UnsafeExtendCurrent(reinterpret_cast(value.data()), + static_cast(value.size())); + } + + void UnsafeAppendNull() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a sequence of strings in one shot. + /// + /// \param[in] values a vector of strings + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const std::vector& values, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = std::accumulate( + values.begin(), values.end(), 0ULL, + [](uint64_t sum, const std::string& str) { return sum + str.size(); }); + ARROW_RETURN_NOT_OK(Reserve(values.size())); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes != NULLPTR) { + for (std::size_t i = 0; i < values.size(); ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + value_data_builder_.UnsafeAppend( + reinterpret_cast(values[i].data()), values[i].size()); + } + } + } else { + for (const auto& value : values) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(value.data()), + value.size()); + } + } + + UnsafeAppendToBitmap(valid_bytes, values.size()); + return Status::OK(); + } + + /// \brief Append a sequence of nul-terminated strings in one shot. + /// If one of the values is NULL, it is processed as a null + /// value even if the corresponding valid_bytes entry is 1. 
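+  ///
+  /// Sketch (hedged; a null `char*` entry produces a null slot):
+  ///
+  /// \code{.cpp}
+  /// const char* values[] = {"a", nullptr, "c"};
+  /// ARROW_RETURN_NOT_OK(builder.AppendValues(values, 3));
+  /// \endcode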
+ /// + /// \param[in] values a contiguous C array of nul-terminated char * + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const char** values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + std::size_t total_length = 0; + std::vector value_lengths(length); + bool have_null_value = false; + for (int64_t i = 0; i < length; ++i) { + if (values[i] != NULLPTR) { + auto value_length = strlen(values[i]); + value_lengths[i] = value_length; + total_length += value_length; + } else { + have_null_value = true; + } + } + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + + if (valid_bytes) { + int64_t valid_bytes_offset = 0; + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (valid_bytes[i]) { + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } else { + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, + i - valid_bytes_offset); + UnsafeAppendToBitmap(false); + valid_bytes_offset = i + 1; + } + } + } + UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, length - valid_bytes_offset); + } else { + if (have_null_value) { + std::vector valid_vector(length, 0); + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + if (values[i]) { + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + valid_vector[i] = 1; + } + } + UnsafeAppendToBitmap(valid_vector.data(), length); + } else { + for (int64_t i = 0; i < length; ++i) { + UnsafeAppendNextOffset(); + value_data_builder_.UnsafeAppend(reinterpret_cast(values[i]), + value_lengths[i]); + } + UnsafeAppendToBitmap(NULLPTR, length); + } + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + auto bitmap = array.GetValues(0, 0); + auto offsets = array.GetValues(1); + auto data = array.GetValues(2, 0); + auto total_length = offsets[offset + length] - offsets[offset]; + ARROW_RETURN_NOT_OK(Reserve(length)); + ARROW_RETURN_NOT_OK(ReserveData(total_length)); + for (int64_t i = 0; i < length; i++) { + if (!bitmap || bit_util::GetBit(bitmap, array.offset + offset + i)) { + const offset_type start = offsets[offset + i]; + const offset_type end = offsets[offset + i + 1]; + UnsafeAppend(data + start, end - start); + } else { + UnsafeAppendNull(); + } + } + return Status::OK(); + } + + void Reset() override { + ArrayBuilder::Reset(); + offsets_builder_.Reset(); + value_data_builder_.Reset(); + } + + Status ValidateOverflow(int64_t new_bytes) { + auto new_size = value_data_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + // One more than requested for offsets + ARROW_RETURN_NOT_OK(offsets_builder_.Resize(capacity + 1)); + return ArrayBuilder::Resize(capacity); + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return value_data_builder_.Reserve(elements); + } + + Status 
FinishInternal(std::shared_ptr* out) override { + // Write final offset (values length) + ARROW_RETURN_NOT_OK(AppendNextOffset()); + + // These buffers' padding zeroed by BufferBuilder + std::shared_ptr offsets, value_data, null_bitmap; + ARROW_RETURN_NOT_OK(offsets_builder_.Finish(&offsets)); + ARROW_RETURN_NOT_OK(value_data_builder_.Finish(&value_data)); + ARROW_RETURN_NOT_OK(null_bitmap_builder_.Finish(&null_bitmap)); + + *out = ArrayData::Make(type(), length_, {null_bitmap, offsets, value_data}, + null_count_, 0); + Reset(); + return Status::OK(); + } + + /// \return data pointer of the value date builder + const uint8_t* value_data() const { return value_data_builder_.data(); } + /// \return size of values buffer so far + int64_t value_data_length() const { return value_data_builder_.length(); } + /// \return capacity of values buffer + int64_t value_data_capacity() const { return value_data_builder_.capacity(); } + + /// \return data pointer of the value date builder + const offset_type* offsets_data() const { return offsets_builder_.data(); } + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + const uint8_t* GetValue(int64_t i, offset_type* out_length) const { + const offset_type* offsets = offsets_builder_.data(); + const auto offset = offsets[i]; + if (i == (length_ - 1)) { + *out_length = static_cast(value_data_builder_.length()) - offset; + } else { + *out_length = offsets[i + 1] - offset; + } + return value_data_builder_.data() + offset; + } + + offset_type offset(int64_t i) const { return offsets_data()[i]; } + + /// Temporary access to a value. + /// + /// This view becomes invalid on the next modifying operation. + std::string_view GetView(int64_t i) const { + offset_type value_length; + const uint8_t* value_data = GetValue(i, &value_length); + return std::string_view(reinterpret_cast(value_data), value_length); + } + + // Cannot make this a static attribute because of linking issues + static constexpr int64_t memory_limit() { + return std::numeric_limits::max() - 1; + } + + protected: + TypedBufferBuilder offsets_builder_; + TypedBufferBuilder value_data_builder_; + + Status AppendNextOffset() { + const int64_t num_bytes = value_data_builder_.length(); + return offsets_builder_.Append(static_cast(num_bytes)); + } + + void UnsafeAppendNextOffset() { + const int64_t num_bytes = value_data_builder_.length(); + offsets_builder_.UnsafeAppend(static_cast(num_bytes)); + } +}; + +/// \class BinaryBuilder +/// \brief Builder class for variable-length binary data +class ARROW_EXPORT BinaryBuilder : public BaseBinaryBuilder { + public: + using BaseBinaryBuilder::BaseBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return binary(); } +}; + +/// \class StringBuilder +/// \brief Builder class for UTF8 strings +class ARROW_EXPORT StringBuilder : public BinaryBuilder { + public: + using BinaryBuilder::BinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return utf8(); } +}; + +/// \class LargeBinaryBuilder +/// \brief Builder class for large variable-length binary data +class ARROW_EXPORT LargeBinaryBuilder : public BaseBinaryBuilder { + public: + using BaseBinaryBuilder::BaseBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// 
\endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_binary(); } +}; + +/// \class LargeStringBuilder +/// \brief Builder class for large UTF8 strings +class ARROW_EXPORT LargeStringBuilder : public LargeBinaryBuilder { + public: + using LargeBinaryBuilder::LargeBinaryBuilder; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { return large_utf8(); } +}; + +// ---------------------------------------------------------------------- +// BinaryViewBuilder, StringViewBuilder +// +// These builders do not support building raw pointer view arrays. + +namespace internal { + +// We allocate medium-sized memory chunks and accumulate data in those, which +// may result in some waste if there are many large-ish strings. If a string +// comes along that does not fit into a block, we allocate a new block and +// write into that. +// +// Later we can implement optimizations to continuing filling underfull blocks +// after encountering a large string that required allocating a new block. +class ARROW_EXPORT StringHeapBuilder { + public: + static constexpr int64_t kDefaultBlocksize = 32 << 10; // 32KB + + StringHeapBuilder(MemoryPool* pool, int64_t alignment) + : pool_(pool), alignment_(alignment) {} + + void SetBlockSize(int64_t blocksize) { blocksize_ = blocksize; } + + using c_type = BinaryViewType::c_type; + + template + std::conditional_t, c_type> Append(const uint8_t* value, + int64_t length) { + if (length <= BinaryViewType::kInlineSize) { + return util::ToInlineBinaryView(value, static_cast(length)); + } + + if constexpr (Safe) { + ARROW_RETURN_NOT_OK(Reserve(length)); + } + + auto v = + util::ToBinaryView(value, static_cast(length), + static_cast(blocks_.size() - 1), current_offset_); + + memcpy(current_out_buffer_, value, static_cast(length)); + current_out_buffer_ += length; + current_remaining_bytes_ -= length; + current_offset_ += static_cast(length); + return v; + } + + static constexpr int64_t ValueSizeLimit() { + return std::numeric_limits::max(); + } + + /// \brief Ensure that the indicated number of bytes can be appended via + /// UnsafeAppend operations without the need to allocate more memory + Status Reserve(int64_t num_bytes) { + if (ARROW_PREDICT_FALSE(num_bytes > ValueSizeLimit())) { + return Status::CapacityError( + "BinaryView or StringView elements cannot reference " + "strings larger than 2GB"); + } + if (num_bytes > current_remaining_bytes_) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + current_remaining_bytes_ = num_bytes > blocksize_ ? 
num_bytes : blocksize_; + ARROW_ASSIGN_OR_RAISE( + std::shared_ptr new_block, + AllocateResizableBuffer(current_remaining_bytes_, alignment_, pool_)); + current_offset_ = 0; + current_out_buffer_ = new_block->mutable_data(); + blocks_.emplace_back(std::move(new_block)); + } + return Status::OK(); + } + + void Reset() { + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + blocks_.clear(); + } + + int64_t current_remaining_bytes() const { return current_remaining_bytes_; } + + Result>> Finish() { + if (!blocks_.empty()) { + ARROW_RETURN_NOT_OK(FinishLastBlock()); + } + current_offset_ = 0; + current_out_buffer_ = NULLPTR; + current_remaining_bytes_ = 0; + return std::move(blocks_); + } + + private: + Status FinishLastBlock() { + if (current_remaining_bytes_ > 0) { + // Avoid leaking uninitialized bytes from the allocator + ARROW_RETURN_NOT_OK( + blocks_.back()->Resize(blocks_.back()->size() - current_remaining_bytes_, + /*shrink_to_fit=*/true)); + blocks_.back()->ZeroPadding(); + } + return Status::OK(); + } + + MemoryPool* pool_; + int64_t alignment_; + int64_t blocksize_ = kDefaultBlocksize; + std::vector> blocks_; + + int32_t current_offset_ = 0; + uint8_t* current_out_buffer_ = NULLPTR; + int64_t current_remaining_bytes_ = 0; +}; + +} // namespace internal + +class ARROW_EXPORT BinaryViewBuilder : public ArrayBuilder { + public: + using TypeClass = BinaryViewType; + + // this constructor provided for MakeBuilder compatibility + BinaryViewBuilder(const std::shared_ptr&, MemoryPool* pool); + + explicit BinaryViewBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + data_builder_(pool, alignment), + data_heap_builder_(pool, alignment) {} + + /// Set the size for future preallocated data buffers. + /// + /// The default size is 32KB, so after each 32KB of string data appended to the builder + /// a new data buffer will be allocated. Adjust this to a larger value to decrease the + /// frequency of allocation, or to a smaller value to lower the overhead of each + /// allocation. + void SetBlockSize(int64_t blocksize) { data_heap_builder_.SetBlockSize(blocksize); } + + /// The number of bytes which can be appended to this builder without allocating another + /// data buffer. 
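+  ///
+  /// A small sketch (pairs with SetBlockSize() above; the 1 MiB value is
+  /// illustrative only):
+  ///
+  /// \code{.cpp}
+  /// builder.SetBlockSize(1 << 20);
+  /// int64_t remaining = builder.current_block_bytes_remaining();
+  /// \endcode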
+ int64_t current_block_bytes_remaining() const { + return data_heap_builder_.current_remaining_bytes(); + } + + Status Append(const uint8_t* value, int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendToBitmap(true); + ARROW_ASSIGN_OR_RAISE(auto v, + data_heap_builder_.Append(value, length)); + data_builder_.UnsafeAppend(v); + return Status::OK(); + } + + Status Append(const char* value, int64_t length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// \brief Append without checking capacity + /// + /// Builder should have been presized using Reserve() and ReserveData(), + /// respectively, and the value must not be larger than 2GB + void UnsafeAppend(const uint8_t* value, int64_t length) { + UnsafeAppendToBitmap(true); + auto v = data_heap_builder_.Append(value, length); + data_builder_.UnsafeAppend(v); + } + + void UnsafeAppend(const char* value, int64_t length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// \brief Ensures there is enough allocated available capacity in the + /// out-of-line data heap to append the indicated number of bytes without + /// additional allocations + Status ReserveData(int64_t length); + + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, BinaryViewType::c_type{}); + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element (length-0 inline string) + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, BinaryViewType::c_type{}); + UnsafeSetNotNull(length); + return Status::OK(); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a slice of a BinaryViewArray passed as an ArraySpan. 
Copies + /// the underlying out-of-line string memory to avoid memory lifetime issues + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + void Reset() override; + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override { return binary_view(); } + + protected: + TypedBufferBuilder data_builder_; + + // Accumulates out-of-line data in fixed-size chunks which are then attached + // to the resulting ArrayData + internal::StringHeapBuilder data_heap_builder_; +}; + +class ARROW_EXPORT StringViewBuilder : public BinaryViewBuilder { + public: + using BinaryViewBuilder::BinaryViewBuilder; + std::shared_ptr type() const override { return utf8_view(); } +}; + +// ---------------------------------------------------------------------- +// FixedSizeBinaryBuilder + +class ARROW_EXPORT FixedSizeBinaryBuilder : public ArrayBuilder { + public: + using TypeClass = FixedSizeBinaryType; + + explicit FixedSizeBinaryBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + Status Append(const uint8_t* value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const char* value) { + return Append(reinterpret_cast(value)); + } + + Status Append(std::string_view view) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(view); + return Status::OK(); + } + + Status Append(const std::string& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const Buffer& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const std::shared_ptr& s) { return Append(*s); } + + template + Status Append(const std::array& value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend( + std::string_view(reinterpret_cast(value.data()), value.size())); + return Status::OK(); + } + + Status AppendValues(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status AppendValues(const uint8_t* data, int64_t length, const uint8_t* validity, + int64_t bitmap_offset); + + Status AppendNull() final; + Status AppendNulls(int64_t length) final; + + Status AppendEmptyValue() final; + Status AppendEmptyValues(int64_t length) final; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues( + array.GetValues(1, 0) + ((array.offset + offset) * byte_width_), length, + array.GetValues(0, 0), array.offset + offset); + } + + void UnsafeAppend(const uint8_t* value) { + UnsafeAppendToBitmap(true); + if (ARROW_PREDICT_TRUE(byte_width_ > 0)) { + byte_builder_.UnsafeAppend(value, byte_width_); + } + } + + void UnsafeAppend(const char* value) { + UnsafeAppend(reinterpret_cast(value)); + } + + void UnsafeAppend(std::string_view value) { +#ifndef NDEBUG + CheckValueSize(static_cast(value.size())); +#endif + UnsafeAppend(reinterpret_cast(value.data())); + } + + void UnsafeAppend(const Buffer& s) { UnsafeAppend(std::string_view{s}); } + + void UnsafeAppend(const std::shared_ptr& s) { UnsafeAppend(*s); } + + void UnsafeAppendNull() { + UnsafeAppendToBitmap(false); + byte_builder_.UnsafeAppend(/*num_copies=*/byte_width_, 0); + 
} + + Status ValidateOverflow(int64_t new_bytes) const { + auto new_size = byte_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return byte_builder_.Reserve(elements); + } + + void Reset() override; + Status Resize(int64_t capacity) override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \return size of values buffer so far + int64_t value_data_length() const { return byte_builder_.length(); } + + int32_t byte_width() const { return byte_width_; } + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + const uint8_t* GetValue(int64_t i) const; + + /// Temporary access to a value. + /// + /// This view becomes invalid on the next modifying operation. + std::string_view GetView(int64_t i) const; + + static constexpr int64_t memory_limit() { + return std::numeric_limits::max() - 1; + } + + std::shared_ptr type() const override { + return fixed_size_binary(byte_width_); + } + + protected: + int32_t byte_width_; + BufferBuilder byte_builder_; + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + uint8_t* GetMutableValue(int64_t i) { + uint8_t* data_ptr = byte_builder_.mutable_data(); + return data_ptr + i * byte_width_; + } + + void CheckValueSize(int64_t size); +}; + +/// @} + +// ---------------------------------------------------------------------- +// Chunked builders: build a sequence of BinaryArray or StringArray that are +// limited to a particular size (to the upper limit of 2GB) + +namespace internal { + +class ARROW_EXPORT ChunkedBinaryBuilder { + public: + explicit ChunkedBinaryBuilder(int32_t max_chunk_value_length, + MemoryPool* pool = default_memory_pool()); + + ChunkedBinaryBuilder(int32_t max_chunk_value_length, int32_t max_chunk_length, + MemoryPool* pool = default_memory_pool()); + + virtual ~ChunkedBinaryBuilder() = default; + + Status Append(const uint8_t* value, int32_t length) { + if (ARROW_PREDICT_FALSE(length + builder_->value_data_length() > + max_chunk_value_length_)) { + if (builder_->value_data_length() == 0) { + // The current item is larger than max_chunk_size_; + // this chunk will be oversize and hold *only* this item + ARROW_RETURN_NOT_OK(builder_->Append(value, length)); + return NextChunk(); + } + // The current item would cause builder_->value_data_length() to exceed + // max_chunk_size_, so finish this chunk and append the current item to the next + // chunk + ARROW_RETURN_NOT_OK(NextChunk()); + return Append(value, length); + } + + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + // The current item would cause builder_->length() to exceed max_chunk_length_, so + // finish this chunk and append the current item to the next chunk + ARROW_RETURN_NOT_OK(NextChunk()); + } + + return builder_->Append(value, length); + } + + Status Append(std::string_view value) { + return Append(reinterpret_cast(value.data()), + static_cast(value.size())); 
+  }
+
+  Status AppendNull() {
+    if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) {
+      ARROW_RETURN_NOT_OK(NextChunk());
+    }
+    return builder_->AppendNull();
+  }
+
+  Status Reserve(int64_t values);
+
+  virtual Status Finish(ArrayVector* out);
+
+ protected:
+  Status NextChunk();
+
+  // maximum total character data size per chunk
+  int64_t max_chunk_value_length_;
+
+  // maximum elements allowed per chunk
+  int64_t max_chunk_length_ = kListMaximumElements;
+
+  // when Reserve() would cause builder_ to exceed its max_chunk_length_,
+  // add to extra_capacity_ instead and wait to reserve until the next chunk
+  int64_t extra_capacity_ = 0;
+
+  std::unique_ptr<BinaryBuilder> builder_;
+  std::vector<std::shared_ptr<Array>> chunks_;
+};
+
+class ARROW_EXPORT ChunkedStringBuilder : public ChunkedBinaryBuilder {
+ public:
+  using ChunkedBinaryBuilder::ChunkedBinaryBuilder;
+
+  Status Finish(ArrayVector* out) override;
+};
+
+}  // namespace internal
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..8094250aef8d4b5068d8d8a3ec5ab6488221775c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h
@@ -0,0 +1,102 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
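Editor's aside (not part of the diff): the chunk-rollover behavior of internal::ChunkedBinaryBuilder, declared in builder_binary.h above, is easiest to see in use. A minimal sketch, assuming upstream Arrow's public headers; the 16-byte limit and the function name are invented for illustration:

    #include "arrow/array/builder_binary.h"

    arrow::Status BuildChunkedExample() {
      // Each finished chunk may hold at most 16 bytes of character data.
      arrow::internal::ChunkedBinaryBuilder builder(/*max_chunk_value_length=*/16);
      ARROW_RETURN_NOT_OK(builder.Append(std::string_view("0123456789")));  // chunk 0: 10 bytes
      // 10 + 8 > 16, so the first chunk is finished and this value opens chunk 1.
      ARROW_RETURN_NOT_OK(builder.Append(std::string_view("abcdefgh")));
      arrow::ArrayVector chunks;
      ARROW_RETURN_NOT_OK(builder.Finish(&chunks));  // chunks.size() == 2
      return arrow::Status::OK();
    }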
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/array_decimal.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/array/data.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup numeric-builders
+///
+/// @{
+
+class ARROW_EXPORT Decimal128Builder : public FixedSizeBinaryBuilder {
+ public:
+  using TypeClass = Decimal128Type;
+  using ValueType = Decimal128;
+
+  explicit Decimal128Builder(const std::shared_ptr<DataType>& type,
+                             MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment);
+
+  using FixedSizeBinaryBuilder::Append;
+  using FixedSizeBinaryBuilder::AppendValues;
+  using FixedSizeBinaryBuilder::Reset;
+
+  Status Append(Decimal128 val);
+  void UnsafeAppend(Decimal128 val);
+  void UnsafeAppend(std::string_view val);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<Decimal128Array>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return decimal_type_; }
+
+ protected:
+  std::shared_ptr<Decimal128Type> decimal_type_;
+};
+
+class ARROW_EXPORT Decimal256Builder : public FixedSizeBinaryBuilder {
+ public:
+  using TypeClass = Decimal256Type;
+  using ValueType = Decimal256;
+
+  explicit Decimal256Builder(const std::shared_ptr<DataType>& type,
+                             MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment);
+
+  using FixedSizeBinaryBuilder::Append;
+  using FixedSizeBinaryBuilder::AppendValues;
+  using FixedSizeBinaryBuilder::Reset;
+
+  Status Append(const Decimal256& val);
+  void UnsafeAppend(const Decimal256& val);
+  void UnsafeAppend(std::string_view val);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<Decimal256Array>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return decimal_type_; }
+
+ protected:
+  std::shared_ptr<Decimal256Type> decimal_type_;
+};
+
+using DecimalBuilder = Decimal128Builder;
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f0d711dc5bb588c3abcbb301902338cf60d32bf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_dict.h
@@ -0,0 +1,737 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
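Editor's aside (not part of the diff): a minimal usage sketch for the decimal builders above, assuming upstream Arrow's decimal128() factory and Decimal128::FromString(); the function name and the precision/scale values are invented for illustration:

    #include "arrow/array/builder_decimal.h"
    #include "arrow/result.h"
    #include "arrow/util/decimal.h"

    arrow::Status BuildDecimalExample(std::shared_ptr<arrow::Array>* out) {
      // decimal128(precision, scale) fixes the 16-byte layout of each value.
      arrow::Decimal128Builder builder(arrow::decimal128(/*precision=*/10, /*scale=*/2));
      ARROW_ASSIGN_OR_RAISE(arrow::Decimal128 value,
                            arrow::Decimal128::FromString("123.45"));
      ARROW_RETURN_NOT_OK(builder.Append(value));
      ARROW_RETURN_NOT_OK(builder.AppendNull());
      return builder.Finish(out);  // a Decimal128Array of length 2
    }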
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_binary.h" +#include "arrow/array/builder_adaptive.h" // IWYU pragma: export +#include "arrow/array/builder_base.h" // IWYU pragma: export +#include "arrow/array/builder_primitive.h" // IWYU pragma: export +#include "arrow/array/data.h" +#include "arrow/array/util.h" +#include "arrow/scalar.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/decimal.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Dictionary builder + +namespace internal { + +template +struct DictionaryValue { + using type = typename T::c_type; + using PhysicalType = T; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = + typename std::conditional::value, + BinaryType, LargeBinaryType>::type; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryViewType; +}; + +template +struct DictionaryValue> { + using type = std::string_view; + using PhysicalType = BinaryType; +}; + +class ARROW_EXPORT DictionaryMemoTable { + public: + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& type); + DictionaryMemoTable(MemoryPool* pool, const std::shared_ptr& dictionary); + ~DictionaryMemoTable(); + + Status GetArrayData(int64_t start_offset, std::shared_ptr* out); + + /// \brief Insert new memo values + Status InsertValues(const Array& values); + + int32_t size() const; + + template + Status GetOrInsert(typename DictionaryValue::type value, int32_t* out) { + // We want to keep the DictionaryMemoTable implementation private, also we can't + // use extern template classes because of compiler issues (MinGW?). Instead, + // we expose explicit function overrides for each supported physical type. 
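+    // (editor's note) e.g. for T = StringType the DictionaryValue traits above
+    // resolve PhysicalType to BinaryType, so string values dispatch to the
+    // BinaryType overload below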
+    const typename DictionaryValue<T>::PhysicalType* physical_type = NULLPTR;
+    return GetOrInsert(physical_type, value, out);
+  }
+
+ private:
+  Status GetOrInsert(const BooleanType*, bool value, int32_t* out);
+  Status GetOrInsert(const Int8Type*, int8_t value, int32_t* out);
+  Status GetOrInsert(const Int16Type*, int16_t value, int32_t* out);
+  Status GetOrInsert(const Int32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Int64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const UInt8Type*, uint8_t value, int32_t* out);
+  Status GetOrInsert(const UInt16Type*, uint16_t value, int32_t* out);
+  Status GetOrInsert(const UInt32Type*, uint32_t value, int32_t* out);
+  Status GetOrInsert(const UInt64Type*, uint64_t value, int32_t* out);
+  Status GetOrInsert(const DurationType*, int64_t value, int32_t* out);
+  Status GetOrInsert(const TimestampType*, int64_t value, int32_t* out);
+  Status GetOrInsert(const Date32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Date64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const Time32Type*, int32_t value, int32_t* out);
+  Status GetOrInsert(const Time64Type*, int64_t value, int32_t* out);
+  Status GetOrInsert(const MonthDayNanoIntervalType*,
+                     MonthDayNanoIntervalType::MonthDayNanos value, int32_t* out);
+  Status GetOrInsert(const DayTimeIntervalType*,
+                     DayTimeIntervalType::DayMilliseconds value, int32_t* out);
+  Status GetOrInsert(const MonthIntervalType*, int32_t value, int32_t* out);
+  Status GetOrInsert(const FloatType*, float value, int32_t* out);
+  Status GetOrInsert(const DoubleType*, double value, int32_t* out);
+
+  Status GetOrInsert(const BinaryType*, std::string_view value, int32_t* out);
+  Status GetOrInsert(const LargeBinaryType*, std::string_view value, int32_t* out);
+  Status GetOrInsert(const BinaryViewType*, std::string_view value, int32_t* out);
+
+  class DictionaryMemoTableImpl;
+  std::unique_ptr<DictionaryMemoTableImpl> impl_;
+};
+
+}  // namespace internal
+
+/// \addtogroup dictionary-builders
+///
+/// @{
+
+namespace internal {
+
+/// \brief Array builder for creating dictionary-encoded DictionaryArray
+/// from dense array data
+///
+/// Unlike other builders, dictionary builder does not completely
+/// reset the state on Finish calls.
+template <typename BuilderType, typename T>
+class DictionaryBuilderBase : public ArrayBuilder {
+ public:
+  using TypeClass = DictionaryType;
+  using Value = typename DictionaryValue<T>::type;
+
+  // WARNING: the type given below is the value type, not the DictionaryType.
+  // The DictionaryType is instantiated on the Finish() call.
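+  // (editor's note) e.g. a dictionary builder over StringType is constructed
+  // with the value type utf8(); only type() and Finish() report the resulting
+  // dictionary(<index type>, utf8()) type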
+ template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + !is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_t::value, const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + DictionaryBuilderBase(uint8_t start_int_size, + enable_if_t::value && + is_fixed_size_binary_type::value, + const std::shared_ptr&> + value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(start_int_size, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + const std::shared_ptr& index_type, + enable_if_fixed_size_binary&> value_type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, value_type)), + delta_offset_(0), + byte_width_(static_cast(*value_type).byte_width()), + indices_builder_(index_type, pool, alignment), + value_type_(value_type) {} + + template + explicit DictionaryBuilderBase( + enable_if_parameter_free pool = default_memory_pool()) + : DictionaryBuilderBase(TypeTraits::type_singleton(), pool) {} + + // This constructor doesn't check for errors. Use InsertMemoValues instead. 
+ explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + memo_table_(new internal::DictionaryMemoTable(pool, dictionary)), + delta_offset_(0), + byte_width_(-1), + indices_builder_(pool, alignment), + value_type_(dictionary->type()) {} + + ~DictionaryBuilderBase() override = default; + + /// \brief The current number of entries in the dictionary + int64_t dictionary_length() const { return memo_table_->size(); } + + /// \brief The value byte width (for FixedSizeBinaryType) + template + enable_if_fixed_size_binary byte_width() const { + return byte_width_; + } + + /// \brief Append a scalar value + Status Append(Value value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + + int32_t memo_index; + ARROW_RETURN_NOT_OK(memo_table_->GetOrInsert(value, &memo_index)); + ARROW_RETURN_NOT_OK(indices_builder_.Append(memo_index)); + length_ += 1; + + return Status::OK(); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const uint8_t* value) { + return Append(std::string_view(reinterpret_cast(value), byte_width_)); + } + + /// \brief Append a fixed-width string (only for FixedSizeBinaryType) + template + enable_if_fixed_size_binary Append(const char* value) { + return Append(std::string_view(value, byte_width_)); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const uint8_t* value, int32_t length) { + return Append(reinterpret_cast(value), length); + } + + /// \brief Append a string (only for binary types) + template + enable_if_binary_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a string (only for string types) + template + enable_if_string_like Append(const char* value, int32_t length) { + return Append(std::string_view(value, length)); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal128 Append(const Decimal128& value) { + uint8_t data[16]; + value.ToBytes(data); + return Append(data, 16); + } + + /// \brief Append a decimal (only for Decimal128Type) + template + enable_if_decimal256 Append(const Decimal256& value) { + uint8_t data[32]; + value.ToBytes(data); + return Append(data, 32); + } + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override { + if (!scalar.is_valid) return AppendNulls(n_repeats); + + const auto& dict_ty = internal::checked_cast(*scalar.type); + const DictionaryScalar& dict_scalar = + internal::checked_cast(scalar); + const auto& dict = internal::checked_cast::ArrayType&>( + *dict_scalar.value.dictionary); + ARROW_RETURN_NOT_OK(Reserve(n_repeats)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT8: + return AppendScalarImpl(dict, *dict_scalar.value.index, 
n_repeats); + case Type::UINT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT16: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT32: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::UINT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + case Type::INT64: + return AppendScalarImpl(dict, *dict_scalar.value.index, n_repeats); + default: + return Status::TypeError("Invalid index type: ", dict_ty); + } + return Status::OK(); + } + + Status AppendScalars(const ScalarVector& scalars) override { + for (const auto& scalar : scalars) { + ARROW_RETURN_NOT_OK(AppendScalar(*scalar, /*n_repeats=*/1)); + } + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final { + // Visit the indices and insert the unpacked values. + const auto& dict_ty = internal::checked_cast(*array.type); + // See if possible to avoid using ToArrayData here + const typename TypeTraits::ArrayType dict(array.dictionary().ToArrayData()); + ARROW_RETURN_NOT_OK(Reserve(length)); + switch (dict_ty.index_type()->id()) { + case Type::UINT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT8: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT16: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT32: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::UINT64: + return AppendArraySliceImpl(dict, array, offset, length); + case Type::INT64: + return AppendArraySliceImpl(dict, array, offset, length); + default: + return Status::TypeError("Invalid index type: ", dict_ty); + } + return Status::OK(); + } + + /// \brief Insert values into the dictionary's memo, but do not append any + /// indices. Can be used to initialize a new builder with known dictionary + /// values + /// \param[in] values dictionary values to add to memo. Type must match + /// builder type + Status InsertMemoValues(const Array& values) { + return memo_table_->InsertValues(values); + } + + /// \brief Append a whole dense array to the builder + template + enable_if_t::value, Status> AppendArray( + const Array& array) { + using ArrayType = typename TypeTraits::ArrayType; + +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetView(i))); + } + } + return Status::OK(); + } + + template + enable_if_fixed_size_binary AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + value_type_, array, "Wrong value type of array to be appended")); +#endif + + const auto& concrete_array = static_cast(array); + for (int64_t i = 0; i < array.length(); i++) { + if (array.IsNull(i)) { + ARROW_RETURN_NOT_OK(AppendNull()); + } else { + ARROW_RETURN_NOT_OK(Append(concrete_array.GetValue(i))); + } + } + return Status::OK(); + } + + void Reset() override { + // Perform a partial reset. 
Call ResetFull to also reset the accumulated + // dictionary values + ArrayBuilder::Reset(); + indices_builder_.Reset(); + } + + /// \brief Reset and also clear accumulated dictionary values in memo table + void ResetFull() { + Reset(); + memo_table_.reset(new internal::DictionaryMemoTable(pool_, value_type_)); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + /// \brief Return dictionary indices and a delta dictionary since the last + /// time that Finish or FinishDelta were called, and reset state of builder + /// (except the memo table) + Status FinishDelta(std::shared_ptr* out_indices, + std::shared_ptr* out_delta) { + std::shared_ptr indices_data; + std::shared_ptr delta_data; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(delta_offset_, &indices_data, &delta_data)); + *out_indices = MakeArray(indices_data); + *out_delta = MakeArray(delta_data); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), value_type_); + } + + protected: + template + Status AppendArraySliceImpl(const typename TypeTraits::ArrayType& dict, + const ArraySpan& array, int64_t offset, int64_t length) { + const c_type* values = array.GetValues(1) + offset; + return VisitBitBlocks( + array.buffers[0].data, array.offset + offset, length, + [&](const int64_t position) { + const int64_t index = static_cast(values[position]); + if (dict.IsValid(index)) { + return Append(dict.GetView(index)); + } + return AppendNull(); + }, + [&]() { return AppendNull(); }); + } + + template + Status AppendScalarImpl(const typename TypeTraits::ArrayType& dict, + const Scalar& index_scalar, int64_t n_repeats) { + using ScalarType = typename TypeTraits::ScalarType; + const auto index = internal::checked_cast(index_scalar).value; + if (index_scalar.is_valid && dict.IsValid(index)) { + const auto& value = dict.GetView(index); + for (int64_t i = 0; i < n_repeats; i++) { + ARROW_RETURN_NOT_OK(Append(value)); + } + return Status::OK(); + } + return AppendNulls(n_repeats); + } + + Status FinishInternal(std::shared_ptr* out) override { + std::shared_ptr dictionary; + ARROW_RETURN_NOT_OK(FinishWithDictOffset(/*offset=*/0, out, &dictionary)); + + // Set type of array data to the right dictionary type + (*out)->type = type(); + (*out)->dictionary = dictionary; + return Status::OK(); + } + + Status FinishWithDictOffset(int64_t dict_offset, + std::shared_ptr* out_indices, + std::shared_ptr* out_dictionary) { + // Finalize indices array + ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out_indices)); + + // Generate dictionary array from hash table contents + ARROW_RETURN_NOT_OK(memo_table_->GetArrayData(dict_offset, out_dictionary)); + delta_offset_ = memo_table_->size(); + + // Update internals for further uses of this DictionaryBuilder + ArrayBuilder::Reset(); + return Status::OK(); + } + + std::unique_ptr memo_table_; + + // The size of the dictionary memo at last invocation of Finish, to use in + // FinishDelta for computing dictionary deltas + int32_t delta_offset_; + + // Only used for FixedSizeBinaryType + int32_t byte_width_; + + BuilderType indices_builder_; + std::shared_ptr value_type_; +}; + +template +class 
DictionaryBuilderBase : public ArrayBuilder { + public: + template + DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& index_type, + const std::shared_ptr& value_type, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(index_type, pool) {} + + template + explicit DictionaryBuilderBase( + enable_if_t::value, uint8_t> + start_int_size, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(start_int_size, pool) {} + + explicit DictionaryBuilderBase(MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + explicit DictionaryBuilderBase(const std::shared_ptr& dictionary, + MemoryPool* pool = default_memory_pool()) + : ArrayBuilder(pool), indices_builder_(pool) {} + + /// \brief Append a scalar null value + Status AppendNull() final { + length_ += 1; + null_count_ += 1; + + return indices_builder_.AppendNull(); + } + + Status AppendNulls(int64_t length) final { + length_ += length; + null_count_ += length; + + return indices_builder_.AppendNulls(length); + } + + Status AppendEmptyValue() final { + length_ += 1; + + return indices_builder_.AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + length_ += length; + + return indices_builder_.AppendEmptyValues(length); + } + + /// \brief Append a whole dense array to the builder + Status AppendArray(const Array& array) { +#ifndef NDEBUG + ARROW_RETURN_NOT_OK(ArrayBuilder::CheckArrayType( + Type::NA, array, "Wrong value type of array to be appended")); +#endif + for (int64_t i = 0; i < array.length(); i++) { + ARROW_RETURN_NOT_OK(AppendNull()); + } + return Status::OK(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + + ARROW_RETURN_NOT_OK(indices_builder_.Resize(capacity)); + capacity_ = indices_builder_.capacity(); + return Status::OK(); + } + + Status FinishInternal(std::shared_ptr* out) override { + ARROW_RETURN_NOT_OK(indices_builder_.FinishInternal(out)); + (*out)->type = dictionary((*out)->type, null()); + (*out)->dictionary = NullArray(0).data(); + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + std::shared_ptr type() const override { + return ::arrow::dictionary(indices_builder_.type(), null()); + } + + protected: + BuilderType indices_builder_; +}; + +} // namespace internal + +/// \brief A DictionaryArray builder that uses AdaptiveIntBuilder to return the +/// smallest index size that can accommodate the dictionary indices +template +class DictionaryBuilder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int64_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + 
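+    // (editor's note) valid_bytes may introduce nulls in the index builder; the
+    // delta against null_count_before is folded into this->null_count_ below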
ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +/// \brief A DictionaryArray builder that always returns int32 dictionary +/// indices so that data cast to dictionary form will have a consistent index +/// type, e.g. for creating a ChunkedArray +template +class Dictionary32Builder : public internal::DictionaryBuilderBase { + public: + using BASE = internal::DictionaryBuilderBase; + using BASE::BASE; + + /// \brief Append dictionary indices directly without modifying memo + /// + /// NOTE: Experimental API + Status AppendIndices(const int32_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + int64_t null_count_before = this->indices_builder_.null_count(); + ARROW_RETURN_NOT_OK(this->indices_builder_.AppendValues(values, length, valid_bytes)); + this->capacity_ = this->indices_builder_.capacity(); + this->length_ += length; + this->null_count_ += this->indices_builder_.null_count() - null_count_before; + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// Binary / Unicode builders +// (compatibility aliases; those used to be derived classes with additional +// Append() overloads, but they have been folded into DictionaryBuilderBase) + +using BinaryDictionaryBuilder = DictionaryBuilder; +using StringDictionaryBuilder = DictionaryBuilder; +using BinaryDictionary32Builder = Dictionary32Builder; +using StringDictionary32Builder = Dictionary32Builder; + +/// @} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h new file mode 100644 index 0000000000000000000000000000000000000000..29e01d55edeb110c6b93e12722521e81ca445436 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h @@ -0,0 +1,555 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
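Editor's aside (not part of the diff): a minimal sketch of the dictionary builders aliased above. The function name is invented; the behavior (adaptive index width starting at int8, memo reuse for repeated values) follows the declarations in builder_dict.h:

    #include "arrow/array/builder_dict.h"

    arrow::Status BuildDictExample(std::shared_ptr<arrow::Array>* out) {
      arrow::StringDictionaryBuilder builder;  // DictionaryBuilder<StringType>
      ARROW_RETURN_NOT_OK(builder.Append("foo"));
      ARROW_RETURN_NOT_OK(builder.Append("bar"));
      ARROW_RETURN_NOT_OK(builder.Append("foo"));  // reuses the memo entry for "foo"
      ARROW_RETURN_NOT_OK(builder.AppendNull());
      // Result type is dictionary(int8(), utf8()); indices are [0, 1, 0, null].
      return builder.Finish(out);
    }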
+ +#pragma once + +#include +#include +#include + +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" + +namespace arrow { + +class ARROW_EXPORT NullBuilder : public ArrayBuilder { + public: + explicit NullBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool) {} + explicit NullBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : NullBuilder(pool, alignment) {} + + /// \brief Append the specified number of null elements + Status AppendNulls(int64_t length) final { + if (length < 0) return Status::Invalid("length must be positive"); + null_count_ += length; + length_ += length; + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { return AppendNulls(1); } + + Status AppendEmptyValues(int64_t length) final { return AppendNulls(length); } + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + + Status Append(std::nullptr_t) { return AppendNull(); } + + Status AppendArraySlice(const ArraySpan&, int64_t, int64_t length) override { + return AppendNulls(length); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + std::shared_ptr type() const override { return null(); } + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +/// \addtogroup numeric-builders +/// +/// @{ + +/// Base class for all Builders that emit an Array of a scalar numerical type. +template +class NumericBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, typename T::c_type> { + public: + using TypeClass = T; + using value_type = typename T::c_type; + using ArrayType = typename TypeTraits::ArrayType; + + template + explicit NumericBuilder( + enable_if_parameter_free pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + type_(TypeTraits::type_singleton()), + data_builder_(pool, alignment) {} + + NumericBuilder(const std::shared_ptr& type, MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), type_(type), data_builder_(pool, alignment) {} + + /// Append a single scalar and increase the size if necessary. 
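+  ///
+  /// (editor's note) e.g., with the Int64Builder alias declared below:
+  /// \code
+  /// arrow::Int64Builder builder;
+  /// ARROW_RETURN_NOT_OK(builder.Append(42));
+  /// \endcode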
+ Status Append(const value_type val) { + ARROW_RETURN_NOT_OK(ArrayBuilder::Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + /// The memory at the corresponding data slot is set to 0 to prevent + /// uninitialized memory access + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNotNull(length); + return Status::OK(); + } + + value_type GetValue(int64_t index) const { return data_builder_.data()[index]; } + + void Reset() override { + data_builder_.Reset(); + ArrayBuilder::Reset(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + value_type operator[](int64_t index) const { return GetValue(index); } + + value_type& operator[](int64_t index) { + return reinterpret_cast(data_builder_.mutable_data())[index]; + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(valid_bytes, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] bitmap a validity bitmap to copy (may be null) + /// \param[in] bitmap_offset an offset into the validity bitmap + /// \return Status + Status AppendValues(const value_type* values, int64_t length, const uint8_t* bitmap, + int64_t bitmap_offset) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(bitmap, bitmap_offset, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const std::vector& is_valid) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(is_valid); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid) { + return AppendValues(values.data(), static_cast(values.size()), is_valid); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \return Status + Status AppendValues(const std::vector& values) { + return AppendValues(values.data(), static_cast(values.size())); + } + + Status FinishInternal(std::shared_ptr* out) override { + ARROW_ASSIGN_OR_RAISE(auto null_bitmap, + null_bitmap_builder_.FinishWithLength(length_)); + ARROW_ASSIGN_OR_RAISE(auto data, data_builder_.FinishWithLength(length_)); + *out = ArrayData::Make(type(), length_, {null_bitmap, data}, null_count_); + capacity_ = length_ = null_count_ = 0; + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values. 
+ /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, with a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + } + + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1) + offset, length, + array.GetValues(0, 0), array.offset + offset); + } + + /// Append a single scalar under the assumption that the underlying Buffer is + /// large enough. + /// + /// This method does not capacity-check; make sure to call Reserve + /// beforehand. + void UnsafeAppend(const value_type val) { + ArrayBuilder::UnsafeAppendToBitmap(true); + data_builder_.UnsafeAppend(val); + } + + void UnsafeAppendNull() { + ArrayBuilder::UnsafeAppendToBitmap(false); + data_builder_.UnsafeAppend(value_type{}); // zero + } + + std::shared_ptr type() const override { return type_; } + + protected: + std::shared_ptr type_; + TypedBufferBuilder data_builder_; +}; + +// Builders + +using UInt8Builder = NumericBuilder; +using UInt16Builder = NumericBuilder; +using UInt32Builder = NumericBuilder; +using UInt64Builder = NumericBuilder; + +using Int8Builder = NumericBuilder; +using Int16Builder = NumericBuilder; +using Int32Builder = NumericBuilder; +using Int64Builder = NumericBuilder; + +using HalfFloatBuilder = NumericBuilder; +using FloatBuilder = NumericBuilder; +using DoubleBuilder = NumericBuilder; + +/// @} + +/// \addtogroup temporal-builders +/// +/// @{ + +using Date32Builder = NumericBuilder; +using Date64Builder = NumericBuilder; +using Time32Builder = NumericBuilder; +using Time64Builder = NumericBuilder; +using TimestampBuilder = NumericBuilder; +using MonthIntervalBuilder = NumericBuilder; +using DurationBuilder = NumericBuilder; + +/// @} + +class ARROW_EXPORT BooleanBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps { + public: + using TypeClass = BooleanType; + using value_type = bool; + + explicit BooleanBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + BooleanBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + Status AppendNulls(int64_t length) final { + 
ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNull(length); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNull(); + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(false); + UnsafeSetNotNull(1); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// Scalar append + Status Append(const bool val) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + Status Append(const uint8_t val) { return Append(val != 0); } + + /// Scalar append, without checking for capacity + void UnsafeAppend(const bool val) { + data_builder_.UnsafeAppend(val); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(false); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppend(const uint8_t val) { UnsafeAppend(val != 0); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous array of bytes (non-zero is 1) + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a bitmap of values + /// \param[in] length the number of values to append + /// \param[in] validity a validity bitmap to copy (may be null) + /// \param[in] offset an offset into the values and validity bitmaps + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, const uint8_t* validity, + int64_t offset); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// or null(0) values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + // this updates length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values + /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, for a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + } + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + Status AppendValues(int64_t length, bool value); + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1, 0), length, + array.GetValues(0, 0), array.offset + offset); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + void Reset() override; + Status Resize(int64_t capacity) override; + + std::shared_ptr type() const override { return boolean(); } + + protected: + TypedBufferBuilder data_builder_; +}; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h 
new file mode 100644 index 0000000000000000000000000000000000000000..ac92efbd0dbe6b470b8275219e75b41aa3f7ab3a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_run_end.h @@ -0,0 +1,303 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_base.h" + +namespace arrow { + +/// \addtogroup run-end-encoded-builders +/// +/// @{ + +namespace internal { + +/// \brief An ArrayBuilder that deduplicates repeated values as they are +/// appended to the inner-ArrayBuilder and reports the length of the current run +/// of identical values. +/// +/// The following sequence of calls +/// +/// Append(2) +/// Append(2) +/// Append(2) +/// Append(7) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// will cause the inner-builder to receive only 3 Append calls +/// +/// Append(2) +/// Append(7) +/// Append(2) +/// FinishInternal() +/// +/// Note that values returned by length(), null_count() and capacity() are +/// related to the compressed array built by the inner-ArrayBuilder. +class RunCompressorBuilder : public ArrayBuilder { + public: + RunCompressorBuilder(MemoryPool* pool, std::shared_ptr inner_builder, + std::shared_ptr type); + + ~RunCompressorBuilder() override; + + ARROW_DISALLOW_COPY_AND_ASSIGN(RunCompressorBuilder); + + /// \brief Called right before a run is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run is closed (i.e. run-length is known and value is appended to the + /// inner builder). + /// + /// \param value can be NULLPTR if closing a run of NULLs + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRun(const std::shared_ptr& value, + int64_t length) { + return Status::OK(); + } + + /// \brief Called right before a run of empty values is being closed + /// + /// Subclasses can override this function to perform an additional action when + /// a run of empty values is appended (i.e. run-length is known and a single + /// empty value is appended to the inner builder). + /// + /// \param length the greater than 0 length of the value run being closed + virtual Status WillCloseRunOfEmptyValues(int64_t length) { return Status::OK(); } + + /// \brief Allocate enough memory for a given number of array elements. + /// + /// NOTE: Conservatively resizing a run-length compressed array for a given + /// number of logical elements is not possible, since the physical length will + /// vary depending on the values to be appended in the future. But we can + /// pessimistically assume that each run will contain a single value and + /// allocate that number of runs. 
+ Status Resize(int64_t capacity) override { return ResizePhysical(capacity); } + + /// \brief Allocate enough memory for a given number of runs. + /// + /// Like Resize on non-encoded builders, it does not account for variable size + /// data. + Status ResizePhysical(int64_t capacity); + + Status ReservePhysical(int64_t additional_capacity) { + return Reserve(additional_capacity); + } + + void Reset() override; + + Status AppendNull() final { return AppendNulls(1); } + Status AppendNulls(int64_t length) override; + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + Status AppendEmptyValues(int64_t length) override; + + Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override; + Status AppendScalars(const ScalarVector& scalars) override; + + // AppendArraySlice() is not implemented. + + /// \brief Append a slice of an array containing values from already + /// compressed runs. + /// + /// NOTE: WillCloseRun() is not called as the length of each run cannot be + /// determined at this point. Caller should ensure that !has_open_run() by + /// calling FinishCurrentRun() before calling this. + /// + /// Pre-condition: !has_open_run() + Status AppendRunCompressedArraySlice(const ArraySpan& array, int64_t offset, + int64_t length); + + /// \brief Forces the closing of the current run if one is currently open. + /// + /// This can be called when one wants to ensure the current run will not be + /// extended. This may cause identical values to appear close to each other in + /// the underlying array (i.e. two runs that could be a single run) if more + /// values are appended after this is called. + /// + /// Finish() and FinishInternal() call this automatically. + virtual Status FinishCurrentRun(); + + Status FinishInternal(std::shared_ptr* out) override; + + ArrayBuilder& inner_builder() const { return *inner_builder_; } + + std::shared_ptr type() const override { return inner_builder_->type(); } + + bool has_open_run() const { return current_run_length_ > 0; } + int64_t open_run_length() const { return current_run_length_; } + + private: + inline void UpdateDimensions() { + capacity_ = inner_builder_->capacity(); + length_ = inner_builder_->length(); + null_count_ = inner_builder_->null_count(); + } + + private: + std::shared_ptr inner_builder_; + std::shared_ptr current_value_ = NULLPTR; + int64_t current_run_length_ = 0; +}; + +} // namespace internal + +// ---------------------------------------------------------------------- +// RunEndEncoded builder + +/// \brief Run-end encoded array builder. +/// +/// NOTE: the value returned by and capacity() is related to the +/// compressed array (physical) and not the decoded array (logical) that is +/// run-end encoded. null_count() always returns 0. length(), on the other hand, +/// returns the logical length of the run-end encoded array. +class ARROW_EXPORT RunEndEncodedBuilder : public ArrayBuilder { + private: + // An internal::RunCompressorBuilder that produces a run-end in the + // RunEndEncodedBuilder every time a value-run is closed. 
+  class ValueRunBuilder : public internal::RunCompressorBuilder {
+   public:
+    ValueRunBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& value_builder,
+                    const std::shared_ptr<DataType>& value_type,
+                    RunEndEncodedBuilder& ree_builder);
+
+    ~ValueRunBuilder() override = default;
+
+    Status WillCloseRun(const std::shared_ptr<const Scalar>&, int64_t length) override {
+      return ree_builder_.CloseRun(length);
+    }
+
+    Status WillCloseRunOfEmptyValues(int64_t length) override {
+      return ree_builder_.CloseRun(length);
+    }
+
+   private:
+    RunEndEncodedBuilder& ree_builder_;
+  };
+
+ public:
+  RunEndEncodedBuilder(MemoryPool* pool,
+                       const std::shared_ptr<ArrayBuilder>& run_end_builder,
+                       const std::shared_ptr<ArrayBuilder>& value_builder,
+                       std::shared_ptr<DataType> type);
+
+  /// \brief Allocate enough memory for a given number of array elements.
+  ///
+  /// NOTE: Conservatively resizing an REE for a given number of logical
+  /// elements is not possible, since the physical length will vary depending on
+  /// the values to be appended in the future. But we can pessimistically assume
+  /// that each run will contain a single value and allocate that number of
+  /// runs.
+  Status Resize(int64_t capacity) override { return ResizePhysical(capacity); }
+
+  /// \brief Allocate enough memory for a given number of runs.
+  Status ResizePhysical(int64_t capacity);
+
+  /// \brief Ensure that there is enough space allocated to append the indicated
+  /// number of runs without any further reallocation. Overallocation is
+  /// used in order to minimize the impact of incremental ReservePhysical() calls.
+  /// Note that additional_capacity is relative to the current number of elements
+  /// rather than to the current capacity, so calls to Reserve() which are not
+  /// interspersed with additions of new elements may not increase the capacity.
+  ///
+  /// \param[in] additional_capacity the number of additional runs
+  /// \return Status
+  Status ReservePhysical(int64_t additional_capacity) {
+    return Reserve(additional_capacity);
+  }
+
+  void Reset() override;
+
+  Status AppendNull() final { return AppendNulls(1); }
+  Status AppendNulls(int64_t length) override;
+
+  Status AppendEmptyValue() final { return AppendEmptyValues(1); }
+  Status AppendEmptyValues(int64_t length) override;
+  Status AppendScalar(const Scalar& scalar, int64_t n_repeats) override;
+  Status AppendScalars(const ScalarVector& scalars) override;
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override;
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<RunEndEncodedArray>* out) { return FinishTyped(out); }
+
+  /// \brief Forces the closing of the current run if one is currently open.
+  ///
+  /// This can be called when one wants to ensure the current run will not be
+  /// extended. This may cause identical values to appear close to each other in
+  /// the values array (i.e. two runs that could be a single run) if more
+  /// values are appended after this is called.
+  Status FinishCurrentRun();
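+
+  // Illustrative usage sketch (not part of the original header; assumes a
+  // run-end type of int32 and a value type of int64, with run_end_builder and
+  // value_builder created elsewhere):
+  //
+  //   RunEndEncodedBuilder builder(default_memory_pool(), run_end_builder,
+  //                                value_builder,
+  //                                run_end_encoded(int32(), int64()));
+  //   ARROW_RETURN_NOT_OK(builder.AppendScalar(*MakeScalar(int64_t(1)), 3));
+  //   ARROW_RETURN_NOT_OK(builder.AppendScalar(*MakeScalar(int64_t(2)), 1));
+  //   std::shared_ptr<RunEndEncodedArray> array;
+  //   ARROW_RETURN_NOT_OK(builder.Finish(&array));
+  //   // array holds run_ends = [3, 4] and values = [1, 2].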
+
+  std::shared_ptr<DataType> type() const override;
+
+ private:
+  /// \brief Update physical capacity and logical length
+  ///
+  /// \param committed_logical_length number of logical values that have been
+  /// committed to the values array
+  /// \param open_run_length number of logical values in the currently open run, if any
+  inline void UpdateDimensions(int64_t committed_logical_length,
+                               int64_t open_run_length) {
+    capacity_ = run_end_builder().capacity();
+    length_ = committed_logical_length + open_run_length;
+    committed_logical_length_ = committed_logical_length;
+  }
+
+  // Pre-condition: !value_run_builder_.has_open_run()
+  template <typename RunEndCType>
+  Status DoAppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length);
+
+  template <typename RunEndCType>
+  Status DoAppendRunEnd(int64_t run_end);
+
+  /// \brief Cast run_end to the appropriate type and append it to the run_ends
+  /// array.
+  Status AppendRunEnd(int64_t run_end);
+
+  /// \brief Close a run by appending a value to the run_ends array and updating
+  /// length_ to reflect the new run.
+  ///
+  /// Pre-condition: run_length > 0.
+  [[nodiscard]] Status CloseRun(int64_t run_length);
+
+  ArrayBuilder& run_end_builder();
+  ArrayBuilder& value_builder();
+
+ private:
+  std::shared_ptr<RunEndEncodedType> type_;
+  ValueRunBuilder* value_run_builder_;
+  // The length not counting the current open run in the value_run_builder_
+  int64_t committed_logical_length_ = 0;
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h
new file mode 100644
index 0000000000000000000000000000000000000000..da29ae3124b5d3da32605503b29edf6920cdf6d6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Contains declarations of time-related Arrow builder types.
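+//
+// Illustrative usage of the builders declared below (not part of the original
+// header; values are arbitrary):
+//
+//   DayTimeIntervalBuilder builder;
+//   ARROW_RETURN_NOT_OK(
+//       builder.Append(DayTimeIntervalType::DayMilliseconds{1, 500}));
+//   std::shared_ptr<Array> array;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&array));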
+
+#pragma once
+
+#include
+
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_primitive.h"
+
+namespace arrow {
+
+/// \addtogroup temporal-builders
+///
+/// @{
+
+// TODO(ARROW-7938): this class is untested
+
+class ARROW_EXPORT DayTimeIntervalBuilder : public NumericBuilder<DayTimeIntervalType> {
+ public:
+  using DayMilliseconds = DayTimeIntervalType::DayMilliseconds;
+
+  explicit DayTimeIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : DayTimeIntervalBuilder(day_time_interval(), pool, alignment) {}
+
+  explicit DayTimeIntervalBuilder(std::shared_ptr<DataType> type,
+                                  MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<DayTimeIntervalType>(type, pool, alignment) {}
+};
+
+class ARROW_EXPORT MonthDayNanoIntervalBuilder
+    : public NumericBuilder<MonthDayNanoIntervalType> {
+ public:
+  using MonthDayNanos = MonthDayNanoIntervalType::MonthDayNanos;
+
+  explicit MonthDayNanoIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : MonthDayNanoIntervalBuilder(month_day_nano_interval(), pool, alignment) {}
+
+  explicit MonthDayNanoIntervalBuilder(std::shared_ptr<DataType> type,
+                                       MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<MonthDayNanoIntervalType>(type, pool, alignment) {}
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
new file mode 100644
index 0000000000000000000000000000000000000000..718ef4c32cebef1d30e4f7c036a7ab8f4b333e4a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
@@ -0,0 +1,254 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "arrow/array/array_nested.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/memory_pool.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup nested-builders
+///
+/// @{
+
+/// \brief Base class for union array builders.
+///
+/// Note that while we subclass ArrayBuilder, as union types do not have a
+/// validity bitmap, the bitmap builder member of ArrayBuilder is not used.
+class ARROW_EXPORT BasicUnionBuilder : public ArrayBuilder {
+ public:
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<UnionArray>* out) { return FinishTyped(out); }
+
+  /// \brief Make a new child builder available to the UnionArray
+  ///
+  /// \param[in] new_child the child builder
+  /// \param[in] field_name the name of the field in the union array type
+  /// if type inference is used
+  /// \return child index, which is the "type" argument that needs
+  /// to be passed to the "Append" method to add a new element to
+  /// the union array.
+  int8_t AppendChild(const std::shared_ptr<ArrayBuilder>& new_child,
+                     const std::string& field_name = "");
+
+  std::shared_ptr<DataType> type() const override;
+
+  int64_t length() const override { return types_builder_.length(); }
+
+ protected:
+  BasicUnionBuilder(MemoryPool* pool, int64_t alignment,
+                    const std::vector<std::shared_ptr<ArrayBuilder>>& children,
+                    const std::shared_ptr<DataType>& type);
+
+  int8_t NextTypeId();
+
+  std::vector<std::shared_ptr<Field>> child_fields_;
+  std::vector<int8_t> type_codes_;
+  UnionMode::type mode_;
+
+  std::vector<ArrayBuilder*> type_id_to_children_;
+  std::vector<int> type_id_to_child_id_;
+  // for all type_id < dense_type_id_, type_id_to_children_[type_id] != nullptr
+  int8_t dense_type_id_ = 0;
+  TypedBufferBuilder<int8_t> types_builder_;
+};
+
+/// \class DenseUnionBuilder
+///
+/// This API is EXPERIMENTAL.
+class ARROW_EXPORT DenseUnionBuilder : public BasicUnionBuilder {
+ public:
+  /// Use this constructor to initialize the UnionBuilder with no child builders,
+  /// allowing type to be inferred. You will need to call AppendChild for each of the
+  /// children builders you want to use.
+  explicit DenseUnionBuilder(MemoryPool* pool,
+                             int64_t alignment = kDefaultBufferAlignment)
+      : BasicUnionBuilder(pool, alignment, {}, dense_union(FieldVector{})),
+        offsets_builder_(pool, alignment) {}
+
+  /// Use this constructor to specify the type explicitly.
+  /// You can still add child builders to the union after using this constructor
+  DenseUnionBuilder(MemoryPool* pool,
+                    const std::vector<std::shared_ptr<ArrayBuilder>>& children,
+                    const std::shared_ptr<DataType>& type,
+                    int64_t alignment = kDefaultBufferAlignment)
+      : BasicUnionBuilder(pool, alignment, children, type),
+        offsets_builder_(pool, alignment) {}
+
+  Status AppendNull() final {
+    const int8_t first_child_code = type_codes_[0];
+    ArrayBuilder* child_builder = type_id_to_children_[first_child_code];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code));
+    ARROW_RETURN_NOT_OK(
+        offsets_builder_.Append(static_cast<int32_t>(child_builder->length())));
+    // Append a null arbitrarily to the first child
+    return child_builder->AppendNull();
+  }
+
+  Status AppendNulls(int64_t length) final {
+    const int8_t first_child_code = type_codes_[0];
+    ArrayBuilder* child_builder = type_id_to_children_[first_child_code];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code));
+    ARROW_RETURN_NOT_OK(
+        offsets_builder_.Append(length, static_cast<int32_t>(child_builder->length())));
+    // Append just a single null to the first child
+    return child_builder->AppendNull();
+  }
+
+  Status AppendEmptyValue() final {
+    const int8_t first_child_code = type_codes_[0];
+    ArrayBuilder* child_builder = type_id_to_children_[first_child_code];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code));
+    ARROW_RETURN_NOT_OK(
+        offsets_builder_.Append(static_cast<int32_t>(child_builder->length())));
+    // Append an empty value arbitrarily to the first child
+    return child_builder->AppendEmptyValue();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    const int8_t first_child_code = type_codes_[0];
+    ArrayBuilder* child_builder = type_id_to_children_[first_child_code];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code));
+    ARROW_RETURN_NOT_OK(
+        offsets_builder_.Append(length, static_cast<int32_t>(child_builder->length())));
+    // Append just a single empty value to the first child
+    return child_builder->AppendEmptyValue();
+  }
+
+  /// \brief Append an element to the UnionArray. This must be followed
+  /// by an append to the appropriate child builder.
+  ///
+  /// \param[in] next_type type_id of the child to which the next value will be appended.
+  ///
+  /// The corresponding child builder must be appended to independently after this method
+  /// is called.
+  Status Append(int8_t next_type) {
+    ARROW_RETURN_NOT_OK(types_builder_.Append(next_type));
+    if (type_id_to_children_[next_type]->length() == kListMaximumElements) {
+      return Status::CapacityError(
+          "a dense UnionArray cannot contain more than 2^31 - 1 elements from a single "
+          "child");
+    }
+    auto offset = static_cast<int32_t>(type_id_to_children_[next_type]->length());
+    return offsets_builder_.Append(offset);
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override;
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+ private:
+  TypedBufferBuilder<int32_t> offsets_builder_;
+};
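+
+// Illustrative DenseUnionBuilder usage (not part of the original header;
+// builder names are hypothetical). A dense union stores one offset per
+// element, so each Append(type_code) is paired with exactly one append to
+// the selected child:
+//
+//   DenseUnionBuilder builder(default_memory_pool());
+//   auto ints = std::make_shared<Int32Builder>();
+//   auto strs = std::make_shared<StringBuilder>();
+//   const int8_t int_code = builder.AppendChild(ints, "i");
+//   const int8_t str_code = builder.AppendChild(strs, "s");
+//   ARROW_RETURN_NOT_OK(builder.Append(int_code));
+//   ARROW_RETURN_NOT_OK(ints->Append(7));
+//   ARROW_RETURN_NOT_OK(builder.Append(str_code));
+//   ARROW_RETURN_NOT_OK(strs->Append("seven"));
+//   std::shared_ptr<UnionArray> array;
+//   ARROW_RETURN_NOT_OK(builder.Finish(&array));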
+
+/// \class SparseUnionBuilder
+///
+/// This API is EXPERIMENTAL.
+class ARROW_EXPORT SparseUnionBuilder : public BasicUnionBuilder {
+ public:
+  /// Use this constructor to initialize the UnionBuilder with no child builders,
+  /// allowing type to be inferred. You will need to call AppendChild for each of the
+  /// children builders you want to use.
+  explicit SparseUnionBuilder(MemoryPool* pool,
+                              int64_t alignment = kDefaultBufferAlignment)
+      : BasicUnionBuilder(pool, alignment, {}, sparse_union(FieldVector{})) {}
+
+  /// Use this constructor to specify the type explicitly.
+  /// You can still add child builders to the union after using this constructor
+  SparseUnionBuilder(MemoryPool* pool,
+                     const std::vector<std::shared_ptr<ArrayBuilder>>& children,
+                     const std::shared_ptr<DataType>& type,
+                     int64_t alignment = kDefaultBufferAlignment)
+      : BasicUnionBuilder(pool, alignment, children, type) {}
+
+  /// \brief Append a null value.
+  ///
+  /// A null is appended to the first child, empty values to the other children.
+  Status AppendNull() final {
+    const auto first_child_code = type_codes_[0];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code));
+    ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNull());
+    for (int i = 1; i < static_cast<int>(type_codes_.size()); ++i) {
+      ARROW_RETURN_NOT_OK(type_id_to_children_[type_codes_[i]]->AppendEmptyValue());
+    }
+    return Status::OK();
+  }
+
+  /// \brief Append multiple null values.
+  ///
+  /// Nulls are appended to the first child, empty values to the other children.
+  Status AppendNulls(int64_t length) final {
+    const auto first_child_code = type_codes_[0];
+    ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code));
+    ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNulls(length));
+    for (int i = 1; i < static_cast<int>(type_codes_.size()); ++i) {
+      ARROW_RETURN_NOT_OK(
+          type_id_to_children_[type_codes_[i]]->AppendEmptyValues(length));
+    }
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    ARROW_RETURN_NOT_OK(types_builder_.Append(type_codes_[0]));
+    for (int8_t code : type_codes_) {
+      ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValue());
+    }
+    return Status::OK();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(types_builder_.Append(length, type_codes_[0]));
+    for (int8_t code : type_codes_) {
+      ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValues(length));
+    }
+    return Status::OK();
+  }
+
+  /// \brief Append an element to the UnionArray. This must be followed
+  /// by an append to the appropriate child builder.
+  ///
+  /// \param[in] next_type type_id of the child to which the next value will be appended.
+  ///
+  /// The corresponding child builder must be appended to independently after this method
+  /// is called, and all other child builders must have a null or empty value appended.
+  Status Append(int8_t next_type) { return types_builder_.Append(next_type); }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override;
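+
+  // Illustrative contract for Append() on a sparse union (not part of the
+  // original header; builder names are hypothetical). All children advance in
+  // lockstep, so every logical element appends to every child:
+  //
+  //   ARROW_RETURN_NOT_OK(builder.Append(int_code));   // select the int child
+  //   ARROW_RETURN_NOT_OK(ints->Append(7));            // value for this slot
+  //   ARROW_RETURN_NOT_OK(strs->AppendEmptyValue());   // placeholder in others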
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7597aad812c4ca20a9335afa1bc44129b2ad727
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/concatenate.h
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief Concatenate arrays
+///
+/// \param[in] arrays a vector of arrays to be concatenated
+/// \param[in] pool memory to store the result will be allocated from this memory pool
+/// \return the concatenated array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Concatenate(const ArrayVector& arrays,
+                                           MemoryPool* pool = default_memory_pool());
+
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8a6663cec5803eafdd7c3bce47d8f456ee89e3f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h
@@ -0,0 +1,634 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>  // IWYU pragma: export
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow/buffer.h"
+#include "arrow/result.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/span.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace internal {
+// ----------------------------------------------------------------------
+// Null handling for types without a validity bitmap and the dictionary type
+
+ARROW_EXPORT bool IsNullSparseUnion(const ArrayData& data, int64_t i);
+ARROW_EXPORT bool IsNullDenseUnion(const ArrayData& data, int64_t i);
+ARROW_EXPORT bool IsNullRunEndEncoded(const ArrayData& data, int64_t i);
+
+ARROW_EXPORT bool UnionMayHaveLogicalNulls(const ArrayData& data);
+ARROW_EXPORT bool RunEndEncodedMayHaveLogicalNulls(const ArrayData& data);
+ARROW_EXPORT bool DictionaryMayHaveLogicalNulls(const ArrayData& data);
+}  // namespace internal
+
+// When slicing, we do not know the null count of the sliced range without
+// doing some computation. To avoid doing this eagerly, we set the null count
+// to -1 (any negative number will do). When Array::null_count is called the
+// first time, the null count will be computed.
See ARROW-33 +constexpr int64_t kUnknownNullCount = -1; + +// ---------------------------------------------------------------------- +// Generic array data container + +/// \class ArrayData +/// \brief Mutable container for generic Arrow array data +/// +/// This data structure is a self-contained representation of the memory and +/// metadata inside an Arrow array data structure (called vectors in Java). The +/// classes arrow::Array and its subclasses provide strongly-typed accessors +/// with support for the visitor pattern and other affordances. +/// +/// This class is designed for easy internal data manipulation, analytical data +/// processing, and data transport to and from IPC messages. For example, we +/// could cast from int64 to float64 like so: +/// +/// Int64Array arr = GetMyData(); +/// auto new_data = arr.data()->Copy(); +/// new_data->type = arrow::float64(); +/// DoubleArray double_arr(new_data); +/// +/// This object is also useful in an analytics setting where memory may be +/// reused. For example, if we had a group of operations all returning doubles, +/// say: +/// +/// Log(Sqrt(Expr(arr))) +/// +/// Then the low-level implementations of each of these functions could have +/// the signatures +/// +/// void Log(const ArrayData& values, ArrayData* out); +/// +/// As another example a function may consume one or more memory buffers in an +/// input array and replace them with newly-allocated data, changing the output +/// data type as well. +struct ARROW_EXPORT ArrayData { + ArrayData() = default; + + ArrayData(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : type(std::move(type)), length(length), null_count(null_count), offset(offset) {} + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + } + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + this->child_data = std::move(child_data); + } + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + std::shared_ptr dictionary, int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + // Move constructor + ArrayData(ArrayData&& other) noexcept + : type(std::move(other.type)), + length(other.length), + offset(other.offset), + buffers(std::move(other.buffers)), + child_data(std::move(other.child_data)), + dictionary(std::move(other.dictionary)) { + SetNullCount(other.null_count); + } + + // Copy constructor + ArrayData(const ArrayData& other) noexcept + : type(other.type), + length(other.length), + offset(other.offset), + buffers(other.buffers), + child_data(other.child_data), + dictionary(other.dictionary) { + 
SetNullCount(other.null_count); + } + + // Move assignment + ArrayData& operator=(ArrayData&& other) { + type = std::move(other.type); + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = std::move(other.buffers); + child_data = std::move(other.child_data); + dictionary = std::move(other.dictionary); + return *this; + } + + // Copy assignment + ArrayData& operator=(const ArrayData& other) { + type = other.type; + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = other.buffers; + child_data = other.child_data; + dictionary = other.dictionary; + return *this; + } + + std::shared_ptr Copy() const { return std::make_shared(*this); } + + /// \brief Copy all buffers and children recursively to destination MemoryManager + /// + /// This utilizes MemoryManager::CopyBuffer to create a new ArrayData object + /// recursively copying the buffers and all child buffers to the destination + /// memory manager. This includes dictionaries if applicable. + Result> CopyTo( + const std::shared_ptr& to) const; + /// \brief View or Copy this ArrayData to destination memory manager. + /// + /// Tries to view the buffer contents on the given memory manager's device + /// if possible (to avoid a copy) but falls back to copying if a no-copy view + /// isn't supported. + Result> ViewOrCopyTo( + const std::shared_ptr& to) const; + + bool IsNull(int64_t i) const { return !IsValid(i); } + + bool IsValid(int64_t i) const { + if (buffers[0] != NULLPTR) { + return bit_util::GetBit(buffers[0]->data(), i + offset); + } + const auto type = this->type->id(); + if (type == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*this, i); + } + if (type == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*this, i); + } + if (type == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*this, i); + } + return null_count.load() != length; + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, offset); + } + + // Like GetValues, but returns NULLPTR instead of aborting if the underlying + // buffer is not a CPU buffer. + template + inline const T* GetValuesSafe(int i, int64_t absolute_offset) const { + if (buffers[i] && buffers[i]->is_cpu()) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValuesSafe(int i) const { + return GetValuesSafe(i, offset); + } + + // Access a buffer's data as a typed C pointer + template + inline T* GetMutableValues(int i, int64_t absolute_offset) { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->mutable_data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline T* GetMutableValues(int i) { + return GetMutableValues(i, offset); + } + + /// \brief Construct a zero-copy slice of the data with the given offset and length + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Input-checking variant of Slice + /// + /// An Invalid Status is returned if the requested slice falls out of bounds. + /// Note that unlike Slice, `length` isn't clamped to the available buffer size. 
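+  ///
+  /// For example (illustrative): on an array of length 10, SliceSafe(8, 2)
+  /// succeeds while SliceSafe(8, 3) returns an Invalid Status, whereas
+  /// Slice(8, 3) would clamp the slice to the 2 available elements.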
+  Result<std::shared_ptr<ArrayData>> SliceSafe(int64_t offset, int64_t length) const;
+
+  void SetNullCount(int64_t v) { null_count.store(v); }
+
+  /// \brief Return physical null count, or compute and set it if it's not known
+  int64_t GetNullCount() const;
+
+  /// \brief Return true if the data has a validity bitmap and the physical null
+  /// count is known to be non-zero or not yet known.
+  ///
+  /// Note that this is not the same as MayHaveLogicalNulls, which also checks
+  /// for the presence of nulls in child data for types like unions and run-end
+  /// encoded types.
+  ///
+  /// \see HasValidityBitmap
+  /// \see MayHaveLogicalNulls
+  bool MayHaveNulls() const {
+    // If an ArrayData is slightly malformed it may have kUnknownNullCount set
+    // but no buffer
+    return null_count.load() != 0 && buffers[0] != NULLPTR;
+  }
+
+  /// \brief Return true if the data has a validity bitmap
+  bool HasValidityBitmap() const { return buffers[0] != NULLPTR; }
+
+  /// \brief Return true if the validity bitmap may have 0's in it, or if the
+  /// child arrays (in the case of types without a validity bitmap) may have
+  /// nulls, or if the dictionary of a dictionary array may have nulls.
+  ///
+  /// This is not a drop-in replacement for MayHaveNulls, as historically
+  /// MayHaveNulls() has been used to check for the presence of a validity
+  /// bitmap that needs to be checked.
+  ///
+  /// Code that previously used MayHaveNulls() and then dealt with the validity
+  /// bitmap directly can be fixed to handle all types correctly without
+  /// performance degradation when handling most types by adopting
+  /// HasValidityBitmap and MayHaveLogicalNulls.
+  ///
+  /// Before:
+  ///
+  ///   uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+  ///   for (int64_t i = 0; i < array.length; ++i) {
+  ///     if (validity && !bit_util::GetBit(validity, i)) {
+  ///       continue;  // skip a NULL
+  ///     }
+  ///     ...
+  ///   }
+  ///
+  /// After:
+  ///
+  ///   bool all_valid = !array.MayHaveLogicalNulls();
+  ///   uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+  ///   for (int64_t i = 0; i < array.length; ++i) {
+  ///     bool is_valid = all_valid ||
+  ///                     (validity && bit_util::GetBit(validity, i)) ||
+  ///                     array.IsValid(i);
+  ///     if (!is_valid) {
+  ///       continue;  // skip a NULL
+  ///     }
+  ///     ...
+  ///   }
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0] != NULLPTR) {
+      return null_count.load() != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return internal::UnionMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return internal::RunEndEncodedMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::DICTIONARY) {
+      return internal::DictionaryMayHaveLogicalNulls(*this);
+    }
+    return null_count.load() != 0;
+  }
+
+  /// \brief Computes the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  std::shared_ptr<DataType> type;
+  int64_t length = 0;
+  mutable std::atomic<int64_t> null_count{0};
+  // The logical start point into the physical buffers (in values, not bytes).
+  // Note that, for child data, this must be *added* to the child data's own offset.
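+  // For example (illustrative, not from the original header): an int32 array
+  // sliced as [2, 5) over a 5-element buffer keeps the same buffers and gets
+  // offset = 2, length = 3; GetValues<int32_t>(1) already accounts for this
+  // offset.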
+ int64_t offset = 0; + std::vector> buffers; + std::vector> child_data; + + // The dictionary for this Array, if any. Only used for dictionary type + std::shared_ptr dictionary; +}; + +/// \brief A non-owning Buffer reference +struct ARROW_EXPORT BufferSpan { + // It is the user of this class's responsibility to ensure that + // buffers that were const originally are not written to + // accidentally. + uint8_t* data = NULLPTR; + int64_t size = 0; + // Pointer back to buffer that owns this memory + const std::shared_ptr* owner = NULLPTR; + + template + const T* data_as() const { + return reinterpret_cast(data); + } + template + T* mutable_data_as() { + return reinterpret_cast(data); + } +}; + +/// \brief EXPERIMENTAL: A non-owning ArrayData reference that is cheaply +/// copyable and does not contain any shared_ptr objects. Do not use in public +/// APIs aside from compute kernels for now +struct ARROW_EXPORT ArraySpan { + const DataType* type = NULLPTR; + int64_t length = 0; + mutable int64_t null_count = kUnknownNullCount; + int64_t offset = 0; + BufferSpan buffers[3]; + + ArraySpan() = default; + + explicit ArraySpan(const DataType* type, int64_t length) : type(type), length(length) {} + + ArraySpan(const ArrayData& data) { // NOLINT implicit conversion + SetMembers(data); + } + explicit ArraySpan(const Scalar& data) { FillFromScalar(data); } + + /// If dictionary-encoded, put dictionary in the first entry + std::vector child_data; + + /// \brief Populate ArraySpan to look like an array of length 1 pointing at + /// the data members of a Scalar value + void FillFromScalar(const Scalar& value); + + void SetMembers(const ArrayData& data); + + void SetBuffer(int index, const std::shared_ptr& buffer) { + this->buffers[index].data = const_cast(buffer->data()); + this->buffers[index].size = buffer->size(); + this->buffers[index].owner = &buffer; + } + + const ArraySpan& dictionary() const { return child_data[0]; } + + /// \brief Return the number of buffers (out of 3) that are used to + /// constitute this array + int num_buffers() const; + + // Access a buffer's data as a typed C pointer + template + inline T* GetValues(int i, int64_t absolute_offset) { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline T* GetValues(int i) { + return GetValues(i, this->offset); + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + return reinterpret_cast(buffers[i].data) + absolute_offset; + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, this->offset); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for + /// this array type + /// \return A span of the requested length + template + util::span GetSpan(int i, int64_t length) const { + const int64_t buffer_length = buffers[i].size / static_cast(sizeof(T)); + assert(i > 0 && length + offset <= buffer_length); + ARROW_UNUSED(buffer_length); + return util::span(buffers[i].data_as() + this->offset, length); + } + + /// \brief Access a buffer's data as a span + /// + /// \param i The buffer index + /// \param length The required length (in number of typed values) of the requested span + /// \pre i > 0 + /// \pre length <= the length of the buffer (in number of values) that's expected for 
+  /// this array type
+  /// \return A span of the requested length
+  template <typename T>
+  util::span<T> GetSpan(int i, int64_t length) {
+    const int64_t buffer_length = buffers[i].size / static_cast<int64_t>(sizeof(T));
+    assert(i > 0 && length + offset <= buffer_length);
+    ARROW_UNUSED(buffer_length);
+    return util::span<T>(buffers[i].mutable_data_as<T>() + this->offset, length);
+  }
+
+  inline bool IsNull(int64_t i) const { return !IsValid(i); }
+
+  inline bool IsValid(int64_t i) const {
+    if (this->buffers[0].data != NULLPTR) {
+      return bit_util::GetBit(this->buffers[0].data, i + this->offset);
+    } else {
+      const auto type = this->type->id();
+      if (type == Type::SPARSE_UNION) {
+        return !IsNullSparseUnion(i);
+      }
+      if (type == Type::DENSE_UNION) {
+        return !IsNullDenseUnion(i);
+      }
+      if (type == Type::RUN_END_ENCODED) {
+        return !IsNullRunEndEncoded(i);
+      }
+      return this->null_count != this->length;
+    }
+  }
+
+  std::shared_ptr<ArrayData> ToArrayData() const;
+
+  std::shared_ptr<Array> ToArray() const;
+
+  std::shared_ptr<Buffer> GetBuffer(int index) const {
+    const BufferSpan& buf = this->buffers[index];
+    if (buf.owner) {
+      return *buf.owner;
+    } else if (buf.data != NULLPTR) {
+      // Buffer points to some memory without an owning buffer
+      return std::make_shared<Buffer>(buf.data, buf.size);
+    } else {
+      return NULLPTR;
+    }
+  }
+
+  void SetSlice(int64_t offset, int64_t length) {
+    this->offset = offset;
+    this->length = length;
+    if (this->type->id() == Type::NA) {
+      this->null_count = this->length;
+    } else if (this->MayHaveNulls()) {
+      this->null_count = kUnknownNullCount;
+    } else {
+      this->null_count = 0;
+    }
+  }
+
+  /// \brief Return physical null count, or compute and set it if it's not known
+  int64_t GetNullCount() const;
+
+  /// \brief Return true if the array has a validity bitmap and the physical null
+  /// count is known to be non-zero or not yet known
+  ///
+  /// Note that this is not the same as MayHaveLogicalNulls, which also checks
+  /// for the presence of nulls in child data for types like unions and run-end
+  /// encoded types.
+  ///
+  /// \see HasValidityBitmap
+  /// \see MayHaveLogicalNulls
+  bool MayHaveNulls() const {
+    // If an ArrayData is slightly malformed it may have kUnknownNullCount set
+    // but no buffer
+    return null_count != 0 && buffers[0].data != NULLPTR;
+  }
+
+  /// \brief Return true if the array has a validity bitmap
+  bool HasValidityBitmap() const { return buffers[0].data != NULLPTR; }
+
+  /// \brief Return true if the validity bitmap may have 0's in it, or if the
+  /// child arrays (in the case of types without a validity bitmap) may have
+  /// nulls, or if the dictionary of a dictionary array may have nulls.
+  ///
+  /// \see ArrayData::MayHaveLogicalNulls
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0].data != NULLPTR) {
+      return null_count != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return UnionMayHaveLogicalNulls();
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return RunEndEncodedMayHaveLogicalNulls();
+    }
+    if (t == Type::DICTIONARY) {
+      return DictionaryMayHaveLogicalNulls();
+    }
+    return null_count != 0;
+  }
+
+  /// \brief Compute the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the logical null count every time it is called.
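+  ///
+  /// For example (illustrative): a run-end encoded array with run ends
+  /// [2, 5] and values [null, 7] has no validity bitmap of its own, yet its
+  /// logical null count is 2 (the length of the first run).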
+ /// + /// \see GetNullCount + int64_t ComputeLogicalNullCount() const; + + /// Some DataTypes (StringView, BinaryView) may have an arbitrary number of variadic + /// buffers. Since ArraySpan only has 3 buffers, we pack the variadic buffers into + /// buffers[2]; IE buffers[2].data points to the first shared_ptr of the + /// variadic set and buffers[2].size is the number of variadic buffers times + /// sizeof(shared_ptr). + /// + /// \see HasVariadicBuffers + util::span> GetVariadicBuffers() const; + bool HasVariadicBuffers() const; + + private: + ARROW_FRIEND_EXPORT friend bool internal::IsNullRunEndEncoded(const ArrayData& span, + int64_t i); + + bool IsNullSparseUnion(int64_t i) const; + bool IsNullDenseUnion(int64_t i) const; + + /// \brief Return true if the value at logical index i is null + /// + /// This function uses binary-search, so it has a O(log N) cost. + /// Iterating over the whole array and calling IsNull is O(N log N), so + /// for better performance it is recommended to use a + /// ree_util::RunEndEncodedArraySpan to iterate run by run instead. + bool IsNullRunEndEncoded(int64_t i) const; + + bool UnionMayHaveLogicalNulls() const; + bool RunEndEncodedMayHaveLogicalNulls() const; + bool DictionaryMayHaveLogicalNulls() const; +}; + +namespace internal { + +void FillZeroLengthArray(const DataType* type, ArraySpan* span); + +/// Construct a zero-copy view of this ArrayData with the given type. +/// +/// This method checks if the types are layout-compatible. +/// Nested types are traversed in depth-first order. Data buffers must have +/// the same item sizes, even though the logical types may be different. +/// An error is returned if the types are not layout-compatible. +ARROW_EXPORT +Result> GetArrayView(const std::shared_ptr& data, + const std::shared_ptr& type); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h new file mode 100644 index 0000000000000000000000000000000000000000..a405164b333f3b21a17e8414ef59a8a628c28579 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/array/array_nested.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief Compare two arrays, returning an edit script which expresses the difference +/// between them +/// +/// An edit script is an array of struct(insert: bool, run_length: int64_t). 
+/// Each element of "insert" determines whether an element was inserted into (true) +/// or deleted from (false) base. Each insertion or deletion is followed by a run of +/// elements which are unchanged from base to target; the length of this run is stored +/// in "run_length". (Note that the edit script begins and ends with a run of shared +/// elements but both fields of the struct must have the same length. To accommodate this +/// the first element of "insert" should be ignored.) +/// +/// For example for base "hlloo" and target "hello", the edit script would be +/// [ +/// {"insert": false, "run_length": 1}, // leading run of length 1 ("h") +/// {"insert": true, "run_length": 3}, // insert("e") then a run of length 3 ("llo") +/// {"insert": false, "run_length": 0} // delete("o") then an empty run +/// ] +/// +/// Diffing arrays containing nulls is not currently supported. +/// +/// \param[in] base baseline for comparison +/// \param[in] target an array of identical type to base whose elements differ from base's +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return an edit script array which can be applied to base to produce target +ARROW_EXPORT +Result> Diff(const Array& base, const Array& target, + MemoryPool* pool = default_memory_pool()); + +/// \brief visitor interface for easy traversal of an edit script +/// +/// visitor will be called for each hunk of insertions and deletions. +ARROW_EXPORT Status VisitEditScript( + const Array& edits, + const std::function& visitor); + +/// \brief return a function which will format an edit script in unified +/// diff format to os, given base and target arrays of type +ARROW_EXPORT Result< + std::function> +MakeUnifiedDiffFormatter(const DataType& type, std::ostream* os); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h new file mode 100644 index 0000000000000000000000000000000000000000..fd8e75ddb86405c523a8083f559dab0e72364e24 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/util.h @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
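+
+// Illustrative use of the factory functions declared below (not part of the
+// original header; error handling elided via ValueOrDie):
+//
+//   std::shared_ptr<Array> nulls = MakeArrayOfNull(int32(), 3).ValueOrDie();
+//   // nulls is [null, null, null] with type int32
+//   std::shared_ptr<Array> sevens =
+//       MakeArrayFromScalar(*MakeScalar(int32_t(7)), 3).ValueOrDie();
+//   // sevens is [7, 7, 7]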
+ +#pragma once + +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \defgroup array-factories Array factory functions +/// +/// @{ + +/// \brief Create a strongly-typed Array instance from generic ArrayData +/// \param[in] data the array contents +/// \return the resulting Array instance +ARROW_EXPORT +std::shared_ptr MakeArray(const std::shared_ptr& data); + +/// \brief Create a strongly-typed Array instance with all elements null +/// \param[in] type the array type +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayOfNull(const std::shared_ptr& type, + int64_t length, + MemoryPool* pool = default_memory_pool()); + +/// \brief Create an Array instance whose slots are the given scalar +/// \param[in] scalar the value with which to fill the array +/// \param[in] length the array length +/// \param[in] pool the memory pool to allocate memory from +ARROW_EXPORT +Result> MakeArrayFromScalar( + const Scalar& scalar, int64_t length, MemoryPool* pool = default_memory_pool()); + +/// \brief Create an empty Array of a given type +/// +/// The output Array will be of the given type. +/// +/// \param[in] type the data type of the empty Array +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting Array +ARROW_EXPORT +Result> MakeEmptyArray(std::shared_ptr type, + MemoryPool* pool = default_memory_pool()); + +/// @} + +namespace internal { + +/// \brief Swap endian of each element in a generic ArrayData +/// +/// As dictionaries are often shared between different arrays, dictionaries +/// are not swapped by this function and should be handled separately. +/// +/// \param[in] data the array contents +/// \param[in] pool the memory pool to allocate memory from +/// \return the resulting ArrayData whose elements were swapped +ARROW_EXPORT +Result> SwapEndianArrayData( + const std::shared_ptr& data, MemoryPool* pool = default_memory_pool()); + +/// Given a number of ArrayVectors, treat each ArrayVector as the +/// chunks of a chunked array. Then rechunk each ArrayVector such that +/// all ArrayVectors are chunked identically. It is mandatory that +/// all ArrayVectors contain the same total number of elements. +ARROW_EXPORT +std::vector RechunkArraysConsistently(const std::vector&); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h new file mode 100644 index 0000000000000000000000000000000000000000..3ebfa0a51edce21ca585862b1dbb074b6cf8d9c8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// Internal functions implementing Array::Validate() and friends. + +// O(1) array metadata validation + +ARROW_EXPORT +Status ValidateArray(const Array& array); + +ARROW_EXPORT +Status ValidateArray(const ArrayData& data); + +// O(N) array data validation. +// Note that, starting from 7.0.0, "full" routines also validate metadata. +// Before, ValidateArray() needed to be called before ValidateArrayFull() +// to ensure metadata correctness, otherwise invalid memory accesses +// may occur. + +ARROW_EXPORT +Status ValidateArrayFull(const Array& array); + +ARROW_EXPORT +Status ValidateArrayFull(const ArrayData& data); + +ARROW_EXPORT +Status ValidateUTF8(const Array& array); + +ARROW_EXPORT +Status ValidateUTF8(const ArrayData& data); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..6a56a632dfbd220ee1aaf749f1c7fb2b9ab0852e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Middleware implementation for sending and receiving HTTP cookies. + +#pragma once + +#include + +#include "arrow/flight/client_middleware.h" + +namespace arrow { +namespace flight { + +/// \brief Returns a ClientMiddlewareFactory that handles sending and receiving cookies. 
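+///
+/// Illustrative use (not from the original header; assumes FlightClientOptions
+/// exposes a `middleware` factory vector, as in Arrow's client API):
+///
+///   auto options = FlightClientOptions::Defaults();
+///   options.middleware.push_back(GetCookieFactory());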
+ARROW_FLIGHT_EXPORT std::shared_ptr GetCookieFactory(); + +} // namespace flight +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..8e3126553a953b9d8f2fcdb94b72f9214b690de1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces for defining middleware for Flight clients. Currently +// experimental. + +#pragma once + +#include + +#include "arrow/flight/middleware.h" +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/status.h" + +namespace arrow { +namespace flight { + +/// \brief Client-side middleware for a call, instantiated per RPC. +/// +/// Middleware should be fast and must be infallible: there is no way +/// to reject the call or report errors from the middleware instance. +class ARROW_FLIGHT_EXPORT ClientMiddleware { + public: + virtual ~ClientMiddleware() = default; + + /// \brief A callback before headers are sent. Extra headers can be + /// added, but existing ones cannot be read. + virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0; + + /// \brief A callback when headers are received from the server. + /// + /// This may be called more than once, since servers send both + /// headers and trailers. Some implementations (e.g. gRPC-Java, and + /// hence Arrow Flight in Java) may consolidate headers into + /// trailers if the RPC errored. + virtual void ReceivedHeaders(const CallHeaders& incoming_headers) = 0; + + /// \brief A callback after the call has completed. + virtual void CallCompleted(const Status& status) = 0; +}; + +/// \brief A factory for new middleware instances. +/// +/// If added to a client, this will be called for each RPC (including +/// Handshake) to give the opportunity to intercept the call. +/// +/// It is guaranteed that all client middleware methods are called +/// from the same thread that calls the RPC method implementation. +class ARROW_FLIGHT_EXPORT ClientMiddlewareFactory { + public: + virtual ~ClientMiddlewareFactory() = default; + + /// \brief A callback for the start of a new call. + /// + /// \param info Information about the call. + /// \param[out] middleware The middleware instance for this call. If + /// unset, will not add middleware to this call instance from + /// this factory. 
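+  ///
+  /// A factory might, for example (illustrative; MyTimingMiddleware is
+  /// hypothetical), only attach middleware to particular methods:
+  ///
+  ///   void StartCall(const CallInfo& info,
+  ///                  std::unique_ptr<ClientMiddleware>* middleware) override {
+  ///     if (info.method == FlightMethod::DoGet) {
+  ///       *middleware = std::make_unique<MyTimingMiddleware>();
+  ///     }
+  ///   }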
+ virtual void StartCall(const CallInfo& info, + std::unique_ptr* middleware) = 0; +}; + +} // namespace flight +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..fff107fa8fcf4b3871cf48266ac858db33e5f5c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/flight/client.h" +#include "arrow/flight/server.h" +#include "arrow/flight/types.h" +#include "arrow/pch.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h new file mode 100644 index 0000000000000000000000000000000000000000..ffcffe12e3c78bb92452f33644a8293c03aa8175 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h @@ -0,0 +1,328 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces to use for defining Flight RPC servers. 
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fff107fa8fcf4b3871cf48266ac858db33e5f5c2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Often-used headers, for precompiling.
+// If updating this header, please make sure you check compilation speed
+// before checking in. Adding headers which are not used extremely often
+// may incur a slowdown, since it makes the precompiled header heavier to load.
+
+#include "arrow/flight/client.h"
+#include "arrow/flight/server.h"
+#include "arrow/flight/types.h"
+#include "arrow/pch.h"
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h
new file mode 100644
index 0000000000000000000000000000000000000000..ffcffe12e3c78bb92452f33644a8293c03aa8175
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h
@@ -0,0 +1,328 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Interfaces to use for defining Flight RPC servers. API should be considered
+// experimental for now
+
+#pragma once
+
+#include <chrono>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/flight/server_auth.h"
+#include "arrow/flight/type_fwd.h"
+#include "arrow/flight/types.h"       // IWYU pragma: keep
+#include "arrow/flight/visibility.h"  // IWYU pragma: keep
+#include "arrow/ipc/dictionary.h"
+#include "arrow/ipc/options.h"
+#include "arrow/record_batch.h"
+
+namespace arrow {
+
+class Schema;
+class Status;
+
+namespace flight {
+
+/// \brief Interface that produces a sequence of IPC payloads to be sent in
+/// FlightData protobuf messages
+class ARROW_FLIGHT_EXPORT FlightDataStream {
+ public:
+  virtual ~FlightDataStream();
+
+  virtual std::shared_ptr<Schema> schema() = 0;
+
+  /// \brief Compute FlightPayload containing serialized RecordBatch schema
+  virtual arrow::Result<FlightPayload> GetSchemaPayload() = 0;
+
+  // When the stream is completed, the last payload written will have null
+  // metadata
+  virtual arrow::Result<FlightPayload> Next() = 0;
+
+  virtual Status Close();
+};
+
+/// \brief A basic implementation of FlightDataStream that will provide
+/// a sequence of FlightData messages to be written to a stream
+class ARROW_FLIGHT_EXPORT RecordBatchStream : public FlightDataStream {
+ public:
+  /// \param[in] reader produces a sequence of record batches
+  /// \param[in] options IPC options for writing
+  explicit RecordBatchStream(
+      const std::shared_ptr<RecordBatchReader>& reader,
+      const ipc::IpcWriteOptions& options = ipc::IpcWriteOptions::Defaults());
+  ~RecordBatchStream() override;
+
+  // inherit deprecated API
+  using FlightDataStream::GetSchemaPayload;
+  using FlightDataStream::Next;
+
+  std::shared_ptr<Schema> schema() override;
+  arrow::Result<FlightPayload> GetSchemaPayload() override;
+
+  arrow::Result<FlightPayload> Next() override;
+  Status Close() override;
+
+ private:
+  class RecordBatchStreamImpl;
+  std::unique_ptr<RecordBatchStreamImpl> impl_;
+};
+
+/// \brief A reader for IPC payloads uploaded by a client. Also allows
+/// reading application-defined metadata via the Flight protocol.
+class ARROW_FLIGHT_EXPORT FlightMessageReader : public MetadataRecordBatchReader {
+ public:
+  /// \brief Get the descriptor for this upload.
+  virtual const FlightDescriptor& descriptor() const = 0;
+};
+
+/// \brief A writer for application-specific metadata sent back to the
+/// client during an upload.
+class ARROW_FLIGHT_EXPORT FlightMetadataWriter {
+ public:
+  virtual ~FlightMetadataWriter();
+  /// \brief Send a message to the client.
+  virtual Status WriteMetadata(const Buffer& app_metadata) = 0;
+};
+
+/// \brief A writer for IPC payloads to a client. Also allows sending
+/// application-defined metadata via the Flight protocol.
+///
+/// This class offers more control compared to FlightDataStream,
+/// including the option to write metadata without data and the
+/// ability to interleave reading and writing.
+class ARROW_FLIGHT_EXPORT FlightMessageWriter : public MetadataRecordBatchWriter {
+ public:
+  virtual ~FlightMessageWriter() = default;
+};
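// ---------------------------------------------------------------------
// Editorial sketch (commented out; not upstream code): how a DoExchange
// handler might use the reader/writer pair declared above to echo the
// client's batches back. Assumes the Result-returning Next() of
// MetadataRecordBatchReader (declared in types.h) and the Begin()/
// WriteRecordBatch() helpers inherited by MetadataRecordBatchWriter.
//
//   arrow::Status Echo(arrow::flight::FlightMessageReader* reader,
//                      arrow::flight::FlightMessageWriter* writer) {
//     ARROW_ASSIGN_OR_RAISE(auto schema, reader->GetSchema());
//     ARROW_RETURN_NOT_OK(writer->Begin(schema));
//     while (true) {
//       ARROW_ASSIGN_OR_RAISE(auto chunk, reader->Next());
//       if (chunk.data == nullptr) break;  // stream drained
//       ARROW_RETURN_NOT_OK(writer->WriteRecordBatch(*chunk.data));
//     }
//     return arrow::Status::OK();
//   }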
+
+/// \brief Call state/contextual data.
+class ARROW_FLIGHT_EXPORT ServerCallContext {
+ public:
+  virtual ~ServerCallContext() = default;
+  /// \brief The name of the authenticated peer (may be the empty string)
+  virtual const std::string& peer_identity() const = 0;
+  /// \brief The peer address (not validated)
+  virtual const std::string& peer() const = 0;
+  /// \brief Add a response header. This is only valid before the server
+  /// starts sending the response; generally this isn't an issue unless you
+  /// are implementing FlightDataStream, ResultStream, or similar interfaces
+  /// yourself, or during a DoExchange or DoPut.
+  virtual void AddHeader(const std::string& key, const std::string& value) const = 0;
+  /// \brief Add a response trailer. This is only valid before the server
+  /// sends the final status; generally this isn't an issue unless your RPC
+  /// handler launches a thread or similar.
+  virtual void AddTrailer(const std::string& key, const std::string& value) const = 0;
+  /// \brief Look up a middleware by key. Do not maintain a reference
+  /// to the object beyond the request body.
+  /// \return The middleware, or nullptr if not found.
+  virtual ServerMiddleware* GetMiddleware(const std::string& key) const = 0;
+  /// \brief Check if the current RPC has been cancelled (by the client, by
+  /// a network error, etc.).
+  virtual bool is_cancelled() const = 0;
+  /// \brief The headers sent by the client for this call.
+  virtual const CallHeaders& incoming_headers() const = 0;
+};
+
+class ARROW_FLIGHT_EXPORT FlightServerOptions {
+ public:
+  explicit FlightServerOptions(const Location& location_);
+
+  ~FlightServerOptions();
+
+  /// \brief The host & port (or domain socket path) to listen on.
+  /// Use port 0 to bind to an available port.
+  Location location;
+  /// \brief The authentication handler to use.
+  std::shared_ptr<ServerAuthHandler> auth_handler;
+  /// \brief A list of TLS certificate+key pairs to use.
+  std::vector<CertKeyPair> tls_certificates;
+  /// \brief Enable mTLS and require that the client present a certificate.
+  bool verify_client;
+  /// \brief If using mTLS, the PEM-encoded root certificate to use.
+  std::string root_certificates;
+  /// \brief A list of server middleware to apply, along with a key to
+  /// identify them by.
+  ///
+  /// Middleware are always applied in the order provided. Duplicate
+  /// keys are an error.
+  std::vector<std::pair<std::string, std::shared_ptr<ServerMiddlewareFactory>>>
+      middleware;
+
+  /// \brief An optional memory manager to control where to allocate incoming data.
+  std::shared_ptr<MemoryManager> memory_manager;
+
+  /// \brief A Flight implementation-specific callback to customize
+  /// transport-specific options.
+  ///
+  /// Not guaranteed to be called. The type of the parameter is
+  /// specific to the Flight implementation. Users should take care to
+  /// link to the same transport implementation as Flight to avoid
+  /// runtime problems. See "Using Arrow C++ in your own project" in
+  /// the documentation for more details.
+  std::function<void(void*)> builder_hook;
+};
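// ---------------------------------------------------------------------
// Editorial sketch (commented out; not upstream code): typical options
// setup before Init(). Location::ForGrpcTcp is declared in types.h, and
// MakeTracingServerMiddlewareFactory in server_tracing_middleware.h
// further below; MyServer is a hypothetical FlightServerBase subclass.
//
//   ARROW_ASSIGN_OR_RAISE(auto location,
//                         arrow::flight::Location::ForGrpcTcp("0.0.0.0", 0));
//   arrow::flight::FlightServerOptions options(location);
//   options.middleware.emplace_back(
//       "tracing", arrow::flight::MakeTracingServerMiddlewareFactory());
//   MyServer server;
//   ARROW_RETURN_NOT_OK(server.Init(options));
//   // Port 0 above means "pick a free port"; query it after Init():
//   std::cout << "listening on port " << server.port() << std::endl;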
+/// \brief Skeleton RPC server implementation which can be used to create
+/// custom servers by implementing its abstract methods
+class ARROW_FLIGHT_EXPORT FlightServerBase {
+ public:
+  FlightServerBase();
+  virtual ~FlightServerBase();
+
+  // Lifecycle methods.
+
+  /// \brief Initialize a Flight server listening at the given location.
+  /// This method must be called before any other method.
+  /// \param[in] options The configuration for this server.
+  Status Init(const FlightServerOptions& options);
+
+  /// \brief Get the port that the Flight server is listening on.
+  /// This method must only be called after Init(). Will return a
+  /// non-positive value if no port exists (e.g. when listening on a
+  /// domain socket).
+  int port() const;
+
+  /// \brief Get the address that the Flight server is listening on.
+  /// This method must only be called after Init().
+  Location location() const;
+
+  /// \brief Set the server to stop when receiving any of the given signal
+  /// numbers.
+  /// This method must be called before Serve().
+  Status SetShutdownOnSignals(const std::vector<int> sigs);
+
+  /// \brief Start serving.
+  /// This method blocks until the server shuts down.
+  ///
+  /// The server will start to shut down when either Shutdown() is called
+  /// or one of the signals registered in SetShutdownOnSignals() is received.
+  Status Serve();
+
+  /// \brief Query whether Serve() was interrupted by a signal.
+  /// This method must be called after Serve() has returned.
+  ///
+  /// \return int the signal number that interrupted Serve(), if any, otherwise 0
+  int GotSignal() const;
+
+  /// \brief Shut down the server, blocking until current requests finish.
+  ///
+  /// Can be called from a signal handler or another thread while Serve()
+  /// blocks. Optionally a deadline can be set. Once the deadline expires,
+  /// the server will wait until remaining running calls complete.
+  ///
+  /// Should only be called once.
+  Status Shutdown(const std::chrono::system_clock::time_point* deadline = NULLPTR);
+
+  /// \brief Block until server shuts down with Shutdown.
+  ///
+  /// Does not respond to signals like Serve().
+  Status Wait();
+
+  // Implement these methods to create your own server. The default
+  // implementations will return a not-implemented result to the client
+
+  /// \brief Retrieve a list of available fields given an optional opaque
+  /// criteria
+  /// \param[in] context The call context.
+  /// \param[in] criteria may be null
+  /// \param[out] listings the returned listings iterator
+  /// \return Status
+  virtual Status ListFlights(const ServerCallContext& context, const Criteria* criteria,
+                             std::unique_ptr<FlightListing>* listings);
+
+  /// \brief Retrieve the schema and an access plan for the indicated
+  /// descriptor
+  /// \param[in] context The call context.
+  /// \param[in] request the dataset request, whether a named dataset or command
+  /// \param[out] info the returned flight info provider
+  /// \return Status
+  virtual Status GetFlightInfo(const ServerCallContext& context,
+                               const FlightDescriptor& request,
+                               std::unique_ptr<FlightInfo>* info);
+
+  /// \brief Retrieve the current status of the target query
+  /// \param[in] context The call context.
+  /// \param[in] request the dataset request or a descriptor returned by a
+  ///     prior PollFlightInfo call
+  /// \param[out] info the returned retry info provider
+  /// \return Status
+  virtual Status PollFlightInfo(const ServerCallContext& context,
+                                const FlightDescriptor& request,
+                                std::unique_ptr<PollInfo>* info);
+
+  /// \brief Retrieve the schema for the indicated descriptor
+  /// \param[in] context The call context.
+  /// \param[in] request the dataset request, whether a named dataset or command
+  /// \param[out] schema the returned flight schema provider
+  /// \return Status
+  virtual Status GetSchema(const ServerCallContext& context,
+                           const FlightDescriptor& request,
+                           std::unique_ptr<SchemaResult>* schema);
+
+  /// \brief Get a stream of IPC payloads to put on the wire
+  /// \param[in] context The call context.
+  /// \param[in] request an opaque ticket
+  /// \param[out] stream the returned stream provider
+  /// \return Status
+  virtual Status DoGet(const ServerCallContext& context, const Ticket& request,
+                       std::unique_ptr<FlightDataStream>* stream);
+
+  /// \brief Process a stream of IPC payloads sent from a client
+  /// \param[in] context The call context.
+  /// \param[in] reader a sequence of uploaded record batches
+  /// \param[in] writer send metadata back to the client
+  /// \return Status
+  virtual Status DoPut(const ServerCallContext& context,
+                       std::unique_ptr<FlightMessageReader> reader,
+                       std::unique_ptr<FlightMetadataWriter> writer);
+
+  /// \brief Process a bidirectional stream of IPC payloads
+  /// \param[in] context The call context.
+  /// \param[in] reader a sequence of uploaded record batches
+  /// \param[in] writer send data back to the client
+  /// \return Status
+  virtual Status DoExchange(const ServerCallContext& context,
+                            std::unique_ptr<FlightMessageReader> reader,
+                            std::unique_ptr<FlightMessageWriter> writer);
+
+  /// \brief Execute an action, return stream of zero or more results
+  /// \param[in] context The call context.
+  /// \param[in] action the action to execute, with type and body
+  /// \param[out] result the result iterator
+  /// \return Status
+  virtual Status DoAction(const ServerCallContext& context, const Action& action,
+                          std::unique_ptr<ResultStream>* result);
+
+  /// \brief Retrieve the list of available actions
+  /// \param[in] context The call context.
+  /// \param[out] actions a vector of available action types
+  /// \return Status
+  virtual Status ListActions(const ServerCallContext& context,
+                             std::vector<ActionType>* actions);
+
+ private:
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+}  // namespace flight
+}  // namespace arrow
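// ---------------------------------------------------------------------
// Editorial example (not part of the vendored headers): a minimal
// FlightServerBase subclass serving in-memory tables keyed by ticket.
// The registry and names are assumptions for illustration; RPCs that are
// not overridden keep the base class's "not implemented" behavior.

#include <map>
#include <memory>
#include <string>

#include "arrow/flight/server.h"
#include "arrow/table.h"

namespace flight_example {

class InMemoryFlightServer : public arrow::flight::FlightServerBase {
 public:
  void AddTable(std::string name, std::shared_ptr<arrow::Table> table) {
    tables_[std::move(name)] = std::move(table);
  }

  arrow::Status DoGet(const arrow::flight::ServerCallContext& /*context*/,
                      const arrow::flight::Ticket& request,
                      std::unique_ptr<arrow::flight::FlightDataStream>* stream) override {
    auto it = tables_.find(request.ticket);
    if (it == tables_.end()) {
      return arrow::Status::KeyError("no dataset for ticket: ", request.ticket);
    }
    // TableBatchReader yields the table as a sequence of record batches;
    // the table stays alive in tables_ for the lifetime of the stream.
    auto reader = std::make_shared<arrow::TableBatchReader>(*it->second);
    *stream = std::make_unique<arrow::flight::RecordBatchStream>(reader);
    return arrow::Status::OK();
  }

 private:
  std::map<std::string, std::shared_ptr<arrow::Table>> tables_;
};

}  // namespace flight_example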
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_middleware.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_middleware.h
new file mode 100644
index 0000000000000000000000000000000000000000..030f1a17c21000fe5c11763c4878e87646d07663
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_middleware.h
@@ -0,0 +1,105 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Interfaces for defining middleware for Flight servers. Currently
+// experimental.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/flight/middleware.h"
+#include "arrow/flight/type_fwd.h"
+#include "arrow/flight/visibility.h"  // IWYU pragma: keep
+#include "arrow/status.h"
+
+namespace arrow {
+namespace flight {
+
+/// \brief Server-side middleware for a call, instantiated per RPC.
+///
+/// Middleware should be fast and must be infallible: there is no way
+/// to reject the call or report errors from the middleware instance.
+class ARROW_FLIGHT_EXPORT ServerMiddleware {
+ public:
+  virtual ~ServerMiddleware() = default;
+
+  /// \brief Unique name of middleware, used as alternative to RTTI
+  /// \return the string name of the middleware
+  virtual std::string name() const = 0;
+
+  /// \brief A callback before headers are sent. Extra headers can be
+  /// added, but existing ones cannot be read.
+  virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0;
+
+  /// \brief A callback after the call has completed.
+  virtual void CallCompleted(const Status& status) = 0;
+};
+
+/// \brief A factory for new middleware instances.
+///
+/// If added to a server, this will be called for each RPC (including
+/// Handshake) to give the opportunity to intercept the call.
+///
+/// It is guaranteed that all server middleware methods are called
+/// from the same thread that calls the RPC method implementation.
+class ARROW_FLIGHT_EXPORT ServerMiddlewareFactory {
+ public:
+  virtual ~ServerMiddlewareFactory() = default;
+
+  /// \brief A callback for the start of a new call.
+  ///
+  /// Return a non-OK status to reject the call with the given status.
+  ///
+  /// \param[in] info Information about the call.
+  /// \param[in] context The call context.
+  /// \param[out] middleware The middleware instance for this call. If
+  ///     null, no middleware will be added to this call instance from
+  ///     this factory.
+  /// \return Status A non-OK status will reject the call with the
+  ///     given status. Middleware previously in the chain will have
+  ///     their CallCompleted callback called. Other middleware
+  ///     factories will not be called.
+  virtual Status StartCall(const CallInfo& info, const ServerCallContext& context,
+                           std::shared_ptr<ServerMiddleware>* middleware);
+
+  /// \brief A callback for the start of a new call.
+  ///
+  /// Return a non-OK status to reject the call with the given status.
+  ///
+  /// \param info Information about the call.
+  /// \param incoming_headers Headers sent by the client for this call.
+  ///     Do not retain a reference to this object.
+  /// \param[out] middleware The middleware instance for this call. If
+  ///     null, no middleware will be added to this call instance from
+  ///     this factory.
+  /// \return Status A non-OK status will reject the call with the
+  ///     given status. Middleware previously in the chain will have
+  ///     their CallCompleted callback called. Other middleware
+  ///     factories will not be called.
+  /// \deprecated Deprecated in 13.0.0. Implement the StartCall()
+  ///     with ServerCallContext version instead.
+  ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
+  virtual Status StartCall(const CallInfo& info, const CallHeaders& incoming_headers,
+                           std::shared_ptr<ServerMiddleware>* middleware) {
+    return Status::NotImplemented(typeid(this).name(),
+                                  "::StartCall() isn't implemented");
+  }
+};
+
+}  // namespace flight
+}  // namespace arrow
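// ---------------------------------------------------------------------
// Editorial example (not part of the vendored header above): a server
// middleware factory that rejects calls lacking an "authorization"
// header. The header name and error message are illustrative
// assumptions; only the overridden interfaces come from
// server_middleware.h.

#include <memory>
#include <string>

#include "arrow/flight/server_middleware.h"
#include "arrow/flight/types.h"

namespace flight_example {

class RequireAuthMiddleware : public arrow::flight::ServerMiddleware {
 public:
  std::string name() const override { return "flight_example::RequireAuthMiddleware"; }
  void SendingHeaders(arrow::flight::AddCallHeaders* /*outgoing_headers*/) override {}
  void CallCompleted(const arrow::Status& /*status*/) override {}
};

class RequireAuthFactory : public arrow::flight::ServerMiddlewareFactory {
 public:
  arrow::Status StartCall(
      const arrow::flight::CallInfo& /*info*/,
      const arrow::flight::ServerCallContext& context,
      std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) override {
    const auto& headers = context.incoming_headers();
    if (headers.find("authorization") == headers.end()) {
      // Returning non-OK rejects the call; factories later in the chain
      // are not invoked and earlier middleware get CallCompleted().
      return arrow::flight::MakeFlightError(
          arrow::flight::FlightStatusCode::Unauthenticated,
          "missing authorization header");
    }
    *middleware = std::make_shared<RequireAuthMiddleware>();
    return arrow::Status::OK();
  }
};

}  // namespace flight_example

// A factory like this would be registered under a key via
// FlightServerOptions::middleware before calling Init().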
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_tracing_middleware.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_tracing_middleware.h
new file mode 100644
index 0000000000000000000000000000000000000000..581c8354368cf1d87d10cb87a76d162fe7be2d7b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_tracing_middleware.h
@@ -0,0 +1,68 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Middleware implementation for propagating OpenTelemetry spans.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/flight/server_middleware.h"
+#include "arrow/flight/visibility.h"
+#include "arrow/status.h"
+
+namespace arrow {
+namespace flight {
+
+/// \brief Returns a ServerMiddlewareFactory that handles receiving OpenTelemetry spans.
+ARROW_FLIGHT_EXPORT std::shared_ptr<ServerMiddlewareFactory>
+MakeTracingServerMiddlewareFactory();
+
+/// \brief A server middleware that provides access to the
+/// OpenTelemetry context, if present.
+///
+/// Used to make the OpenTelemetry span available in Python.
+class ARROW_FLIGHT_EXPORT TracingServerMiddleware : public ServerMiddleware {
+ public:
+  ~TracingServerMiddleware();
+
+  static constexpr char const kMiddlewareName[] =
+      "arrow::flight::TracingServerMiddleware";
+
+  std::string name() const override { return kMiddlewareName; }
+  void SendingHeaders(AddCallHeaders*) override;
+  void CallCompleted(const Status&) override;
+
+  struct TraceKey {
+    std::string key;
+    std::string value;
+  };
+  /// \brief Get the trace context.
+  std::vector<TraceKey> GetTraceContext() const;
+
+ private:
+  class Impl;
+  friend class TracingServerMiddlewareFactory;
+
+  explicit TracingServerMiddleware(std::unique_ptr<Impl> impl);
+  std::unique_ptr<Impl> impl_;
+};
+
+}  // namespace flight
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e0e8c209ac94a5071e7e2817ab384e20ac7cff2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h
@@ -0,0 +1,317 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Common test definitions for Flight. Individual transport
+// implementations can instantiate these tests.
+//
+// While Googletest's value-parameterized tests would be a more
+// natural way to do this, they cause runtime issues on MinGW/MSVC
+// (Googletest thinks the test suite has been defined twice).
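// ---------------------------------------------------------------------
// Editorial sketch (commented out; not upstream code): a transport's
// test binary typically instantiates a suite declared below by mixing
// the suite class with ::testing::Test, then expanding the matching
// macro. The "ucx" transport name here is a hypothetical example.
//
//   class UcxConnectivityTest : public ConnectivityTest, public ::testing::Test {
//    protected:
//     std::string transport() const override { return "ucx"; }
//     void SetUp() override { SetUpTest(); }
//     void TearDown() override { TearDownTest(); }
//   };
//   ARROW_FLIGHT_TEST_CONNECTIVITY(UcxConnectivityTest);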
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/flight/server.h" +#include "arrow/flight/types.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace flight { + +class ARROW_FLIGHT_EXPORT FlightTest { + protected: + virtual std::string transport() const = 0; + virtual bool supports_async() const { return false; } + virtual void SetUpTest() {} + virtual void TearDownTest() {} +}; + +/// Common tests of startup/shutdown +class ARROW_FLIGHT_EXPORT ConnectivityTest : public FlightTest { + public: + // Test methods + void TestGetPort(); + void TestBuilderHook(); + void TestShutdown(); + void TestShutdownWithDeadline(); + void TestBrokenConnection(); +}; + +#define ARROW_FLIGHT_TEST_CONNECTIVITY(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from ConnectivityTest"); \ + TEST_F(FIXTURE, GetPort) { TestGetPort(); } \ + TEST_F(FIXTURE, BuilderHook) { TestBuilderHook(); } \ + TEST_F(FIXTURE, Shutdown) { TestShutdown(); } \ + TEST_F(FIXTURE, ShutdownWithDeadline) { TestShutdownWithDeadline(); } \ + TEST_F(FIXTURE, BrokenConnection) { TestBrokenConnection(); } + +/// Common tests of data plane methods +class ARROW_FLIGHT_EXPORT DataTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + Status ConnectClient(); + + // Test methods + void TestDoGetInts(); + void TestDoGetFloats(); + void TestDoGetDicts(); + void TestDoGetLargeBatch(); + void TestFlightDataStreamError(); + void TestOverflowServerBatch(); + void TestOverflowClientBatch(); + void TestDoExchange(); + void TestDoExchangeNoData(); + void TestDoExchangeWriteOnlySchema(); + void TestDoExchangeGet(); + void TestDoExchangePut(); + void TestDoExchangeEcho(); + void TestDoExchangeTotal(); + void TestDoExchangeError(); + void TestDoExchangeConcurrency(); + void TestDoExchangeUndrained(); + void TestIssue5095(); + + private: + void CheckDoGet( + const FlightDescriptor& descr, const RecordBatchVector& expected_batches, + std::function&)> check_endpoints); + void CheckDoGet(const Ticket& ticket, const RecordBatchVector& expected_batches); + + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_DATA(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from DataTest"); \ + TEST_F(FIXTURE, TestDoGetInts) { TestDoGetInts(); } \ + TEST_F(FIXTURE, TestDoGetFloats) { TestDoGetFloats(); } \ + TEST_F(FIXTURE, TestDoGetDicts) { TestDoGetDicts(); } \ + TEST_F(FIXTURE, TestDoGetLargeBatch) { TestDoGetLargeBatch(); } \ + TEST_F(FIXTURE, TestFlightDataStreamError) { TestFlightDataStreamError(); } \ + TEST_F(FIXTURE, TestOverflowServerBatch) { TestOverflowServerBatch(); } \ + TEST_F(FIXTURE, TestOverflowClientBatch) { TestOverflowClientBatch(); } \ + TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); } \ + TEST_F(FIXTURE, TestDoExchangeNoData) { TestDoExchangeNoData(); } \ + TEST_F(FIXTURE, TestDoExchangeWriteOnlySchema) { TestDoExchangeWriteOnlySchema(); } \ + TEST_F(FIXTURE, TestDoExchangeGet) { TestDoExchangeGet(); } \ + TEST_F(FIXTURE, TestDoExchangePut) { TestDoExchangePut(); } \ + TEST_F(FIXTURE, TestDoExchangeEcho) { TestDoExchangeEcho(); } \ + TEST_F(FIXTURE, TestDoExchangeTotal) { TestDoExchangeTotal(); } \ + TEST_F(FIXTURE, TestDoExchangeError) { TestDoExchangeError(); } \ + TEST_F(FIXTURE, TestDoExchangeConcurrency) { TestDoExchangeConcurrency(); } \ + TEST_F(FIXTURE, TestDoExchangeUndrained) { TestDoExchangeUndrained(); } \ + 
TEST_F(FIXTURE, TestIssue5095) { TestIssue5095(); } + +/// \brief Specific tests of DoPut. +class ARROW_FLIGHT_EXPORT DoPutTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + void CheckBatches(const FlightDescriptor& expected_descriptor, + const RecordBatchVector& expected_batches); + void CheckDoPut(const FlightDescriptor& descr, const std::shared_ptr& schema, + const RecordBatchVector& batches); + + // Test methods + void TestInts(); + void TestFloats(); + void TestEmptyBatch(); + void TestDicts(); + void TestLargeBatch(); + void TestSizeLimit(); + void TestUndrained(); + + private: + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_DO_PUT(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from DoPutTest"); \ + TEST_F(FIXTURE, TestInts) { TestInts(); } \ + TEST_F(FIXTURE, TestFloats) { TestFloats(); } \ + TEST_F(FIXTURE, TestEmptyBatch) { TestEmptyBatch(); } \ + TEST_F(FIXTURE, TestDicts) { TestDicts(); } \ + TEST_F(FIXTURE, TestLargeBatch) { TestLargeBatch(); } \ + TEST_F(FIXTURE, TestSizeLimit) { TestSizeLimit(); } \ + TEST_F(FIXTURE, TestUndrained) { TestUndrained(); } + +class ARROW_FLIGHT_EXPORT AppMetadataTestServer : public FlightServerBase { + public: + virtual ~AppMetadataTestServer() = default; + + Status DoGet(const ServerCallContext& context, const Ticket& request, + std::unique_ptr* data_stream) override; + + Status DoPut(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer) override; +}; + +/// \brief Tests of app_metadata in data plane methods. +class ARROW_FLIGHT_EXPORT AppMetadataTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + + // Test methods + void TestDoGet(); + void TestDoGetDictionaries(); + void TestDoPut(); + void TestDoPutDictionaries(); + void TestDoPutReadMetadata(); + + private: + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_APP_METADATA(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from AppMetadataTest"); \ + TEST_F(FIXTURE, TestDoGet) { TestDoGet(); } \ + TEST_F(FIXTURE, TestDoGetDictionaries) { TestDoGetDictionaries(); } \ + TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \ + TEST_F(FIXTURE, TestDoPutDictionaries) { TestDoPutDictionaries(); } \ + TEST_F(FIXTURE, TestDoPutReadMetadata) { TestDoPutReadMetadata(); } + +/// \brief Tests of IPC options in data plane methods. 
+class ARROW_FLIGHT_EXPORT IpcOptionsTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + + // Test methods + void TestDoGetReadOptions(); + void TestDoPutWriteOptions(); + void TestDoExchangeClientWriteOptions(); + void TestDoExchangeClientWriteOptionsBegin(); + void TestDoExchangeServerWriteOptions(); + + private: + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_IPC_OPTIONS(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from IpcOptionsTest"); \ + TEST_F(FIXTURE, TestDoGetReadOptions) { TestDoGetReadOptions(); } \ + TEST_F(FIXTURE, TestDoPutWriteOptions) { TestDoPutWriteOptions(); } \ + TEST_F(FIXTURE, TestDoExchangeClientWriteOptions) { \ + TestDoExchangeClientWriteOptions(); \ + } \ + TEST_F(FIXTURE, TestDoExchangeClientWriteOptionsBegin) { \ + TestDoExchangeClientWriteOptionsBegin(); \ + } \ + TEST_F(FIXTURE, TestDoExchangeServerWriteOptions) { \ + TestDoExchangeServerWriteOptions(); \ + } + +/// \brief Tests of data plane methods with CUDA memory. +/// +/// If not built with ARROW_CUDA, tests are no-ops. +class ARROW_FLIGHT_EXPORT CudaDataTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + + // Test methods + void TestDoGet(); + void TestDoPut(); + void TestDoExchange(); + + private: + class Impl; + std::unique_ptr client_; + std::unique_ptr server_; + std::shared_ptr impl_; +}; + +#define ARROW_FLIGHT_TEST_CUDA_DATA(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from CudaDataTest"); \ + TEST_F(FIXTURE, TestDoGet) { TestDoGet(); } \ + TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \ + TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); } + +/// \brief Tests of error handling. +class ARROW_FLIGHT_EXPORT ErrorHandlingTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + + // Test methods + void TestGetFlightInfo(); + void TestGetFlightInfoMetadata(); + void TestAsyncGetFlightInfo(); + void TestDoPut(); + void TestDoExchange(); + + protected: + struct Impl; + + std::vector> GetHeaders(); + + std::shared_ptr impl_; + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_ERROR_HANDLING(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from ErrorHandlingTest"); \ + TEST_F(FIXTURE, TestAsyncGetFlightInfo) { TestAsyncGetFlightInfo(); } \ + TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); } \ + TEST_F(FIXTURE, TestGetFlightInfoMetadata) { TestGetFlightInfoMetadata(); } \ + TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \ + TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); } + +/// \brief Tests of the async client. 
+class ARROW_FLIGHT_EXPORT AsyncClientTest : public FlightTest { + public: + void SetUpTest() override; + void TearDownTest() override; + + // Test methods + void TestGetFlightInfo(); + void TestGetFlightInfoFuture(); + void TestListenerLifetime(); + + private: + std::unique_ptr client_; + std::unique_ptr server_; +}; + +#define ARROW_FLIGHT_TEST_ASYNC_CLIENT(FIXTURE) \ + static_assert(std::is_base_of::value, \ + ARROW_STRINGIFY(FIXTURE) " must inherit from AsyncClientTest"); \ + TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); } \ + TEST_F(FIXTURE, TestGetFlightInfoFuture) { TestGetFlightInfoFuture(); } \ + TEST_F(FIXTURE, TestListenerLifetime) { TestListenerLifetime(); } + +} // namespace flight +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..c0b42d9b90c5a1f4c211c78d3685a31000d76f57 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_util.h @@ -0,0 +1,264 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" + +#include "arrow/flight/client.h" +#include "arrow/flight/client_auth.h" +#include "arrow/flight/server.h" +#include "arrow/flight/server_auth.h" +#include "arrow/flight/types.h" +#include "arrow/flight/visibility.h" + +namespace boost { +namespace process { + +class child; + +} // namespace process +} // namespace boost + +namespace arrow { +namespace flight { + +// ---------------------------------------------------------------------- +// Helpers to compare values for equality + +inline void AssertEqual(const FlightInfo& expected, const FlightInfo& actual) { + ipc::DictionaryMemo expected_memo; + ipc::DictionaryMemo actual_memo; + ASSERT_OK_AND_ASSIGN(auto ex_schema, expected.GetSchema(&expected_memo)); + ASSERT_OK_AND_ASSIGN(auto actual_schema, actual.GetSchema(&actual_memo)); + + AssertSchemaEqual(*ex_schema, *actual_schema); + ASSERT_EQ(expected.total_records(), actual.total_records()); + ASSERT_EQ(expected.total_bytes(), actual.total_bytes()); + + ASSERT_EQ(expected.descriptor(), actual.descriptor()); + ASSERT_THAT(actual.endpoints(), ::testing::ContainerEq(expected.endpoints())); +} + +// ---------------------------------------------------------------------- +// Fixture to use for running test servers + +class ARROW_FLIGHT_EXPORT TestServer { + public: + explicit TestServer(const std::string& executable_name) + : executable_name_(executable_name), port_(::arrow::GetListenPort()) {} + TestServer(const std::string& executable_name, int port) + : executable_name_(executable_name), port_(port) {} + TestServer(const std::string& executable_name, const std::string& unix_sock) + : executable_name_(executable_name), unix_sock_(unix_sock) {} + + void Start(const std::vector& extra_args); + void Start() { Start({}); } + + int Stop(); + + bool IsRunning(); + + int port() const; + const std::string& unix_sock() const; + + private: + std::string executable_name_; + int port_; + std::string unix_sock_; + std::shared_ptr<::boost::process::child> server_process_; +}; + +/// \brief Create a simple Flight server for testing +ARROW_FLIGHT_EXPORT +std::unique_ptr ExampleTestServer(); + +// Helper to initialize a server and matching client with callbacks to +// populate options. +template +Status MakeServer(const Location& location, std::unique_ptr* server, + std::unique_ptr* client, + std::function make_server_options, + std::function make_client_options, + Args&&... server_args) { + *server = std::make_unique(std::forward(server_args)...); + FlightServerOptions server_options(location); + RETURN_NOT_OK(make_server_options(&server_options)); + RETURN_NOT_OK((*server)->Init(server_options)); + std::string uri = + location.scheme() + "://127.0.0.1:" + std::to_string((*server)->port()); + ARROW_ASSIGN_OR_RAISE(auto real_location, Location::Parse(uri)); + FlightClientOptions client_options = FlightClientOptions::Defaults(); + RETURN_NOT_OK(make_client_options(&client_options)); + return FlightClient::Connect(real_location, client_options).Value(client); +} + +// Helper to initialize a server and matching client with callbacks to +// populate options. +template +Status MakeServer(std::unique_ptr* server, + std::unique_ptr* client, + std::function make_server_options, + std::function make_client_options, + Args&&... 
server_args) { + ARROW_ASSIGN_OR_RAISE(auto location, Location::ForGrpcTcp("localhost", 0)); + return MakeServer(location, server, client, std::move(make_server_options), + std::move(make_client_options), + std::forward(server_args)...); +} + +// ---------------------------------------------------------------------- +// A FlightDataStream that numbers the record batches +/// \brief A basic implementation of FlightDataStream that will provide +/// a sequence of FlightData messages to be written to a stream +class ARROW_FLIGHT_EXPORT NumberingStream : public FlightDataStream { + public: + explicit NumberingStream(std::unique_ptr stream); + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + arrow::Result Next() override; + + private: + int counter_; + std::shared_ptr stream_; +}; + +// ---------------------------------------------------------------------- +// Example data for test-server and unit tests + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleIntSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleStringSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleDictSchema(); + +ARROW_FLIGHT_EXPORT +std::shared_ptr ExampleLargeSchema(); + +ARROW_FLIGHT_EXPORT +Status ExampleIntBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleFloatBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleDictBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleNestedBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleLargeBatches(RecordBatchVector* out); + +ARROW_FLIGHT_EXPORT +arrow::Result> VeryLargeBatch(); + +ARROW_FLIGHT_EXPORT +std::vector ExampleFlightInfo(); + +ARROW_FLIGHT_EXPORT +std::vector ExampleActionTypes(); + +ARROW_FLIGHT_EXPORT +FlightInfo MakeFlightInfo(const Schema& schema, const FlightDescriptor& descriptor, + const std::vector& endpoints, + int64_t total_records, int64_t total_bytes, bool ordered, + std::string app_metadata); + +// ---------------------------------------------------------------------- +// A pair of authentication handlers that check for a predefined password +// and set the peer identity to a predefined username. 
+ +class ARROW_FLIGHT_EXPORT TestServerAuthHandler : public ServerAuthHandler { + public: + explicit TestServerAuthHandler(const std::string& username, + const std::string& password); + ~TestServerAuthHandler() override; + Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing, + ServerAuthReader* incoming) override; + Status IsValid(const ServerCallContext& context, const std::string& token, + std::string* peer_identity) override; + + private: + std::string username_; + std::string password_; +}; + +class ARROW_FLIGHT_EXPORT TestServerBasicAuthHandler : public ServerAuthHandler { + public: + explicit TestServerBasicAuthHandler(const std::string& username, + const std::string& password); + ~TestServerBasicAuthHandler() override; + Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing, + ServerAuthReader* incoming) override; + Status IsValid(const ServerCallContext& context, const std::string& token, + std::string* peer_identity) override; + + private: + BasicAuth basic_auth_; +}; + +class ARROW_FLIGHT_EXPORT TestClientAuthHandler : public ClientAuthHandler { + public: + explicit TestClientAuthHandler(const std::string& username, + const std::string& password); + ~TestClientAuthHandler() override; + Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) override; + Status GetToken(std::string* token) override; + + private: + std::string username_; + std::string password_; +}; + +class ARROW_FLIGHT_EXPORT TestClientBasicAuthHandler : public ClientAuthHandler { + public: + explicit TestClientBasicAuthHandler(const std::string& username, + const std::string& password); + ~TestClientBasicAuthHandler() override; + Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) override; + Status GetToken(std::string* token) override; + + private: + BasicAuth basic_auth_; + std::string token_; +}; + +ARROW_FLIGHT_EXPORT +Status ExampleTlsCertificates(std::vector* out); + +ARROW_FLIGHT_EXPORT +Status ExampleTlsCertificateRoot(CertKeyPair* out); + +} // namespace flight +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h new file mode 100644 index 0000000000000000000000000000000000000000..b3df8377b8ffdec6b2c8aeea4710cc540ad3279d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h @@ -0,0 +1,1147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Data structure for Flight RPC. 
API should be considered experimental for now + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/visibility.h" +#include "arrow/ipc/options.h" +#include "arrow/ipc/writer.h" +#include "arrow/result.h" +#include "arrow/status.h" + +namespace arrow { + +class Buffer; +class RecordBatch; +class Schema; +class Status; +class Table; + +namespace ipc { + +class DictionaryMemo; + +} // namespace ipc + +namespace util { + +class Uri; + +} // namespace util + +namespace flight { + +/// \brief A timestamp compatible with Protocol Buffer's +/// google.protobuf.Timestamp: +/// +/// https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp +/// +/// > A Timestamp represents a point in time independent of any time +/// > zone or calendar, represented as seconds and fractions of +/// > seconds at nanosecond resolution in UTC Epoch time. It is +/// > encoded using the Proleptic Gregorian Calendar which extends the +/// > Gregorian calendar backwards to year one. It is encoded assuming +/// > all minutes are 60 seconds long, i.e. leap seconds are "smeared" +/// > so that no leap second table is needed for interpretation. Range +/// > is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +using Timestamp = std::chrono::system_clock::time_point; + +/// \brief A Flight-specific status code. Used to encode some +/// additional status codes into an Arrow Status. +enum class FlightStatusCode : int8_t { + /// An implementation error has occurred. + Internal, + /// A request timed out. + TimedOut, + /// A request was cancelled. + Cancelled, + /// We are not authenticated to the remote service. + Unauthenticated, + /// We do not have permission to make this request. + Unauthorized, + /// The remote service cannot handle this request at the moment. + Unavailable, + /// A request failed for some other reason + Failed +}; + +// Silence warning +// "non dll-interface class RecordBatchReader used as base for dll-interface class" +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4275) +#endif + +/// \brief Flight-specific error information in a Status. +class ARROW_FLIGHT_EXPORT FlightStatusDetail : public arrow::StatusDetail { + public: + explicit FlightStatusDetail(FlightStatusCode code) : code_{code} {} + explicit FlightStatusDetail(FlightStatusCode code, std::string extra_info) + : code_{code}, extra_info_(std::move(extra_info)) {} + const char* type_id() const override; + std::string ToString() const override; + + /// \brief Get the Flight status code. + FlightStatusCode code() const; + /// \brief Get the extra error info + std::string extra_info() const; + /// \brief Get the human-readable name of the status code. + std::string CodeAsString() const; + /// \brief Set the extra error info + void set_extra_info(std::string extra_info); + + /// \brief Try to extract a \a FlightStatusDetail from any Arrow + /// status. + /// + /// \return a \a FlightStatusDetail if it could be unwrapped, \a + /// nullptr otherwise + static std::shared_ptr UnwrapStatus(const arrow::Status& status); + + private: + FlightStatusCode code_; + std::string extra_info_; +}; + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +/// \brief Make an appropriate Arrow status for the given +/// Flight-specific status. +/// +/// \param code The Flight status code. +/// \param message The message for the error. 
+/// \param extra_info Optional extra binary info for the error (eg protobuf) +ARROW_FLIGHT_EXPORT +Status MakeFlightError(FlightStatusCode code, std::string message, + std::string extra_info = {}); + +/// \brief Headers sent from the client or server. +/// +/// Header values are ordered. +using CallHeaders = std::multimap; + +/// \brief A TLS certificate plus key. +struct ARROW_FLIGHT_EXPORT CertKeyPair { + /// \brief The certificate in PEM format. + std::string pem_cert; + + /// \brief The key in PEM format. + std::string pem_key; +}; + +/// \brief A type of action that can be performed with the DoAction RPC. +struct ARROW_FLIGHT_EXPORT ActionType { + /// \brief The name of the action. + std::string type; + + /// \brief A human-readable description of the action. + std::string description; + + std::string ToString() const; + bool Equals(const ActionType& other) const; + + friend bool operator==(const ActionType& left, const ActionType& right) { + return left.Equals(right); + } + friend bool operator!=(const ActionType& left, const ActionType& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); + + static const ActionType kCancelFlightInfo; + static const ActionType kRenewFlightEndpoint; + static const ActionType kSetSessionOptions; + static const ActionType kGetSessionOptions; + static const ActionType kCloseSession; +}; + +/// \brief Opaque selection criteria for ListFlights RPC +struct ARROW_FLIGHT_EXPORT Criteria { + /// Opaque criteria expression, dependent on server implementation + std::string expression; + + std::string ToString() const; + bool Equals(const Criteria& other) const; + + friend bool operator==(const Criteria& left, const Criteria& right) { + return left.Equals(right); + } + friend bool operator!=(const Criteria& left, const Criteria& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief An action to perform with the DoAction RPC +struct ARROW_FLIGHT_EXPORT Action { + /// The action type + std::string type; + + /// The action content as a Buffer + std::shared_ptr body; + + std::string ToString() const; + bool Equals(const Action& other) const; + + friend bool operator==(const Action& left, const Action& right) { + return left.Equals(right); + } + friend bool operator!=(const Action& left, const Action& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief Opaque result returned after executing an action +struct ARROW_FLIGHT_EXPORT Result { + std::shared_ptr body; + + std::string ToString() const; + bool Equals(const Result& other) const; + + friend bool operator==(const Result& left, const Result& right) { + return left.Equals(right); + } + friend bool operator!=(const Result& left, const Result& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. 
+ arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +enum class CancelStatus { + /// The cancellation status is unknown. Servers should avoid using + /// this value (send a kNotCancellable if the requested FlightInfo + /// is not known). Clients can retry the request. + kUnspecified = 0, + /// The cancellation request is complete. Subsequent requests with + /// the same payload may return kCancelled or a kNotCancellable error. + kCancelled = 1, + /// The cancellation request is in progress. The client may retry + /// the cancellation request. + kCancelling = 2, + // The FlightInfo is not cancellable. The client should not retry the + // cancellation request. + kNotCancellable = 3, +}; + +/// \brief The result of the CancelFlightInfo action. +struct ARROW_FLIGHT_EXPORT CancelFlightInfoResult { + CancelStatus status; + + std::string ToString() const; + bool Equals(const CancelFlightInfoResult& other) const; + + friend bool operator==(const CancelFlightInfoResult& left, + const CancelFlightInfoResult& right) { + return left.Equals(right); + } + friend bool operator!=(const CancelFlightInfoResult& left, + const CancelFlightInfoResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +ARROW_FLIGHT_EXPORT +std::ostream& operator<<(std::ostream& os, CancelStatus status); + +/// \brief message for simple auth +struct ARROW_FLIGHT_EXPORT BasicAuth { + std::string username; + std::string password; + + std::string ToString() const; + bool Equals(const BasicAuth& other) const; + + friend bool operator==(const BasicAuth& left, const BasicAuth& right) { + return left.Equals(right); + } + friend bool operator!=(const BasicAuth& left, const BasicAuth& right) { + return !(left == right); + } + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; +}; + +/// \brief A request to retrieve or generate a dataset +struct ARROW_FLIGHT_EXPORT FlightDescriptor { + enum DescriptorType { + UNKNOWN = 0, /// Unused + PATH = 1, /// Named path identifying a dataset + CMD = 2 /// Opaque command to generate a dataset + }; + + /// The descriptor type + DescriptorType type; + + /// Opaque value used to express a command. Should only be defined when type + /// is CMD + std::string cmd; + + /// List of strings identifying a particular dataset. Should only be defined + /// when type is PATH + std::vector path; + + bool Equals(const FlightDescriptor& other) const; + + /// \brief Get a human-readable form of this descriptor. + std::string ToString() const; + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. 
+ static arrow::Result Deserialize(std::string_view serialized); + + // Convenience factory functions + + static FlightDescriptor Command(const std::string& c) { + return FlightDescriptor{CMD, c, {}}; + } + + static FlightDescriptor Path(const std::vector& p) { + return FlightDescriptor{PATH, "", p}; + } + + friend bool operator==(const FlightDescriptor& left, const FlightDescriptor& right) { + return left.Equals(right); + } + friend bool operator!=(const FlightDescriptor& left, const FlightDescriptor& right) { + return !(left == right); + } +}; + +/// \brief Data structure providing an opaque identifier or credential to use +/// when requesting a data stream with the DoGet RPC +struct ARROW_FLIGHT_EXPORT Ticket { + std::string ticket; + + std::string ToString() const; + bool Equals(const Ticket& other) const; + + friend bool operator==(const Ticket& left, const Ticket& right) { + return left.Equals(right); + } + friend bool operator!=(const Ticket& left, const Ticket& right) { + return !(left == right); + } + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + static arrow::Result Deserialize(std::string_view serialized); +}; + +class FlightClient; +class FlightServerBase; + +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpc; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcTcp; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcUnix; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcTls; + +/// \brief A host location (a URI) +struct ARROW_FLIGHT_EXPORT Location { + public: + /// \brief Initialize a blank location. + Location(); + + /// \brief Initialize a location by parsing a URI string + static arrow::Result Parse(const std::string& uri_string); + + /// \brief Get the fallback URI. + /// + /// arrow-flight-reuse-connection://? means that a client may attempt to + /// reuse an existing connection to a Flight service to fetch data instead + /// of creating a new connection to one of the other locations listed in a + /// FlightEndpoint response. + static const Location& ReuseConnection(); + + /// \brief Initialize a location for a non-TLS, gRPC-based Flight + /// service from a host and port + /// \param[in] host The hostname to connect to + /// \param[in] port The port + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcTcp(const std::string& host, const int port); + + /// \brief Initialize a location for a TLS-enabled, gRPC-based Flight + /// service from a host and port + /// \param[in] host The hostname to connect to + /// \param[in] port The port + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcTls(const std::string& host, const int port); + + /// \brief Initialize a location for a domain socket-based Flight + /// service + /// \param[in] path The path to the domain socket + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcUnix(const std::string& path); + + /// \brief Initialize a location based on a URI scheme + static arrow::Result ForScheme(const std::string& scheme, + const std::string& host, const int port); + + /// \brief Get a representation of this URI as a string. 
+ std::string ToString() const; + + /// \brief Get the scheme of this URI. + std::string scheme() const; + + bool Equals(const Location& other) const; + + friend bool operator==(const Location& left, const Location& right) { + return left.Equals(right); + } + friend bool operator!=(const Location& left, const Location& right) { + return !(left == right); + } + + private: + friend class FlightClient; + friend class FlightServerBase; + std::shared_ptr uri_; +}; + +/// \brief A flight ticket and list of locations where the ticket can be +/// redeemed +struct ARROW_FLIGHT_EXPORT FlightEndpoint { + /// Opaque ticket identify; use with DoGet RPC + Ticket ticket; + + /// List of locations where ticket can be redeemed. If the list is empty, the + /// ticket can only be redeemed on the current service where the ticket was + /// generated + std::vector locations; + + /// Expiration time of this stream. If present, clients may assume + /// they can retry DoGet requests. Otherwise, clients should avoid + /// retrying DoGet requests. + std::optional expiration_time; + + /// Opaque Application-defined metadata + std::string app_metadata; + + std::string ToString() const; + bool Equals(const FlightEndpoint& other) const; + + friend bool operator==(const FlightEndpoint& left, const FlightEndpoint& right) { + return left.Equals(right); + } + friend bool operator!=(const FlightEndpoint& left, const FlightEndpoint& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief The request of the RenewFlightEndpoint action. +struct ARROW_FLIGHT_EXPORT RenewFlightEndpointRequest { + FlightEndpoint endpoint; + + std::string ToString() const; + bool Equals(const RenewFlightEndpointRequest& other) const; + + friend bool operator==(const RenewFlightEndpointRequest& left, + const RenewFlightEndpointRequest& right) { + return left.Equals(right); + } + friend bool operator!=(const RenewFlightEndpointRequest& left, + const RenewFlightEndpointRequest& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize( + std::string_view serialized); +}; + +/// \brief Staging data structure for messages about to be put on the wire +/// +/// This structure corresponds to FlightData in the protocol. +struct ARROW_FLIGHT_EXPORT FlightPayload { + std::shared_ptr descriptor; + std::shared_ptr app_metadata; + ipc::IpcPayload ipc_message; + + /// \brief Check that the payload can be written to the wire. + Status Validate() const; +}; + +/// \brief Schema result returned after a schema request RPC +struct ARROW_FLIGHT_EXPORT SchemaResult { + public: + SchemaResult() = default; + explicit SchemaResult(std::string schema) : raw_schema_(std::move(schema)) {} + + /// \brief Factory method to construct a SchemaResult. 
+
+/// \brief Schema result returned after a schema request RPC
+struct ARROW_FLIGHT_EXPORT SchemaResult {
+ public:
+  SchemaResult() = default;
+  explicit SchemaResult(std::string schema) : raw_schema_(std::move(schema)) {}
+
+  /// \brief Factory method to construct a SchemaResult.
+  static arrow::Result<std::unique_ptr<SchemaResult>> Make(const Schema& schema);
+
+  /// \brief return schema
+  /// \param[in,out] dictionary_memo for dictionary bookkeeping, will
+  /// be modified
+  /// \return Arrow result with the reconstructed Schema
+  arrow::Result<std::shared_ptr<Schema>> GetSchema(
+      ipc::DictionaryMemo* dictionary_memo) const;
+
+  const std::string& serialized_schema() const { return raw_schema_; }
+
+  std::string ToString() const;
+  bool Equals(const SchemaResult& other) const;
+
+  friend bool operator==(const SchemaResult& left, const SchemaResult& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const SchemaResult& left, const SchemaResult& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<SchemaResult> Deserialize(std::string_view serialized);
+
+ private:
+  std::string raw_schema_;
+};
+
+/// \brief The access coordinates for retrieval of a dataset, returned by
+/// GetFlightInfo
+class ARROW_FLIGHT_EXPORT FlightInfo {
+ public:
+  struct Data {
+    std::string schema;
+    FlightDescriptor descriptor;
+    std::vector<FlightEndpoint> endpoints;
+    int64_t total_records = -1;
+    int64_t total_bytes = -1;
+    bool ordered = false;
+    std::string app_metadata;
+  };
+
+  explicit FlightInfo(Data data) : data_(std::move(data)), reconstructed_schema_(false) {}
+
+  /// \brief Factory method to construct a FlightInfo.
+  static arrow::Result<FlightInfo> Make(const Schema& schema,
+                                        const FlightDescriptor& descriptor,
+                                        const std::vector<FlightEndpoint>& endpoints,
+                                        int64_t total_records, int64_t total_bytes,
+                                        bool ordered = false,
+                                        std::string app_metadata = "");
+
+  /// \brief Deserialize the Arrow schema of the dataset. Populate any
+  /// dictionary encoded fields into a DictionaryMemo for
+  /// bookkeeping
+  /// \param[in,out] dictionary_memo for dictionary bookkeeping, will
+  /// be modified
+  /// \return Arrow result with the reconstructed Schema
+  arrow::Result<std::shared_ptr<Schema>> GetSchema(
+      ipc::DictionaryMemo* dictionary_memo) const;
+
+  const std::string& serialized_schema() const { return data_.schema; }
+
+  /// The descriptor associated with this flight, may not be set
+  const FlightDescriptor& descriptor() const { return data_.descriptor; }
+
+  /// A list of endpoints associated with the flight (dataset). To consume the
+  /// whole flight, all endpoints must be consumed
+  const std::vector<FlightEndpoint>& endpoints() const { return data_.endpoints; }
+
+  /// The total number of records (rows) in the dataset. If unknown, set to -1
+  int64_t total_records() const { return data_.total_records; }
+
+  /// The total number of bytes in the dataset. If unknown, set to -1
+  int64_t total_bytes() const { return data_.total_bytes; }
+
+  /// Whether endpoints are in the same order as the data.
+  bool ordered() const { return data_.ordered; }
+
+  /// Application-defined opaque metadata
+  const std::string& app_metadata() const { return data_.app_metadata; }
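+
+  // Illustrative sketch (not part of the original header): as documented
+  // above, a consumer must visit every endpoint to read the whole dataset.
+  // Assuming a FlightInfo `info` obtained from GetFlightInfo, the usual
+  // consumption loop looks like:
+  //
+  //   for (const auto& endpoint : info.endpoints()) {
+  //     // Redeem endpoint.ticket with DoGet against one of
+  //     // endpoint.locations (or against the originating service if the
+  //     // locations list is empty).
+  //   }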
+
+  /// \brief Get the wire-format representation of this type.
+  ///
+  /// Useful when interoperating with non-Flight systems (e.g. REST
+  /// services) that may want to return Flight types.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Parse the wire-format representation of this type.
+  ///
+  /// Useful when interoperating with non-Flight systems (e.g. REST
+  /// services) that may want to return Flight types.
+  static arrow::Result<std::unique_ptr<FlightInfo>> Deserialize(
+      std::string_view serialized);
+
+  std::string ToString() const;
+
+  /// Compare two FlightInfo for equality. This will compare the
+  /// serialized schema representations, NOT the logical equality of
+  /// the schemas.
+  bool Equals(const FlightInfo& other) const;
+
+  friend bool operator==(const FlightInfo& left, const FlightInfo& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const FlightInfo& left, const FlightInfo& right) {
+    return !(left == right);
+  }
+
+ private:
+  Data data_;
+  mutable std::shared_ptr<Schema> schema_;
+  mutable bool reconstructed_schema_;
+};
+
+/// \brief The information to process a long-running query.
+class ARROW_FLIGHT_EXPORT PollInfo {
+ public:
+  /// The currently available results so far.
+  std::unique_ptr<FlightInfo> info = NULLPTR;
+  /// The descriptor the client should use on the next try. If unset,
+  /// the query is complete.
+  std::optional<FlightDescriptor> descriptor = std::nullopt;
+  /// Query progress. Must be in [0.0, 1.0] but need not be
+  /// monotonic or nondecreasing. If unknown, do not set.
+  std::optional<double> progress = std::nullopt;
+  /// Expiration time for this request. After this passes, the server
+  /// might not accept the poll descriptor anymore (and the query may
+  /// be cancelled). This may be updated on a call to PollFlightInfo.
+  std::optional<Timestamp> expiration_time = std::nullopt;
+
+  PollInfo()
+      : info(NULLPTR),
+        descriptor(std::nullopt),
+        progress(std::nullopt),
+        expiration_time(std::nullopt) {}
+
+  explicit PollInfo(std::unique_ptr<FlightInfo> info,
+                    std::optional<FlightDescriptor> descriptor,
+                    std::optional<double> progress,
+                    std::optional<Timestamp> expiration_time)
+      : info(std::move(info)),
+        descriptor(std::move(descriptor)),
+        progress(progress),
+        expiration_time(expiration_time) {}
+
+  // Must not be explicit; to declare one we must declare all ("rule of five")
+  PollInfo(const PollInfo& other)  // NOLINT(runtime/explicit)
+      : info(other.info ? std::make_unique<FlightInfo>(*other.info) : NULLPTR),
+        descriptor(other.descriptor),
+        progress(other.progress),
+        expiration_time(other.expiration_time) {}
+  PollInfo(PollInfo&& other) noexcept = default;  // NOLINT(runtime/explicit)
+  ~PollInfo() = default;
+  PollInfo& operator=(const PollInfo& other) {
+    info = other.info ? std::make_unique<FlightInfo>(*other.info) : NULLPTR;
+    descriptor = other.descriptor;
+    progress = other.progress;
+    expiration_time = other.expiration_time;
+    return *this;
+  }
+  PollInfo& operator=(PollInfo&& other) = default;
+
+  /// \brief Get the wire-format representation of this type.
+  ///
+  /// Useful when interoperating with non-Flight systems (e.g. REST
+  /// services) that may want to return Flight types.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Parse the wire-format representation of this type.
+  ///
+  /// Useful when interoperating with non-Flight systems (e.g. REST
+  /// services) that may want to return Flight types.
+  static arrow::Result<std::unique_ptr<PollInfo>> Deserialize(
+      std::string_view serialized);
+
+  std::string ToString() const;
+
+  /// Compare two PollInfo for equality. This will compare the
+  /// serialized schema representations, NOT the logical equality of
+  /// the schemas.
+  bool Equals(const PollInfo& other) const;
+
+  friend bool operator==(const PollInfo& left, const PollInfo& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const PollInfo& left, const PollInfo& right) {
+    return !(left == right);
+  }
+};
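+
+// Illustrative sketch (not part of the original header): a client-side
+// polling loop over PollInfo. It assumes a connected FlightClient whose
+// PollFlightInfo returns arrow::Result<std::unique_ptr<PollInfo>>; the
+// function name and control flow here are examples, not prescribed API usage:
+//
+//   arrow::Status PollUntilDone(arrow::flight::FlightClient* client,
+//                               arrow::flight::FlightDescriptor descriptor) {
+//     while (true) {
+//       ARROW_ASSIGN_OR_RAISE(auto poll, client->PollFlightInfo(descriptor));
+//       if (!poll->descriptor.has_value()) break;  // unset => query complete
+//       descriptor = *poll->descriptor;  // poll again with the new descriptor
+//     }
+//     return arrow::Status::OK();
+//   }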
+
+/// \brief The request of the CancelFlightInfo action.
+struct ARROW_FLIGHT_EXPORT CancelFlightInfoRequest {
+  std::unique_ptr<FlightInfo> info;
+
+  std::string ToString() const;
+  bool Equals(const CancelFlightInfoRequest& other) const;
+
+  friend bool operator==(const CancelFlightInfoRequest& left,
+                         const CancelFlightInfoRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const CancelFlightInfoRequest& left,
+                         const CancelFlightInfoRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<CancelFlightInfoRequest> Deserialize(std::string_view serialized);
+};
+
+/// \brief Variant supporting all possible value types for {Set,Get}SessionOptions
+///
+/// By convention, an attempt to set a valueless (std::monostate) SessionOptionValue
+/// should attempt to unset or clear the named option value on the server.
+using SessionOptionValue = std::variant<std::monostate, std::string, bool, int64_t,
+                                        double, std::vector<std::string>>;
+
+/// \brief The result of setting a session option.
+enum class SetSessionOptionErrorValue : int8_t {
+  /// \brief The status of setting the option is unknown.
+  ///
+  /// Servers should avoid using this value (send a NOT_FOUND error if the requested
+  /// session is not known). Clients can retry the request.
+  kUnspecified,
+  /// \brief The given session option name is invalid.
+  kInvalidName,
+  /// \brief The session option value or type is invalid.
+  kInvalidValue,
+  /// \brief The session option cannot be set.
+  kError
+};
+std::string ToString(const SetSessionOptionErrorValue& error_value);
+std::ostream& operator<<(std::ostream& os, const SetSessionOptionErrorValue& error_value);
+
+/// \brief The result of closing a session.
+enum class CloseSessionStatus : int8_t {
+  // \brief The session close status is unknown.
+  //
+  // Servers should avoid using this value (send a NOT_FOUND error if the requested
+  // session is not known). Clients can retry the request.
+  kUnspecified,
+  // \brief The session close request is complete.
+  //
+  // Subsequent requests with the same session produce a NOT_FOUND error.
+  kClosed,
+  // \brief The session close request is in progress.
+  //
+  // The client may retry the request.
+  kClosing,
+  // \brief The session is not closeable.
+  //
+  // The client should not retry the request.
+  kNotClosable
+};
+std::string ToString(const CloseSessionStatus& status);
+std::ostream& operator<<(std::ostream& os, const CloseSessionStatus& status);
+
+/// \brief A request to set a set of session options by name/value.
+struct ARROW_FLIGHT_EXPORT SetSessionOptionsRequest {
+  std::map<std::string, SessionOptionValue> session_options;
+
+  std::string ToString() const;
+  bool Equals(const SetSessionOptionsRequest& other) const;
+
+  friend bool operator==(const SetSessionOptionsRequest& left,
+                         const SetSessionOptionsRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const SetSessionOptionsRequest& left,
+                         const SetSessionOptionsRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<SetSessionOptionsRequest> Deserialize(std::string_view serialized);
+};
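+
+// Illustrative sketch (not part of the original header): populating a
+// SetSessionOptionsRequest. The option names are made up for the example,
+// and the variant alternatives assumed are the ones listed above;
+// std::monostate{} follows the stated convention of clearing an option:
+//
+//   arrow::flight::SetSessionOptionsRequest request;
+//   request.session_options["catalog"] = std::string("my_catalog");
+//   request.session_options["max_rows"] = int64_t{1000};
+//   request.session_options["debug"] = std::monostate{};  // unset "debug"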
+
+/// \brief The result(s) of setting session option(s).
+struct ARROW_FLIGHT_EXPORT SetSessionOptionsResult {
+  struct Error {
+    SetSessionOptionErrorValue value;
+
+    bool Equals(const Error& other) const { return value == other.value; }
+    friend bool operator==(const Error& left, const Error& right) {
+      return left.Equals(right);
+    }
+    friend bool operator!=(const Error& left, const Error& right) {
+      return !(left == right);
+    }
+  };
+
+  std::map<std::string, Error> errors;
+
+  std::string ToString() const;
+  bool Equals(const SetSessionOptionsResult& other) const;
+
+  friend bool operator==(const SetSessionOptionsResult& left,
+                         const SetSessionOptionsResult& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const SetSessionOptionsResult& left,
+                         const SetSessionOptionsResult& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<SetSessionOptionsResult> Deserialize(std::string_view serialized);
+};
+
+/// \brief A request to get current session options.
+struct ARROW_FLIGHT_EXPORT GetSessionOptionsRequest {
+  std::string ToString() const;
+  bool Equals(const GetSessionOptionsRequest& other) const;
+
+  friend bool operator==(const GetSessionOptionsRequest& left,
+                         const GetSessionOptionsRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const GetSessionOptionsRequest& left,
+                         const GetSessionOptionsRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<GetSessionOptionsRequest> Deserialize(std::string_view serialized);
+};
+
+/// \brief The current session options.
+struct ARROW_FLIGHT_EXPORT GetSessionOptionsResult {
+  std::map<std::string, SessionOptionValue> session_options;
+
+  std::string ToString() const;
+  bool Equals(const GetSessionOptionsResult& other) const;
+
+  friend bool operator==(const GetSessionOptionsResult& left,
+                         const GetSessionOptionsResult& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const GetSessionOptionsResult& left,
+                         const GetSessionOptionsResult& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<GetSessionOptionsResult> Deserialize(std::string_view serialized);
+};
+
+/// \brief A request to close the open client session.
+struct ARROW_FLIGHT_EXPORT CloseSessionRequest {
+  std::string ToString() const;
+  bool Equals(const CloseSessionRequest& other) const;
+
+  friend bool operator==(const CloseSessionRequest& left,
+                         const CloseSessionRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const CloseSessionRequest& left,
+                         const CloseSessionRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<CloseSessionRequest> Deserialize(std::string_view serialized);
+};
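+
+// Illustrative sketch (not part of the original header): inspecting which
+// options a server rejected. `result` is a hypothetical
+// SetSessionOptionsResult returned by a SetSessionOptions call; the
+// streaming uses the operator<< declared above for SetSessionOptionErrorValue:
+//
+//   for (const auto& [name, error] : result.errors) {
+//     std::cerr << "option '" << name << "' rejected: " << error.value << "\n";
+//   }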
+
+/// \brief The result of attempting to close the client session.
+struct ARROW_FLIGHT_EXPORT CloseSessionResult {
+  CloseSessionStatus status;
+
+  std::string ToString() const;
+  bool Equals(const CloseSessionResult& other) const;
+
+  friend bool operator==(const CloseSessionResult& left,
+                         const CloseSessionResult& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const CloseSessionResult& left,
+                         const CloseSessionResult& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<CloseSessionResult> Deserialize(std::string_view serialized);
+};
+
+/// \brief An iterator to FlightInfo instances returned by ListFlights.
+class ARROW_FLIGHT_EXPORT FlightListing {
+ public:
+  virtual ~FlightListing() = default;
+
+  /// \brief Retrieve the next FlightInfo from the iterator.
+  /// \return Arrow result with a single FlightInfo. Set to \a nullptr if there
+  /// are none left.
+  virtual arrow::Result<std::unique_ptr<FlightInfo>> Next() = 0;
+};
+
+/// \brief An iterator to Result instances returned by DoAction.
+class ARROW_FLIGHT_EXPORT ResultStream {
+ public:
+  virtual ~ResultStream() = default;
+
+  /// \brief Retrieve the next Result from the iterator.
+  /// \return Arrow result with a single Result. Set to \a nullptr if there are none left.
+  virtual arrow::Result<std::unique_ptr<Result>> Next() = 0;
+
+  /// \brief Read and drop the remaining messages to get the error (if any) from a server.
+  /// \return Status OK if there is no error from a server, any other status if a
+  /// server returns an error.
+  Status Drain();
+};
+
+/// \brief A holder for a RecordBatch with associated Flight metadata.
+struct ARROW_FLIGHT_EXPORT FlightStreamChunk {
+ public:
+  std::shared_ptr<RecordBatch> data;
+  std::shared_ptr<Buffer> app_metadata;
+};
+
+/// \brief An interface to read Flight data with metadata.
+class ARROW_FLIGHT_EXPORT MetadataRecordBatchReader {
+ public:
+  virtual ~MetadataRecordBatchReader() = default;
+
+  /// \brief Get the schema for this stream.
+  virtual arrow::Result<std::shared_ptr<Schema>> GetSchema() = 0;
+
+  /// \brief Get the next message from Flight. If the stream is
+  /// finished, then the members of \a FlightStreamChunk will be
+  /// nullptr.
+  virtual arrow::Result<FlightStreamChunk> Next() = 0;
+
+  /// \brief Consume entire stream as a vector of record batches
+  virtual arrow::Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches();
+
+  /// \brief Consume entire stream as a Table
+  virtual arrow::Result<std::shared_ptr<Table>> ToTable();
+};
+
+/// \brief Convert a MetadataRecordBatchReader to a regular RecordBatchReader.
+ARROW_FLIGHT_EXPORT
+arrow::Result<std::shared_ptr<RecordBatchReader>> MakeRecordBatchReader(
+    std::shared_ptr<MetadataRecordBatchReader> reader);
+
+/// \brief An interface to write IPC payloads with metadata.
+class ARROW_FLIGHT_EXPORT MetadataRecordBatchWriter : public ipc::RecordBatchWriter {
+ public:
+  virtual ~MetadataRecordBatchWriter() = default;
+  /// \brief Begin writing data with the given schema. Only used with \a DoExchange.
+  virtual Status Begin(const std::shared_ptr<Schema>& schema,
+                       const ipc::IpcWriteOptions& options) = 0;
+  virtual Status Begin(const std::shared_ptr<Schema>& schema);
+  virtual Status WriteMetadata(std::shared_ptr<Buffer> app_metadata) = 0;
+  virtual Status WriteWithMetadata(const RecordBatch& batch,
+                                   std::shared_ptr<Buffer> app_metadata) = 0;
+};
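+
+// Illustrative sketch (not part of the original header): draining a
+// MetadataRecordBatchReader to completion, given a reader obtained from a
+// DoGet/DoExchange call. Per the contract above, a chunk whose members are
+// all nullptr marks the end of the stream:
+//
+//   arrow::Status ConsumeExample(arrow::flight::MetadataRecordBatchReader* reader) {
+//     while (true) {
+//       ARROW_ASSIGN_OR_RAISE(arrow::flight::FlightStreamChunk chunk, reader->Next());
+//       if (chunk.data == nullptr && chunk.app_metadata == nullptr) break;
+//       // ... process chunk.data and/or chunk.app_metadata ...
+//     }
+//     return arrow::Status::OK();
+//   }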
+
+/// \brief A FlightListing implementation based on a vector of
+/// FlightInfo objects.
+///
+/// This can be iterated once, then it is consumed.
+class ARROW_FLIGHT_EXPORT SimpleFlightListing : public FlightListing {
+ public:
+  explicit SimpleFlightListing(const std::vector<FlightInfo>& flights);
+  explicit SimpleFlightListing(std::vector<FlightInfo>&& flights);
+
+  arrow::Result<std::unique_ptr<FlightInfo>> Next() override;
+
+ private:
+  int position_;
+  std::vector<FlightInfo> flights_;
+};
+
+/// \brief A ResultStream implementation based on a vector of
+/// Result objects.
+///
+/// This can be iterated once, then it is consumed.
+class ARROW_FLIGHT_EXPORT SimpleResultStream : public ResultStream {
+ public:
+  explicit SimpleResultStream(std::vector<Result>&& results);
+  arrow::Result<std::unique_ptr<Result>> Next() override;
+
+ private:
+  std::vector<Result> results_;
+  size_t position_;
+};
+
+/// \defgroup flight-error Error Handling
+/// Types for handling errors from RPCs. Flight uses a set of status
+/// codes standardized across Flight implementations, so these types
+/// let applications work directly with those codes instead of having
+/// to translate to and from Arrow Status.
+/// @{
+
+/// \brief Abstract status code for an RPC as per the Flight
+/// specification.
+enum class TransportStatusCode {
+  /// \brief No error.
+  kOk = 0,
+  /// \brief An unknown error occurred.
+  kUnknown = 1,
+  /// \brief An error occurred in the transport implementation, or an
+  /// error internal to the service implementation occurred.
+  kInternal = 2,
+  /// \brief An argument is invalid.
+  kInvalidArgument = 3,
+  /// \brief The request timed out.
+  kTimedOut = 4,
+  /// \brief An argument is not necessarily invalid, but references
+  /// some resource that does not exist. Prefer over
+  /// kInvalidArgument where applicable.
+  kNotFound = 5,
+  /// \brief The request attempted to create some resource that
+  /// already exists.
+  kAlreadyExists = 6,
+  /// \brief The request was explicitly cancelled.
+  kCancelled = 7,
+  /// \brief The client is not authenticated.
+  kUnauthenticated = 8,
+  /// \brief The client is not authorized to perform this request.
+  kUnauthorized = 9,
+  /// \brief The request is not implemented.
+  kUnimplemented = 10,
+  /// \brief There is a network connectivity error, or some resource
+  /// is otherwise unavailable. Most likely a temporary condition.
+  kUnavailable = 11,
+};
+
+/// \brief Convert a code to a string.
+std::string ToString(TransportStatusCode code);
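+
+// Illustrative sketch (not part of the original header): branching on a
+// TransportStatusCode, e.g. to decide whether a call is worth retrying.
+// The retry policy shown is an assumption for the example, not part of the
+// Flight specification:
+//
+//   bool IsRetryable(arrow::flight::TransportStatusCode code) {
+//     switch (code) {
+//       case arrow::flight::TransportStatusCode::kTimedOut:
+//       case arrow::flight::TransportStatusCode::kUnavailable:
+//         return true;  // likely transient conditions
+//       default:
+//         return false;
+//     }
+//   }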
+
+/// \brief An error from an RPC call, using Flight error codes directly
+/// instead of trying to translate to Arrow Status.
+///
+/// Currently, only attached to the Status passed to AsyncListener::OnFinish.
+///
+/// This API is EXPERIMENTAL.
+class ARROW_FLIGHT_EXPORT TransportStatusDetail : public StatusDetail {
+ public:
+  constexpr static const char* kTypeId = "flight::TransportStatusDetail";
+  explicit TransportStatusDetail(TransportStatusCode code, std::string message,
+                                 std::vector<std::pair<std::string, std::string>> details)
+      : code_(code), message_(std::move(message)), details_(std::move(details)) {}
+  const char* type_id() const override { return kTypeId; }
+  std::string ToString() const override;
+
+  static std::optional<std::reference_wrapper<const TransportStatusDetail>> Unwrap(
+      const Status& status);
+
+  TransportStatusCode code() const { return code_; }
+  std::string_view message() const { return message_; }
+  const std::vector<std::pair<std::string, std::string>>& details() const {
+    return details_;
+  }
+
+ private:
+  TransportStatusCode code_;
+  std::string message_;
+  std::vector<std::pair<std::string, std::string>> details_;
+};
+
+/// @}
+
+}  // namespace flight
+}  // namespace arrow
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..bdee8b751d8a33bff8f2c2a4348ad47b752de84c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#ifdef ARROW_FLIGHT_STATIC
+#define ARROW_FLIGHT_EXPORT
+#elif defined(ARROW_FLIGHT_EXPORTING)
+#define ARROW_FLIGHT_EXPORT __declspec(dllexport)
+#else
+#define ARROW_FLIGHT_EXPORT __declspec(dllimport)
+#endif
+
+#define ARROW_FLIGHT_NO_EXPORT
+#else  // Not Windows
+#ifndef ARROW_FLIGHT_EXPORT
+#define ARROW_FLIGHT_EXPORT __attribute__((visibility("default")))
+#endif
+#ifndef ARROW_FLIGHT_NO_EXPORT
+#define ARROW_FLIGHT_NO_EXPORT __attribute__((visibility("hidden")))
+#endif
+#endif  // Non-Windows
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
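+
+// Illustrative sketch (not part of the original header): how these macros
+// are typically consumed. When building the Flight library itself,
+// ARROW_FLIGHT_EXPORTING is defined so symbols are exported; consumers see
+// the import side (or a no-op for static builds). `MyFlightType` is a
+// hypothetical name for the example:
+//
+//   #include "arrow/flight/visibility.h"
+//
+//   class ARROW_FLIGHT_EXPORT MyFlightType {
+//     // ... declarations visible across the shared-library boundary ...
+//   };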
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b7cfa1cb166fd7bf06474e27ae6d80a23edb400
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h
@@ -0,0 +1,217 @@
+// Vendored from git tag v2021.02.15.00
+
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author Bo Hu (bhu@fb.com)
+// @author Jordan DeLong (delong.j@fb.com)
+
+// This file has been modified as part of Apache Arrow to conform to
+// Apache Arrow's coding conventions
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+#include <memory>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+namespace arrow_vendored {
+namespace folly {
+
+// Vendored from folly/Portability.h
+namespace {
+#if defined(__arm__)
+#define FOLLY_ARM 1
+#else
+#define FOLLY_ARM 0
+#endif
+
+#if defined(__s390x__)
+#define FOLLY_S390X 1
+#else
+#define FOLLY_S390X 0
+#endif
+
+constexpr bool kIsArchArm = FOLLY_ARM == 1;
+constexpr bool kIsArchS390X = FOLLY_S390X == 1;
+}  // namespace
+
+// Vendored from folly/lang/Align.h
+namespace {
+
+constexpr std::size_t hardware_destructive_interference_size =
+    (kIsArchArm || kIsArchS390X) ? 64 : 128;
+
+}  // namespace
+
+/*
+ * ProducerConsumerQueue is a one producer and one consumer queue
+ * without locks.
+ */
+template <class T>
+struct ProducerConsumerQueue {
+  typedef T value_type;
+
+  ProducerConsumerQueue(const ProducerConsumerQueue&) = delete;
+  ProducerConsumerQueue& operator=(const ProducerConsumerQueue&) = delete;
+
+  // size must be >= 2.
+  //
+  // Also, note that the number of usable slots in the queue at any
+  // given time is actually (size-1), so if you start with an empty queue,
+  // IsFull() will return true after size-1 insertions.
+  explicit ProducerConsumerQueue(uint32_t size)
+      : size_(size),
+        records_(static_cast<T*>(std::malloc(sizeof(T) * size))),
+        readIndex_(0),
+        writeIndex_(0) {
+    assert(size >= 2);
+    if (!records_) {
+      throw std::bad_alloc();
+    }
+  }
+
+  ~ProducerConsumerQueue() {
+    // We need to destruct anything that may still exist in our queue.
+    // (No real synchronization needed at destructor time: only one
+    // thread can be doing this.)
+    if (!std::is_trivially_destructible<T>::value) {
+      size_t readIndex = readIndex_;
+      size_t endIndex = writeIndex_;
+      while (readIndex != endIndex) {
+        records_[readIndex].~T();
+        if (++readIndex == size_) {
+          readIndex = 0;
+        }
+      }
+    }
+
+    std::free(records_);
+  }
+
+  template <class... Args>
+  bool Write(Args&&... recordArgs) {
+    auto const currentWrite = writeIndex_.load(std::memory_order_relaxed);
+    auto nextRecord = currentWrite + 1;
+    if (nextRecord == size_) {
+      nextRecord = 0;
+    }
+    if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
+      new (&records_[currentWrite]) T(std::forward<Args>(recordArgs)...);
+      writeIndex_.store(nextRecord, std::memory_order_release);
+      return true;
+    }
+
+    // queue is full
+    return false;
+  }
+
+  // move the value at the front of the queue to given variable
+  bool Read(T& record) {
+    auto const currentRead = readIndex_.load(std::memory_order_relaxed);
+    if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
+      // queue is empty
+      return false;
+    }
+
+    auto nextRecord = currentRead + 1;
+    if (nextRecord == size_) {
+      nextRecord = 0;
+    }
+    record = std::move(records_[currentRead]);
+    records_[currentRead].~T();
+    readIndex_.store(nextRecord, std::memory_order_release);
+    return true;
+  }
+
+  // pointer to the value at the front of the queue (for use in-place) or
+  // nullptr if empty.
+  T* FrontPtr() {
+    auto const currentRead = readIndex_.load(std::memory_order_relaxed);
+    if (currentRead == writeIndex_.load(std::memory_order_acquire)) {
+      // queue is empty
+      return nullptr;
+    }
+    return &records_[currentRead];
+  }
+
+  // queue must not be empty
+  void PopFront() {
+    auto const currentRead = readIndex_.load(std::memory_order_relaxed);
+    assert(currentRead != writeIndex_.load(std::memory_order_acquire));
+
+    auto nextRecord = currentRead + 1;
+    if (nextRecord == size_) {
+      nextRecord = 0;
+    }
+    records_[currentRead].~T();
+    readIndex_.store(nextRecord, std::memory_order_release);
+  }
+
+  bool IsEmpty() const {
+    return readIndex_.load(std::memory_order_acquire) ==
+           writeIndex_.load(std::memory_order_acquire);
+  }
+
+  bool IsFull() const {
+    auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1;
+    if (nextRecord == size_) {
+      nextRecord = 0;
+    }
+    if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
+      return false;
+    }
+    // queue is full
+    return true;
+  }
+
+  // * If called by consumer, then true size may be more (because producer may
+  //   be adding items concurrently).
+  // * If called by producer, then true size may be less (because consumer may
+  //   be removing items concurrently).
+  // * It is undefined to call this from any other thread.
+  size_t SizeGuess() const {
+    int ret = writeIndex_.load(std::memory_order_acquire) -
+              readIndex_.load(std::memory_order_acquire);
+    if (ret < 0) {
+      ret += size_;
+    }
+    return ret;
+  }
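+
+  // Illustrative sketch (not part of the original header): single-producer /
+  // single-consumer usage. Exactly one thread may call Write() and exactly
+  // one thread may call Read(); the capacity value here is made up:
+  //
+  //   arrow_vendored::folly::ProducerConsumerQueue<int> queue(1024);
+  //
+  //   // producer thread:
+  //   while (!queue.Write(42)) { /* spin: queue is full */ }
+  //
+  //   // consumer thread:
+  //   int value;
+  //   while (!queue.Read(value)) { /* spin: queue is empty */ }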
+
+  // maximum number of items in the queue.
+  size_t capacity() const { return size_ - 1; }
+
+ private:
+  using AtomicIndex = std::atomic<unsigned int>;
+
+  char pad0_[hardware_destructive_interference_size];
+  const uint32_t size_;
+  T* const records_;
+
+  AtomicIndex readIndex_;
+  char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)];
+  AtomicIndex writeIndex_;
+
+  char pad2_[hardware_destructive_interference_size - sizeof(AtomicIndex)];
+};
+
+}  // namespace folly
+}  // namespace arrow_vendored
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h
new file mode 100644
index 0000000000000000000000000000000000000000..e437cdcbc2daecdafe589a9483d9348324ad32ad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/vendored/datetime/date.h"  // IWYU pragma: export
+#include "arrow/vendored/datetime/tz.h"    // IWYU pragma: export
+
+// Can be defined by date.h.
+#ifdef NOEXCEPT
+#undef NOEXCEPT
+#endif
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd2569c6de0f642f65b56e777b7e10e68fca3337
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h
@@ -0,0 +1,8237 @@
+#ifndef DATE_H
+#define DATE_H
+
+// The MIT License (MIT)
+//
+// Copyright (c) 2015, 2016, 2017 Howard Hinnant
+// Copyright (c) 2016 Adrian Colomitchi
+// Copyright (c) 2017 Florian Dang
+// Copyright (c) 2017 Paul Thompson
+// Copyright (c) 2018, 2019 Tomasz Kamiński
+// Copyright (c) 2019 Jiangang Zhuang
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +#ifndef HAS_STRING_VIEW +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_STRING_VIEW 1 +# else +# define HAS_STRING_VIEW 0 +# endif +#endif // HAS_STRING_VIEW + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if HAS_STRING_VIEW +# include +#endif +#include +#include + +#ifdef __GNUC__ +# pragma GCC diagnostic push +# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 7) +# pragma GCC diagnostic ignored "-Wpedantic" +# endif +# if __GNUC__ < 5 + // GCC 4.9 Bug 61489 Wrong warning with -Wmissing-field-initializers +# pragma GCC diagnostic ignored "-Wmissing-field-initializers" +# endif +#endif + +#ifdef _MSC_VER +# pragma warning(push) +// warning C4127: conditional expression is constant +# pragma warning(disable : 4127) +#endif + +namespace arrow_vendored +{ +namespace date +{ + +//---------------+ +// Configuration | +//---------------+ + +#ifndef ONLY_C_LOCALE +# define ONLY_C_LOCALE 0 +#endif + +#if defined(_MSC_VER) && (!defined(__clang__) || (_MSC_VER < 1910)) +// MSVC +# ifndef _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING +# define _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING +# endif +# if _MSC_VER < 1910 +// before VS2017 +# define CONSTDATA const +# define CONSTCD11 +# define CONSTCD14 +# define NOEXCEPT _NOEXCEPT +# else +// VS2017 and later +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 constexpr +# define NOEXCEPT noexcept +# endif + +#elif defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x5150 +// Oracle Developer Studio 12.6 and earlier +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 +# define NOEXCEPT noexcept + +#elif __cplusplus >= 201402 +// C++14 +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 constexpr +# define NOEXCEPT noexcept +#else +// C++11 +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 +# define NOEXCEPT noexcept +#endif + +#ifndef HAS_UNCAUGHT_EXCEPTIONS +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_UNCAUGHT_EXCEPTIONS 1 +# else +# define HAS_UNCAUGHT_EXCEPTIONS 0 +# endif +#endif // HAS_UNCAUGHT_EXCEPTIONS + +#ifndef HAS_VOID_T +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_VOID_T 1 +# else +# define HAS_VOID_T 0 +# endif +#endif // HAS_VOID_T + +// Protect from Oracle sun macro +#ifdef sun +# undef sun +#endif + +// Work around for a NVCC compiler bug which causes it to fail +// to compile std::ratio_{multiply,divide} when used directly +// in the std::chrono::duration template instantiations below +namespace detail { +template +using ratio_multiply = decltype(std::ratio_multiply{}); + +template +using ratio_divide = decltype(std::ratio_divide{}); +} // namespace detail + +//-----------+ +// Interface | +//-----------+ + +// durations + 
+using days = std::chrono::duration + , std::chrono::hours::period>>; + +using weeks = std::chrono::duration + , days::period>>; + +using years = std::chrono::duration + , days::period>>; + +using months = std::chrono::duration + >>; + +// time_point + +template + using sys_time = std::chrono::time_point; + +using sys_days = sys_time; +using sys_seconds = sys_time; + +struct local_t {}; + +template + using local_time = std::chrono::time_point; + +using local_seconds = local_time; +using local_days = local_time; + +// types + +struct last_spec +{ + explicit last_spec() = default; +}; + +class day; +class month; +class year; + +class weekday; +class weekday_indexed; +class weekday_last; + +class month_day; +class month_day_last; +class month_weekday; +class month_weekday_last; + +class year_month; + +class year_month_day; +class year_month_day_last; +class year_month_weekday; +class year_month_weekday_last; + +// date composition operators + +CONSTCD11 year_month operator/(const year& y, const month& m) NOEXCEPT; +CONSTCD11 year_month operator/(const year& y, int m) NOEXCEPT; + +CONSTCD11 month_day operator/(const day& d, const month& m) NOEXCEPT; +CONSTCD11 month_day operator/(const day& d, int m) NOEXCEPT; +CONSTCD11 month_day operator/(const month& m, const day& d) NOEXCEPT; +CONSTCD11 month_day operator/(const month& m, int d) NOEXCEPT; +CONSTCD11 month_day operator/(int m, const day& d) NOEXCEPT; + +CONSTCD11 month_day_last operator/(const month& m, last_spec) NOEXCEPT; +CONSTCD11 month_day_last operator/(int m, last_spec) NOEXCEPT; +CONSTCD11 month_day_last operator/(last_spec, const month& m) NOEXCEPT; +CONSTCD11 month_day_last operator/(last_spec, int m) NOEXCEPT; + +CONSTCD11 month_weekday operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT; +CONSTCD11 month_weekday operator/(int m, const weekday_indexed& wdi) NOEXCEPT; +CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT; +CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, int m) NOEXCEPT; + +CONSTCD11 month_weekday_last operator/(const month& m, const weekday_last& wdl) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(int m, const weekday_last& wdl) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, const month& m) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, int m) NOEXCEPT; + +CONSTCD11 year_month_day operator/(const year_month& ym, const day& d) NOEXCEPT; +CONSTCD11 year_month_day operator/(const year_month& ym, int d) NOEXCEPT; +CONSTCD11 year_month_day operator/(const year& y, const month_day& md) NOEXCEPT; +CONSTCD11 year_month_day operator/(int y, const month_day& md) NOEXCEPT; +CONSTCD11 year_month_day operator/(const month_day& md, const year& y) NOEXCEPT; +CONSTCD11 year_month_day operator/(const month_day& md, int y) NOEXCEPT; + +CONSTCD11 + year_month_day_last operator/(const year_month& ym, last_spec) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const year& y, const month_day_last& mdl) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(int y, const month_day_last& mdl) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const month_day_last& mdl, const year& y) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const month_day_last& mdl, int y) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const year& y, const month_weekday& mwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday 
+operator/(int y, const month_weekday& mwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const month_weekday& mwd, const year& y) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const month_weekday& mwd, int y) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(int y, const month_weekday_last& mwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, int y) NOEXCEPT; + +// Detailed interface + +// day + +class day +{ + unsigned char d_; + +public: + day() = default; + explicit CONSTCD11 day(unsigned d) NOEXCEPT; + + CONSTCD14 day& operator++() NOEXCEPT; + CONSTCD14 day operator++(int) NOEXCEPT; + CONSTCD14 day& operator--() NOEXCEPT; + CONSTCD14 day operator--(int) NOEXCEPT; + + CONSTCD14 day& operator+=(const days& d) NOEXCEPT; + CONSTCD14 day& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator< (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator> (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const day& x, const day& y) NOEXCEPT; + +CONSTCD11 day operator+(const day& x, const days& y) NOEXCEPT; +CONSTCD11 day operator+(const days& x, const day& y) NOEXCEPT; +CONSTCD11 day operator-(const day& x, const days& y) NOEXCEPT; +CONSTCD11 days operator-(const day& x, const day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d); + +// month + +class month +{ + unsigned char m_; + +public: + month() = default; + explicit CONSTCD11 month(unsigned m) NOEXCEPT; + + CONSTCD14 month& operator++() NOEXCEPT; + CONSTCD14 month operator++(int) NOEXCEPT; + CONSTCD14 month& operator--() NOEXCEPT; + CONSTCD14 month operator--(int) NOEXCEPT; + + CONSTCD14 month& operator+=(const months& m) NOEXCEPT; + CONSTCD14 month& operator-=(const months& m) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator< (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator> (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month& x, const month& y) NOEXCEPT; + +CONSTCD14 month operator+(const month& x, const months& y) NOEXCEPT; +CONSTCD14 month operator+(const months& x, const month& y) NOEXCEPT; +CONSTCD14 month operator-(const month& x, const months& y) NOEXCEPT; +CONSTCD14 months operator-(const month& x, const month& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m); + +// year + +class year +{ + short y_; + +public: + year() = default; + explicit CONSTCD11 year(int y) NOEXCEPT; + + CONSTCD14 year& operator++() NOEXCEPT; + CONSTCD14 year operator++(int) NOEXCEPT; + CONSTCD14 year& operator--() NOEXCEPT; 
+ CONSTCD14 year operator--(int) NOEXCEPT; + + CONSTCD14 year& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 year operator-() const NOEXCEPT; + CONSTCD11 year operator+() const NOEXCEPT; + + CONSTCD11 bool is_leap() const NOEXCEPT; + + CONSTCD11 explicit operator int() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + + static CONSTCD11 year min() NOEXCEPT { return year{-32767}; } + static CONSTCD11 year max() NOEXCEPT { return year{32767}; } +}; + +CONSTCD11 bool operator==(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator< (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator> (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year& x, const year& y) NOEXCEPT; + +CONSTCD11 year operator+(const year& x, const years& y) NOEXCEPT; +CONSTCD11 year operator+(const years& x, const year& y) NOEXCEPT; +CONSTCD11 year operator-(const year& x, const years& y) NOEXCEPT; +CONSTCD11 years operator-(const year& x, const year& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y); + +// weekday + +class weekday +{ + unsigned char wd_; +public: + weekday() = default; + explicit CONSTCD11 weekday(unsigned wd) NOEXCEPT; + CONSTCD14 weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit weekday(const local_days& dp) NOEXCEPT; + + CONSTCD14 weekday& operator++() NOEXCEPT; + CONSTCD14 weekday operator++(int) NOEXCEPT; + CONSTCD14 weekday& operator--() NOEXCEPT; + CONSTCD14 weekday operator--(int) NOEXCEPT; + + CONSTCD14 weekday& operator+=(const days& d) NOEXCEPT; + CONSTCD14 weekday& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; + + CONSTCD11 unsigned c_encoding() const NOEXCEPT; + CONSTCD11 unsigned iso_encoding() const NOEXCEPT; + + CONSTCD11 weekday_indexed operator[](unsigned index) const NOEXCEPT; + CONSTCD11 weekday_last operator[](last_spec) const NOEXCEPT; + +private: + static CONSTCD14 unsigned char weekday_from_days(int z) NOEXCEPT; + + friend CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; + template + friend std::basic_ostream& + operator<<(std::basic_ostream& os, const weekday& wd); + friend class weekday_indexed; +}; + +CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday& x, const weekday& y) NOEXCEPT; + +CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 weekday operator+(const days& x, const weekday& y) NOEXCEPT; +CONSTCD14 weekday operator-(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd); + +// weekday_indexed + +class weekday_indexed +{ + unsigned char wd_ : 4; + unsigned char index_ : 4; + +public: + weekday_indexed() = default; + CONSTCD11 weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; +CONSTCD11 
bool operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_indexed& wdi); + +// weekday_last + +class weekday_last +{ + date::weekday wd_; + +public: + explicit CONSTCD11 weekday_last(const date::weekday& wd) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_last& x, const weekday_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl); + +namespace detail +{ + +struct unspecified_month_disambiguator {}; + +} // namespace detail + +// year_month + +class year_month +{ + date::year y_; + date::month m_; + +public: + year_month() = default; + CONSTCD11 year_month(const date::year& y, const date::month& m) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + + template + CONSTCD14 year_month& operator+=(const months& dm) NOEXCEPT; + template + CONSTCD14 year_month& operator-=(const months& dm) NOEXCEPT; + CONSTCD14 year_month& operator+=(const years& dy) NOEXCEPT; + CONSTCD14 year_month& operator-=(const years& dy) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month& x, const year_month& y) NOEXCEPT; + +template +CONSTCD14 year_month operator+(const year_month& ym, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month operator+(const months& dm, const year_month& ym) NOEXCEPT; +template +CONSTCD14 year_month operator-(const year_month& ym, const months& dm) NOEXCEPT; + +CONSTCD11 months operator-(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 year_month operator+(const year_month& ym, const years& dy) NOEXCEPT; +CONSTCD11 year_month operator+(const years& dy, const year_month& ym) NOEXCEPT; +CONSTCD11 year_month operator-(const year_month& ym, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym); + +// month_day + +class month_day +{ + date::month m_; + date::day d_; + +public: + month_day() = default; + CONSTCD11 month_day(const date::month& m, const date::day& d) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day& x, const month_day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md); + +// month_day_last + +class month_day_last +{ + date::month m_; + +public: + CONSTCD11 explicit month_day_last(const date::month& 
m) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl); + +// month_weekday + +class month_weekday +{ + date::month m_; + date::weekday_indexed wdi_; +public: + CONSTCD11 month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd); + +// month_weekday_last + +class month_weekday_last +{ + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 month_weekday_last(const date::month& m, + const date::weekday_last& wd) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl); + +// class year_month_day + +class year_month_day +{ + date::year y_; + date::month m_; + date::day d_; + +public: + year_month_day() = default; + CONSTCD11 year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT; + CONSTCD14 year_month_day(const year_month_day_last& ymdl) NOEXCEPT; + + CONSTCD14 year_month_day(sys_days dp) NOEXCEPT; + CONSTCD14 explicit year_month_day(local_days dp) NOEXCEPT; + + template + CONSTCD14 year_month_day& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_day from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month_day& x, const 
year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT; + +template +CONSTCD14 year_month_day operator+(const year_month_day& ymd, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month_day operator+(const months& dm, const year_month_day& ymd) NOEXCEPT; +template +CONSTCD14 year_month_day operator-(const year_month_day& ymd, const months& dm) NOEXCEPT; +CONSTCD11 year_month_day operator+(const year_month_day& ymd, const years& dy) NOEXCEPT; +CONSTCD11 year_month_day operator+(const years& dy, const year_month_day& ymd) NOEXCEPT; +CONSTCD11 year_month_day operator-(const year_month_day& ymd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd); + +// year_month_day_last + +class year_month_day_last +{ + date::year y_; + date::month_day_last mdl_; + +public: + CONSTCD11 year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT; + + template + CONSTCD14 year_month_day_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::month_day_last month_day_last() const NOEXCEPT; + CONSTCD14 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator< (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator> (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl); + +// year_month_weekday + +class year_month_weekday +{ + date::year y_; + date::month m_; + date::weekday_indexed wdi_; + +public: + year_month_weekday() = default; + CONSTCD11 year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + CONSTCD14 year_month_weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit year_month_weekday(const local_days& dp) NOEXCEPT; + + template + CONSTCD14 year_month_weekday& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 
year_month_weekday& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_weekday from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi); + +// year_month_weekday_last + +class year_month_weekday_last +{ + date::year y_; + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 year_month_weekday_last(const date::year& y, const date::month& m, + const date::weekday_last& wdl) NOEXCEPT; + + template + CONSTCD14 year_month_weekday_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_weekday_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + +private: + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +CONSTCD11 +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator-(const 
year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT;
+
+template<class CharT, class Traits>
+std::basic_ostream<CharT, Traits>&
+operator<<(std::basic_ostream<CharT, Traits>& os, const year_month_weekday_last& ymwdl);
+
+#if !defined(_MSC_VER) || (_MSC_VER >= 1900)
+inline namespace literals
+{
+
+CONSTCD11 date::day  operator "" _d(unsigned long long d) NOEXCEPT;
+CONSTCD11 date::year operator "" _y(unsigned long long y) NOEXCEPT;
+
+}  // inline namespace literals
+#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900)
+
+// CONSTDATA date::month January{1};
+// CONSTDATA date::month February{2};
+// CONSTDATA date::month March{3};
+// CONSTDATA date::month April{4};
+// CONSTDATA date::month May{5};
+// CONSTDATA date::month June{6};
+// CONSTDATA date::month July{7};
+// CONSTDATA date::month August{8};
+// CONSTDATA date::month September{9};
+// CONSTDATA date::month October{10};
+// CONSTDATA date::month November{11};
+// CONSTDATA date::month December{12};
+//
+// CONSTDATA date::weekday Sunday{0u};
+// CONSTDATA date::weekday Monday{1u};
+// CONSTDATA date::weekday Tuesday{2u};
+// CONSTDATA date::weekday Wednesday{3u};
+// CONSTDATA date::weekday Thursday{4u};
+// CONSTDATA date::weekday Friday{5u};
+// CONSTDATA date::weekday Saturday{6u};
+
+#if HAS_VOID_T
+
+template <class T, class = std::void_t<>>
+struct is_clock
+    : std::false_type
+{};
+
+template <class T>
+struct is_clock<T, std::void_t<decltype(T::now()), typename T::rep, typename T::period,
+                               typename T::duration, typename T::time_point,
+                               decltype(T::is_steady)>>
+    : std::true_type
+{};
+
+template<class T> inline constexpr bool is_clock_v = is_clock<T>::value;
+
+#endif  // HAS_VOID_T
+
+//----------------+
+// Implementation |
+//----------------+
+
+// utilities
+namespace detail {
+
+template<class CharT, class Traits = std::char_traits<CharT>>
+class save_istream
+{
+protected:
+    std::basic_ios<CharT, Traits>& is_;
+    CharT fill_;
+    std::ios::fmtflags flags_;
+    std::streamsize precision_;
+    std::streamsize width_;
+    std::basic_ostream<CharT, Traits>* tie_;
+    std::locale loc_;
+
+public:
+    ~save_istream()
+    {
+        is_.fill(fill_);
+        is_.flags(flags_);
+        is_.precision(precision_);
+        is_.width(width_);
+        is_.imbue(loc_);
+        is_.tie(tie_);
+    }
+
+    save_istream(const save_istream&) = delete;
+    save_istream& operator=(const save_istream&) = delete;
+
+    explicit save_istream(std::basic_ios<CharT, Traits>& is)
+        : is_(is)
+        , fill_(is.fill())
+        , flags_(is.flags())
+        , precision_(is.precision())
+        , width_(is.width(0))
+        , tie_(is.tie(nullptr))
+        , loc_(is.getloc())
+    {
+        if (tie_ != nullptr)
+            tie_->flush();
+    }
+};
+
+template<class CharT, class Traits = std::char_traits<CharT>>
+class save_ostream
+    : private save_istream<CharT, Traits>
+{
+public:
+    ~save_ostream()
+    {
+        if ((this->flags_ & std::ios::unitbuf) &&
+#if HAS_UNCAUGHT_EXCEPTIONS
+            std::uncaught_exceptions() == 0 &&
+#else
+            !std::uncaught_exception() &&
+#endif
+            this->is_.good())
+            this->is_.rdbuf()->pubsync();
+    }
+
+    save_ostream(const save_ostream&) = delete;
+    save_ostream& operator=(const save_ostream&) = delete;
+
+    explicit save_ostream(std::basic_ios<CharT, Traits>& os)
+        : save_istream<CharT, Traits>(os)
+    {
+    }
+};
+
+template <class T>
+struct choose_trunc_type
+{
+    static const int digits = std::numeric_limits<T>::digits;
+    using type = typename std::conditional
+                 <
+                     digits < 32,
+                     std::int32_t,
+                     typename std::conditional
+                     <
+                         digits < 64,
+                         std::int64_t,
+#ifdef __SIZEOF_INT128__
+                         __int128
+#else
+                         std::int64_t
+#endif
+                     >::type
+                 >::type;
+};
+
+template <class T>
+CONSTCD11
+inline
+typename std::enable_if
+<
+    !std::chrono::treat_as_floating_point<T>::value,
+    T
+>::type
+trunc(T t) NOEXCEPT
+{
+    return t;
+}
+
+template <class T>
+CONSTCD14
+inline
+typename std::enable_if
+<
+    std::chrono::treat_as_floating_point<T>::value,
+    T
+>::type
+trunc(T t) NOEXCEPT
+{
+    using std::numeric_limits;
+    using I = typename choose_trunc_type<T>::type;
+    CONSTDATA auto digits = numeric_limits<T>::digits;
+    static_assert(digits <
numeric_limits<I>::digits, "");
+    CONSTDATA auto max = I{1} << (digits-1);
+    CONSTDATA auto min = -max;
+    const auto negative = t < T{0};
+    if (min <= t && t <= max && t != 0 && t == t)
+    {
+        t = static_cast<T>(static_cast<I>(t));
+        if (t == 0 && negative)
+            t = -t;
+    }
+    return t;
+}
+
+template <std::intmax_t Xp, std::intmax_t Yp>
+struct static_gcd
+{
+    static const std::intmax_t value = static_gcd<Yp, Xp % Yp>::value;
+};
+
+template <std::intmax_t Xp>
+struct static_gcd<Xp, 0>
+{
+    static const std::intmax_t value = Xp;
+};
+
+template <>
+struct static_gcd<0, 0>
+{
+    static const std::intmax_t value = 1;
+};
+
+template <class R1, class R2>
+struct no_overflow
+{
+private:
+    static const std::intmax_t gcd_n1_n2 = static_gcd<R1::num, R2::num>::value;
+    static const std::intmax_t gcd_d1_d2 = static_gcd<R1::den, R2::den>::value;
+    static const std::intmax_t n1 = R1::num / gcd_n1_n2;
+    static const std::intmax_t d1 = R1::den / gcd_d1_d2;
+    static const std::intmax_t n2 = R2::num / gcd_n1_n2;
+    static const std::intmax_t d2 = R2::den / gcd_d1_d2;
+#ifdef __cpp_constexpr
+    static const std::intmax_t max = std::numeric_limits<std::intmax_t>::max();
+#else
+    static const std::intmax_t max = LLONG_MAX;
+#endif
+
+    template <std::intmax_t Xp, std::intmax_t Yp, bool overflow>
+    struct mul    // overflow == false
+    {
+        static const std::intmax_t value = Xp * Yp;
+    };
+
+    template <std::intmax_t Xp, std::intmax_t Yp>
+    struct mul<Xp, Yp, true>
+    {
+        static const std::intmax_t value = 1;
+    };
+
+public:
+    static const bool value = (n1 <= max / d2) && (n2 <= max / d1);
+    typedef std::ratio<mul<n1, d2, !value>::value,
+                       mul<n2, d1, !value>::value> type;
+};
+
+} // detail
+
+// trunc towards zero
+template <class To, class Rep, class Period>
+CONSTCD11
+inline
+typename std::enable_if
+<
+    detail::no_overflow<Period, typename To::period>::value,
+    To
+>::type
+trunc(const std::chrono::duration<Rep, Period>& d)
+{
+    return To{detail::trunc(std::chrono::duration_cast<To>(d).count())};
+}
+
+template <class To, class Rep, class Period>
+CONSTCD11
+inline
+typename std::enable_if
+<
+    !detail::no_overflow<Period, typename To::period>::value,
+    To
+>::type
+trunc(const std::chrono::duration<Rep, Period>& d)
+{
+    using std::chrono::duration_cast;
+    using std::chrono::duration;
+    using rep = typename std::common_type<Rep, typename To::rep>::type;
+    return To{detail::trunc(duration_cast<To>(duration_cast<duration<rep>>(d)).count())};
+}
+
+#ifndef HAS_CHRONO_ROUNDING
+#  if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023918 || (_MSC_FULL_VER >= 190000000 && defined (__clang__)))
+#    define HAS_CHRONO_ROUNDING 1
+#  elif defined(__cpp_lib_chrono) && __cplusplus > 201402 && __cpp_lib_chrono >= 201510
+#    define HAS_CHRONO_ROUNDING 1
+#  elif defined(_LIBCPP_VERSION) && __cplusplus > 201402 && _LIBCPP_VERSION >= 3800
+#    define HAS_CHRONO_ROUNDING 1
+#  else
+#    define HAS_CHRONO_ROUNDING 0
+#  endif
+#endif  // HAS_CHRONO_ROUNDING
+
+#if HAS_CHRONO_ROUNDING == 0
+
+// round down
+template <class To, class Rep, class Period>
+CONSTCD14
+inline
+typename std::enable_if
+<
+    detail::no_overflow<Period, typename To::period>::value,
+    To
+>::type
+floor(const std::chrono::duration<Rep, Period>& d)
+{
+    auto t = trunc<To>(d);
+    if (t > d)
+        return t - To{1};
+    return t;
+}
+
+template <class To, class Rep, class Period>
+CONSTCD14
+inline
+typename std::enable_if
+<
+    !detail::no_overflow<Period, typename To::period>::value,
+    To
+>::type
+floor(const std::chrono::duration<Rep, Period>& d)
+{
+    using rep = typename std::common_type<Rep, typename To::rep>::type;
+    return floor<To>(floor<std::chrono::duration<rep>>(d));
+}
+
+// round to nearest, to even on tie
+template <class To, class Rep, class Period>
+CONSTCD14
+inline
+To
+round(const std::chrono::duration<Rep, Period>& d)
+{
+    auto t0 = floor<To>(d);
+    auto t1 = t0 + To{1};
+    if (t1 == To{0} && t0 < To{0})
+        t1 = -t1;
+    auto diff0 = d - t0;
+    auto diff1 = t1 - d;
+    if (diff0 == diff1)
+    {
+        if (t0 - trunc<To>(t0/2)*2 == To{0})
+            return t0;
+        return t1;
+    }
+    if (diff0 < diff1)
+        return t0;
+    return t1;
+}
+
+// round up
+template <class To, class Rep, class Period>
+CONSTCD14
+inline
+To
+ceil(const std::chrono::duration<Rep, Period>& d)
+{
+    auto t = trunc<To>(d);
+    if (t < d)
+        return t + To{1};
+    return t;
+}
+
+template <class Rep, class Period,
+          class = typename std::enable_if
+          <
+              std::numeric_limits<Rep>::is_signed
+          >::type>
+CONSTCD11
+std::chrono::duration +abs(std::chrono::duration d) +{ + return d >= d.zero() ? d : static_cast(-d); +} + +// round down +template +CONSTCD11 +inline +std::chrono::time_point +floor(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{date::floor(tp.time_since_epoch())}; +} + +// round to nearest, to even on tie +template +CONSTCD11 +inline +std::chrono::time_point +round(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round(tp.time_since_epoch())}; +} + +// round up +template +CONSTCD11 +inline +std::chrono::time_point +ceil(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{ceil(tp.time_since_epoch())}; +} + +#else // HAS_CHRONO_ROUNDING == 1 + +using std::chrono::floor; +using std::chrono::ceil; +using std::chrono::round; +using std::chrono::abs; + +#endif // HAS_CHRONO_ROUNDING + +namespace detail +{ + +template +CONSTCD14 +inline +typename std::enable_if +< + !std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return round(d); +} + +template +CONSTCD14 +inline +typename std::enable_if +< + std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return d; +} + +template +CONSTCD11 +inline +std::chrono::time_point +round_i(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round_i(tp.time_since_epoch())}; +} + +} // detail + +// trunc towards zero +template +CONSTCD11 +inline +std::chrono::time_point +trunc(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{trunc(tp.time_since_epoch())}; +} + +// day + +CONSTCD11 inline day::day(unsigned d) NOEXCEPT : d_(static_cast(d)) {} +CONSTCD14 inline day& day::operator++() NOEXCEPT {++d_; return *this;} +CONSTCD14 inline day day::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline day& day::operator--() NOEXCEPT {--d_; return *this;} +CONSTCD14 inline day day::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline day& day::operator+=(const days& d) NOEXCEPT {*this = *this + d; return *this;} +CONSTCD14 inline day& day::operator-=(const days& d) NOEXCEPT {*this = *this - d; return *this;} +CONSTCD11 inline day::operator unsigned() const NOEXCEPT {return d_;} +CONSTCD11 inline bool day::ok() const NOEXCEPT {return 1 <= d_ && d_ <= 31;} + +CONSTCD11 +inline +bool +operator==(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const day& x, const day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const day& x, const day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const day& x, const day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const day& x, const day& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +days +operator-(const day& x, const day& y) NOEXCEPT +{ + return days{static_cast(static_cast(x) + - static_cast(y))}; +} + +CONSTCD11 +inline +day +operator+(const day& x, const days& y) NOEXCEPT +{ + return day{static_cast(x) + static_cast(y.count())}; +} + +CONSTCD11 +inline +day +operator+(const days& x, const day& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +day +operator-(const 
day& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const day& d) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(d); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d) +{ + detail::low_level_fmt(os, d); + if (!d.ok()) + os << " is not a valid day"; + return os; +} + +// month + +CONSTCD11 inline month::month(unsigned m) NOEXCEPT : m_(static_cast(m)) {} +CONSTCD14 inline month& month::operator++() NOEXCEPT {*this += months{1}; return *this;} +CONSTCD14 inline month month::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline month& month::operator--() NOEXCEPT {*this -= months{1}; return *this;} +CONSTCD14 inline month month::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +month& +month::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +CONSTCD14 +inline +month& +month::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD11 inline month::operator unsigned() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month::ok() const NOEXCEPT {return 1 <= m_ && m_ <= 12;} + +CONSTCD11 +inline +bool +operator==(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const month& x, const month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const month& x, const month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month& x, const month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month& x, const month& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD14 +inline +months +operator-(const month& x, const month& y) NOEXCEPT +{ + auto const d = static_cast(x) - static_cast(y); + return months(d <= 11 ? d : d + 12); +} + +CONSTCD14 +inline +month +operator+(const month& x, const months& y) NOEXCEPT +{ + auto const mu = static_cast(static_cast(x)) + y.count() - 1; + auto const yr = (mu >= 0 ? 
mu : mu-11) / 12; + return month{static_cast(mu - yr * 12 + 1)}; +} + +CONSTCD14 +inline +month +operator+(const months& x, const month& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +month +operator-(const month& x, const months& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month& m) +{ + if (m.ok()) + { + CharT fmt[] = {'%', 'b', 0}; + os << format(os.getloc(), fmt, m); + } + else + os << static_cast(m); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m) +{ + detail::low_level_fmt(os, m); + if (!m.ok()) + os << " is not a valid month"; + return os; +} + +// year + +CONSTCD11 inline year::year(int y) NOEXCEPT : y_(static_cast(y)) {} +CONSTCD14 inline year& year::operator++() NOEXCEPT {++y_; return *this;} +CONSTCD14 inline year year::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline year& year::operator--() NOEXCEPT {--y_; return *this;} +CONSTCD14 inline year year::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline year& year::operator+=(const years& y) NOEXCEPT {*this = *this + y; return *this;} +CONSTCD14 inline year& year::operator-=(const years& y) NOEXCEPT {*this = *this - y; return *this;} +CONSTCD11 inline year year::operator-() const NOEXCEPT {return year{-y_};} +CONSTCD11 inline year year::operator+() const NOEXCEPT {return *this;} + +CONSTCD11 +inline +bool +year::is_leap() const NOEXCEPT +{ + return y_ % 4 == 0 && (y_ % 100 != 0 || y_ % 400 == 0); +} + +CONSTCD11 inline year::operator int() const NOEXCEPT {return y_;} + +CONSTCD11 +inline +bool +year::ok() const NOEXCEPT +{ + return y_ != std::numeric_limits::min(); +} + +CONSTCD11 +inline +bool +operator==(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const year& x, const year& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const year& x, const year& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year& x, const year& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year& x, const year& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +years +operator-(const year& x, const year& y) NOEXCEPT +{ + return years{static_cast(x) - static_cast(y)}; +} + +CONSTCD11 +inline +year +operator+(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) + y.count()}; +} + +CONSTCD11 +inline +year +operator+(const years& x, const year& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +year +operator-(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) - y.count()}; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year& y) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::internal); + os.width(4 + (y < year{0})); + os.imbue(std::locale::classic()); + os << static_cast(y); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y) +{ + detail::low_level_fmt(os, y); + if (!y.ok()) + os << " is not a valid year"; + return os; +} + +// weekday + +CONSTCD14 +inline +unsigned char 
+weekday::weekday_from_days(int z) NOEXCEPT +{ + auto u = static_cast(z); + return static_cast(z >= -4 ? (u+4) % 7 : u % 7); +} + +CONSTCD11 +inline +weekday::weekday(unsigned wd) NOEXCEPT + : wd_(static_cast(wd != 7 ? wd : 0)) + {} + +CONSTCD14 +inline +weekday::weekday(const sys_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 +inline +weekday::weekday(const local_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 inline weekday& weekday::operator++() NOEXCEPT {*this += days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline weekday& weekday::operator--() NOEXCEPT {*this -= days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +weekday& +weekday::operator+=(const days& d) NOEXCEPT +{ + *this = *this + d; + return *this; +} + +CONSTCD14 +inline +weekday& +weekday::operator-=(const days& d) NOEXCEPT +{ + *this = *this - d; + return *this; +} + +CONSTCD11 inline bool weekday::ok() const NOEXCEPT {return wd_ <= 6;} + +CONSTCD11 +inline +unsigned weekday::c_encoding() const NOEXCEPT +{ + return unsigned{wd_}; +} + +CONSTCD11 +inline +unsigned weekday::iso_encoding() const NOEXCEPT +{ + return unsigned{((wd_ == 0u) ? 7u : wd_)}; +} + +CONSTCD11 +inline +bool +operator==(const weekday& x, const weekday& y) NOEXCEPT +{ + return x.wd_ == y.wd_; +} + +CONSTCD11 +inline +bool +operator!=(const weekday& x, const weekday& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD14 +inline +days +operator-(const weekday& x, const weekday& y) NOEXCEPT +{ + auto const wdu = x.wd_ - y.wd_; + auto const wk = (wdu >= 0 ? wdu : wdu-6) / 7; + return days{wdu - wk * 7}; +} + +CONSTCD14 +inline +weekday +operator+(const weekday& x, const days& y) NOEXCEPT +{ + auto const wdu = static_cast(static_cast(x.wd_)) + y.count(); + auto const wk = (wdu >= 0 ? 
wdu : wdu-6) / 7; + return weekday{static_cast(wdu - wk * 7)}; +} + +CONSTCD14 +inline +weekday +operator+(const days& x, const weekday& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +weekday +operator-(const weekday& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday& wd) +{ + if (wd.ok()) + { + CharT fmt[] = {'%', 'a', 0}; + os << format(fmt, wd); + } + else + os << wd.c_encoding(); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd) +{ + detail::low_level_fmt(os, wd); + if (!wd.ok()) + os << " is not a valid weekday"; + return os; +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +inline namespace literals +{ + +CONSTCD11 +inline +date::day +operator "" _d(unsigned long long d) NOEXCEPT +{ + return date::day{static_cast(d)}; +} + +CONSTCD11 +inline +date::year +operator "" _y(unsigned long long y) NOEXCEPT +{ + return date::year(static_cast(y)); +} +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +CONSTDATA date::last_spec last{}; + +CONSTDATA date::month jan{1}; +CONSTDATA date::month feb{2}; +CONSTDATA date::month mar{3}; +CONSTDATA date::month apr{4}; +CONSTDATA date::month may{5}; +CONSTDATA date::month jun{6}; +CONSTDATA date::month jul{7}; +CONSTDATA date::month aug{8}; +CONSTDATA date::month sep{9}; +CONSTDATA date::month oct{10}; +CONSTDATA date::month nov{11}; +CONSTDATA date::month dec{12}; + +CONSTDATA date::weekday sun{0u}; +CONSTDATA date::weekday mon{1u}; +CONSTDATA date::weekday tue{2u}; +CONSTDATA date::weekday wed{3u}; +CONSTDATA date::weekday thu{4u}; +CONSTDATA date::weekday fri{5u}; +CONSTDATA date::weekday sat{6u}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +} // inline namespace literals +#endif + +CONSTDATA date::month January{1}; +CONSTDATA date::month February{2}; +CONSTDATA date::month March{3}; +CONSTDATA date::month April{4}; +CONSTDATA date::month May{5}; +CONSTDATA date::month June{6}; +CONSTDATA date::month July{7}; +CONSTDATA date::month August{8}; +CONSTDATA date::month September{9}; +CONSTDATA date::month October{10}; +CONSTDATA date::month November{11}; +CONSTDATA date::month December{12}; + +CONSTDATA date::weekday Monday{1}; +CONSTDATA date::weekday Tuesday{2}; +CONSTDATA date::weekday Wednesday{3}; +CONSTDATA date::weekday Thursday{4}; +CONSTDATA date::weekday Friday{5}; +CONSTDATA date::weekday Saturday{6}; +CONSTDATA date::weekday Sunday{7}; + +// weekday_indexed + +CONSTCD11 +inline +weekday +weekday_indexed::weekday() const NOEXCEPT +{ + return date::weekday{static_cast(wd_)}; +} + +CONSTCD11 inline unsigned weekday_indexed::index() const NOEXCEPT {return index_;} + +CONSTCD11 +inline +bool +weekday_indexed::ok() const NOEXCEPT +{ + return weekday().ok() && 1 <= index_ && index_ <= 5; +} + +#ifdef __GNUC__ +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wconversion" +#endif // __GNUC__ + +CONSTCD11 +inline +weekday_indexed::weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT + : wd_(static_cast(static_cast(wd.wd_))) + , index_(static_cast(index)) + {} + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif // __GNUC__ + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_indexed& wdi) +{ + return low_level_fmt(os, wdi.weekday()) << '[' << wdi.index() << ']'; +} + +} // namespace detail + +template +inline +std::basic_ostream& 
+operator<<(std::basic_ostream& os, const weekday_indexed& wdi) +{ + detail::low_level_fmt(os, wdi); + if (!wdi.ok()) + os << " is not a valid weekday_indexed"; + return os; +} + +CONSTCD11 +inline +weekday_indexed +weekday::operator[](unsigned index) const NOEXCEPT +{ + return {*this, index}; +} + +CONSTCD11 +inline +bool +operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return x.weekday() == y.weekday() && x.index() == y.index(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return !(x == y); +} + +// weekday_last + +CONSTCD11 inline date::weekday weekday_last::weekday() const NOEXCEPT {return wd_;} +CONSTCD11 inline bool weekday_last::ok() const NOEXCEPT {return wd_.ok();} +CONSTCD11 inline weekday_last::weekday_last(const date::weekday& wd) NOEXCEPT : wd_(wd) {} + +CONSTCD11 +inline +bool +operator==(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return x.weekday() == y.weekday(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_last& wdl) +{ + return low_level_fmt(os, wdl.weekday()) << "[last]"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl) +{ + detail::low_level_fmt(os, wdl); + if (!wdl.ok()) + os << " is not a valid weekday_last"; + return os; +} + +CONSTCD11 +inline +weekday_last +weekday::operator[](last_spec) const NOEXCEPT +{ + return weekday_last{*this}; +} + +// year_month + +CONSTCD11 +inline +year_month::year_month(const date::year& y, const date::month& m) NOEXCEPT + : y_(y) + , m_(m) + {} + +CONSTCD11 inline year year_month::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool year_month::ok() const NOEXCEPT {return y_.ok() && m_.ok();} + +template +CONSTCD14 +inline +year_month& +year_month::operator+=(const months& dm) NOEXCEPT +{ + *this = *this + dm; + return *this; +} + +template +CONSTCD14 +inline +year_month& +year_month::operator-=(const months& dm) NOEXCEPT +{ + *this = *this - dm; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator+=(const years& dy) NOEXCEPT +{ + *this = *this + dy; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator-=(const years& dy) NOEXCEPT +{ + *this = *this - dy; + return *this; +} + +CONSTCD11 +inline +bool +operator==(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month() < y.month())); +} + +CONSTCD11 +inline +bool +operator>(const year_month& x, const year_month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x < y); +} + +template +CONSTCD14 +inline +year_month +operator+(const year_month& ym, const months& dm) NOEXCEPT +{ + auto dmi = static_cast(static_cast(ym.month())) - 1 + dm.count(); + auto dy = (dmi >= 0 ? dmi : dmi-11) / 12; + dmi = dmi - dy * 12 + 1; + return (ym.year() + years(dy)) / month(static_cast(dmi)); +} + +template +CONSTCD14 +inline +year_month +operator+(const months& dm, const year_month& ym) NOEXCEPT +{ + return ym + dm; +} + +template +CONSTCD14 +inline +year_month +operator-(const year_month& ym, const months& dm) NOEXCEPT +{ + return ym + -dm; +} + +CONSTCD11 +inline +months +operator-(const year_month& x, const year_month& y) NOEXCEPT +{ + return (x.year() - y.year()) + + months(static_cast(x.month()) - static_cast(y.month())); +} + +CONSTCD11 +inline +year_month +operator+(const year_month& ym, const years& dy) NOEXCEPT +{ + return (ym.year() + dy) / ym.month(); +} + +CONSTCD11 +inline +year_month +operator+(const years& dy, const year_month& ym) NOEXCEPT +{ + return ym + dy; +} + +CONSTCD11 +inline +year_month +operator-(const year_month& ym, const years& dy) NOEXCEPT +{ + return ym + -dy; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month& ym) +{ + low_level_fmt(os, ym.year()) << '/'; + return low_level_fmt(os, ym.month()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym) +{ + detail::low_level_fmt(os, ym); + if (!ym.ok()) + os << " is not a valid year_month"; + return os; +} + +// month_day + +CONSTCD11 +inline +month_day::month_day(const date::month& m, const date::day& d) NOEXCEPT + : m_(m) + , d_(d) + {} + +CONSTCD11 inline date::month month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline date::day month_day::day() const NOEXCEPT {return d_;} + +CONSTCD14 +inline +bool +month_day::ok() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(29), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return m_.ok() && date::day{1} <= d_ && d_ <= d[static_cast(m_)-1]; +} + +CONSTCD11 +inline +bool +operator==(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() < y.month() ? true + : (x.month() > y.month() ? 
false + : (x.day() < y.day())); +} + +CONSTCD11 +inline +bool +operator>(const month_day& x, const month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day& md) +{ + low_level_fmt(os, md.month()) << '/'; + return low_level_fmt(os, md.day()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md) +{ + detail::low_level_fmt(os, md); + if (!md.ok()) + os << " is not a valid month_day"; + return os; +} + +// month_day_last + +CONSTCD11 inline month month_day_last::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month_day_last::ok() const NOEXCEPT {return m_.ok();} +CONSTCD11 inline month_day_last::month_day_last(const date::month& m) NOEXCEPT : m_(m) {} + +CONSTCD11 +inline +bool +operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() < y.month(); +} + +CONSTCD11 +inline +bool +operator>(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day_last& mdl) +{ + return low_level_fmt(os, mdl.month()) << "/last"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl) +{ + detail::low_level_fmt(os, mdl); + if (!mdl.ok()) + os << " is not a valid month_day_last"; + return os; +} + +// month_weekday + +CONSTCD11 +inline +month_weekday::month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT + : m_(m) + , wdi_(wdi) + {} + +CONSTCD11 inline month month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_indexed +month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD11 +inline +bool +month_weekday::ok() const NOEXCEPT +{ + return m_.ok() && wdi_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday& mwd) +{ + low_level_fmt(os, mwd.month()) << '/'; + return low_level_fmt(os, mwd.weekday_indexed()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd) +{ + detail::low_level_fmt(os, mwd); + if (!mwd.ok()) + os << " is not a valid month_weekday"; + return os; +} + +// month_weekday_last + +CONSTCD11 +inline +month_weekday_last::month_weekday_last(const 
date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : m_(m) + , wdl_(wdl) + {} + +CONSTCD11 inline month month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_last +month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD11 +inline +bool +month_weekday_last::ok() const NOEXCEPT +{ + return m_.ok() && wdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + low_level_fmt(os, mwdl.month()) << '/'; + return low_level_fmt(os, mwdl.weekday_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + detail::low_level_fmt(os, mwdl); + if (!mwdl.ok()) + os << " is not a valid month_weekday_last"; + return os; +} + +// year_month_day_last + +CONSTCD11 +inline +year_month_day_last::year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT + : y_(y) + , mdl_(mdl) + {} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_day_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day_last::month() const NOEXCEPT {return mdl_.month();} + +CONSTCD11 +inline +month_day_last +year_month_day_last::month_day_last() const NOEXCEPT +{ + return mdl_; +} + +CONSTCD14 +inline +day +year_month_day_last::day() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(28), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return (month() != February || !y_.is_leap()) && mdl_.ok() ? + d[static_cast(month()) - 1] : date::day{29}; +} + +CONSTCD14 +inline +year_month_day_last::operator sys_days() const NOEXCEPT +{ + return sys_days(year()/month()/day()); +} + +CONSTCD14 +inline +year_month_day_last::operator local_days() const NOEXCEPT +{ + return local_days(year()/month()/day()); +} + +CONSTCD11 +inline +bool +year_month_day_last::ok() const NOEXCEPT +{ + return y_.ok() && mdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month_day_last() == y.month_day_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month_day_last() < y.month_day_last())); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + low_level_fmt(os, ymdl.year()) << '/'; + return low_level_fmt(os, ymdl.month_day_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + detail::low_level_fmt(os, ymdl); + if (!ymdl.ok()) + os << " is not a valid year_month_day_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return (ymdl.year() / ymdl.month() + dm) / last; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dm; +} + +template +CONSTCD14 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return ymdl + (-dm); +} + +CONSTCD11 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return {ymdl.year()+dy, ymdl.month_day_last()}; +} + +CONSTCD11 +inline +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dy; +} + +CONSTCD11 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return ymdl + (-dy); +} + +// year_month_day + +CONSTCD11 +inline +year_month_day::year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT + : y_(y) + , m_(m) + , d_(d) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(const year_month_day_last& ymdl) NOEXCEPT + : y_(ymdl.year()) + , m_(ymdl.month()) + , d_(ymdl.day()) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(sys_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(local_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD11 inline year year_month_day::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline day year_month_day::day() const NOEXCEPT {return d_;} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD14 +inline +days +year_month_day::to_days() const NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed 
integer"); + auto const y = static_cast(y_) - (m_ <= February); + auto const m = static_cast(m_); + auto const d = static_cast(d_); + auto const era = (y >= 0 ? y : y-399) / 400; + auto const yoe = static_cast(y - era * 400); // [0, 399] + auto const doy = (153*(m > 2 ? m-3 : m+9) + 2)/5 + d-1; // [0, 365] + auto const doe = yoe * 365 + yoe/4 - yoe/100 + doy; // [0, 146096] + return days{era * 146097 + static_cast(doe) - 719468}; +} + +CONSTCD14 +inline +year_month_day::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_day::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_day::ok() const NOEXCEPT +{ + if (!(y_.ok() && m_.ok())) + return false; + return date::day{1} <= d_ && d_ <= (y_ / m_ / last).day(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? false + : (x.month() < y.month() ? true + : (x.month() > y.month() ? false + : (x.day() < y.day())))); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x < y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.imbue(std::locale::classic()); + os << static_cast(ymd.year()) << '-'; + os.width(2); + os << static_cast(ymd.month()) << '-'; + os.width(2); + os << static_cast(ymd.day()); + if (!ymd.ok()) + os << " is not a valid year_month_day"; + return os; +} + +CONSTCD14 +inline +year_month_day +year_month_day::from_days(days dp) NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed integer"); + auto const z = dp.count() + 719468; + auto const era = (z >= 0 ? z : z - 146096) / 146097; + auto const doe = static_cast(z - era * 146097); // [0, 146096] + auto const yoe = (doe - doe/1460 + doe/36524 - doe/146096) / 365; // [0, 399] + auto const y = static_cast(yoe) + era * 400; + auto const doy = doe - (365*yoe + yoe/4 - yoe/100); // [0, 365] + auto const mp = (5*doy + 2)/153; // [0, 11] + auto const d = doy - (153*mp+2)/5 + 1; // [1, 31] + auto const m = mp < 10 ? 
mp+3 : mp-9; // [1, 12] + return year_month_day{date::year{y + (m <= 2)}, date::month(m), date::day(d)}; +} + +template +CONSTCD14 +inline +year_month_day +operator+(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return (ymd.year() / ymd.month() + dm) / ymd.day(); +} + +template +CONSTCD14 +inline +year_month_day +operator+(const months& dm, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dm; +} + +template +CONSTCD14 +inline +year_month_day +operator-(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return ymd + (-dm); +} + +CONSTCD11 +inline +year_month_day +operator+(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return (ymd.year() + dy) / ymd.month() / ymd.day(); +} + +CONSTCD11 +inline +year_month_day +operator+(const years& dy, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dy; +} + +CONSTCD11 +inline +year_month_day +operator-(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return ymd + (-dy); +} + +// year_month_weekday + +CONSTCD11 +inline +year_month_weekday::year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) + NOEXCEPT + : y_(y) + , m_(m) + , wdi_(wdi) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const sys_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const local_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday::weekday() const NOEXCEPT +{ + return wdi_.weekday(); +} + +CONSTCD11 +inline +unsigned +year_month_weekday::index() const NOEXCEPT +{ + return wdi_.index(); +} + +CONSTCD11 +inline +weekday_indexed +year_month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD14 +inline +year_month_weekday::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_weekday::ok() const NOEXCEPT +{ + if (!y_.ok() || !m_.ok() || !wdi_.weekday().ok() || wdi_.index() < 1) + return false; + if (wdi_.index() <= 4) + return true; + auto d2 = wdi_.weekday() - date::weekday(static_cast(y_/m_/1)) + + days((wdi_.index()-1)*7 + 1); + return static_cast(d2.count()) <= static_cast((y_/m_/last).day()); +} + +CONSTCD14 +inline +year_month_weekday +year_month_weekday::from_days(days d) NOEXCEPT +{ + sys_days dp{d}; + auto const wd = date::weekday(dp); + auto const ymd = year_month_day(dp); + return {ymd.year(), ymd.month(), wd[(static_cast(ymd.day())-1)/7+1]}; +} + +CONSTCD14 +inline +days +year_month_weekday::to_days() const NOEXCEPT +{ + auto d = sys_days(y_/m_/1); + return (d + 
(wdi_.weekday() - date::weekday(d) + days{(wdi_.index()-1)*7}) + ).time_since_epoch(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi) +{ + detail::low_level_fmt(os, ymwdi.year()) << '/'; + detail::low_level_fmt(os, ymwdi.month()) << '/'; + detail::low_level_fmt(os, ymwdi.weekday_indexed()); + if (!ymwdi.ok()) + os << " is not a valid year_month_weekday"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return (ymwd.year() / ymwd.month() + dm) / ymwd.weekday_indexed(); +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dm; +} + +template +CONSTCD14 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return ymwd + (-dm); +} + +CONSTCD11 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return {ymwd.year()+dy, ymwd.month(), ymwd.weekday_indexed()}; +} + +CONSTCD11 +inline +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dy; +} + +CONSTCD11 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return ymwd + (-dy); +} + +// year_month_weekday_last + +CONSTCD11 +inline +year_month_weekday_last::year_month_weekday_last(const date::year& y, + const date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : y_(y) + , m_(m) + , wdl_(wdl) + {} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday_last::weekday() const NOEXCEPT +{ + return wdl_.weekday(); +} + +CONSTCD11 +inline +weekday_last +year_month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD14 +inline +year_month_weekday_last::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday_last::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD11 +inline +bool +year_month_weekday_last::ok() const NOEXCEPT +{ + return y_.ok() && m_.ok() && wdl_.ok(); +} + +CONSTCD14 +inline +days +year_month_weekday_last::to_days() const NOEXCEPT +{ + auto const d = sys_days(y_/m_/last); + return (d - (date::weekday{d} - wdl_.weekday())).time_since_epoch(); +} + 
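+// Usage sketch (illustrative only, not part of the upstream date.h header):
+// the calendar types above compose with operator/ and convert implicitly to
+// sys_days, so a field-based date maps to the serial-day count and back.
+// Assuming the date namespace is in scope, the last Friday of May 2021:
+//
+//     constexpr auto ymwdl = year{2021}/May/Friday[last]; // year_month_weekday_last
+//     constexpr sys_days sd{ymwdl};        // days since 1970-01-01: 2021-05-28
+//     static_assert(year_month_day{sd} == year{2021}/May/day{28}, "");
+//
+// The CONSTCD14 conversions are constexpr only under C++14 and later; with a
+// C++11 compiler the same expressions are valid at run time.
+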
+CONSTCD11 +inline +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday_last& ymwdl) +{ + detail::low_level_fmt(os, ymwdl.year()) << '/'; + detail::low_level_fmt(os, ymwdl.month()) << '/'; + detail::low_level_fmt(os, ymwdl.weekday_last()); + if (!ymwdl.ok()) + os << " is not a valid year_month_weekday_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return (ymwdl.year() / ymwdl.month() + dm) / ymwdl.weekday_last(); +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dm; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return ymwdl + (-dm); +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return {ymwdl.year()+dy, ymwdl.month(), ymwdl.weekday_last()}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dy; +} + +CONSTCD11 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return ymwdl + (-dy); +} + +// year_month from operator/() + +CONSTCD11 +inline +year_month +operator/(const year& y, const month& m) NOEXCEPT +{ + return {y, m}; +} + +CONSTCD11 +inline +year_month +operator/(const year& y, int m) NOEXCEPT +{ + return y / month(static_cast(m)); +} + +// month_day from operator/() + +CONSTCD11 +inline +month_day +operator/(const month& m, const day& d) NOEXCEPT +{ + return {m, d}; +} + +CONSTCD11 +inline +month_day +operator/(const day& d, const month& m) NOEXCEPT +{ + return m / d; +} + +CONSTCD11 +inline +month_day +operator/(const month& m, int d) NOEXCEPT +{ + return m / day(static_cast(d)); +} + +CONSTCD11 +inline +month_day +operator/(int m, const day& d) NOEXCEPT +{ + return month(static_cast(m)) / d; +} + +CONSTCD11 inline month_day operator/(const day& d, int m) NOEXCEPT {return m / d;} + +// month_day_last from operator/() + +CONSTCD11 +inline +month_day_last +operator/(const month& m, last_spec) NOEXCEPT +{ + return month_day_last{m}; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, const month& m) NOEXCEPT +{ + return m/last; +} + +CONSTCD11 +inline +month_day_last +operator/(int m, last_spec) NOEXCEPT +{ + return month(static_cast(m))/last; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, int m) NOEXCEPT +{ + return m/last; +} + +// month_weekday from operator/() + +CONSTCD11 +inline +month_weekday +operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT +{ + return {m, wdi}; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT +{ + return m / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(int m, const weekday_indexed& wdi) NOEXCEPT +{ + return month(static_cast(m)) / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, int m) NOEXCEPT +{ + return 
m / wdi; +} + +// month_weekday_last from operator/() + +CONSTCD11 +inline +month_weekday_last +operator/(const month& m, const weekday_last& wdl) NOEXCEPT +{ + return {m, wdl}; +} + +CONSTCD11 +inline +month_weekday_last +operator/(const weekday_last& wdl, const month& m) NOEXCEPT +{ + return m / wdl; +} + +CONSTCD11 +inline +month_weekday_last +operator/(int m, const weekday_last& wdl) NOEXCEPT +{ + return month(static_cast(m)) / wdl; +} + +CONSTCD11 +inline +month_weekday_last +operator/(const weekday_last& wdl, int m) NOEXCEPT +{ + return m / wdl; +} + +// year_month_day from operator/() + +CONSTCD11 +inline +year_month_day +operator/(const year_month& ym, const day& d) NOEXCEPT +{ + return {ym.year(), ym.month(), d}; +} + +CONSTCD11 +inline +year_month_day +operator/(const year_month& ym, int d) NOEXCEPT +{ + return ym / day(static_cast(d)); +} + +CONSTCD11 +inline +year_month_day +operator/(const year& y, const month_day& md) NOEXCEPT +{ + return y / md.month() / md.day(); +} + +CONSTCD11 +inline +year_month_day +operator/(int y, const month_day& md) NOEXCEPT +{ + return year(y) / md; +} + +CONSTCD11 +inline +year_month_day +operator/(const month_day& md, const year& y) NOEXCEPT +{ + return y / md; +} + +CONSTCD11 +inline +year_month_day +operator/(const month_day& md, int y) NOEXCEPT +{ + return year(y) / md; +} + +// year_month_day_last from operator/() + +CONSTCD11 +inline +year_month_day_last +operator/(const year_month& ym, last_spec) NOEXCEPT +{ + return {ym.year(), month_day_last{ym.month()}}; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const year& y, const month_day_last& mdl) NOEXCEPT +{ + return {y, mdl}; +} + +CONSTCD11 +inline +year_month_day_last +operator/(int y, const month_day_last& mdl) NOEXCEPT +{ + return year(y) / mdl; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const month_day_last& mdl, const year& y) NOEXCEPT +{ + return y / mdl; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const month_day_last& mdl, int y) NOEXCEPT +{ + return year(y) / mdl; +} + +// year_month_weekday from operator/() + +CONSTCD11 +inline +year_month_weekday +operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT +{ + return {ym.year(), ym.month(), wdi}; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const year& y, const month_weekday& mwd) NOEXCEPT +{ + return {y, mwd.month(), mwd.weekday_indexed()}; +} + +CONSTCD11 +inline +year_month_weekday +operator/(int y, const month_weekday& mwd) NOEXCEPT +{ + return year(y) / mwd; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const month_weekday& mwd, const year& y) NOEXCEPT +{ + return y / mwd; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const month_weekday& mwd, int y) NOEXCEPT +{ + return year(y) / mwd; +} + +// year_month_weekday_last from operator/() + +CONSTCD11 +inline +year_month_weekday_last +operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT +{ + return {ym.year(), ym.month(), wdl}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT +{ + return {y, mwdl.month(), mwdl.weekday_last()}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(int y, const month_weekday_last& mwdl) NOEXCEPT +{ + return year(y) / mwdl; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT +{ + return y / mwdl; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const month_weekday_last& mwdl, int y) NOEXCEPT +{ + return year(y) / mwdl; 
+} + +template +struct fields; + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr); + +template +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr); + +// hh_mm_ss + +namespace detail +{ + +struct undocumented {explicit undocumented() = default;}; + +// width::value is the number of fractional decimal digits in 1/n +// width<0>::value and width<1>::value are defined to be 0 +// If 1/n takes more than 18 fractional decimal digits, +// the result is truncated to 19. +// Example: width<2>::value == 1 +// Example: width<3>::value == 19 +// Example: width<4>::value == 2 +// Example: width<10>::value == 1 +// Example: width<1000>::value == 3 +template +struct width +{ + static_assert(d > 0, "width called with zero denominator"); + static CONSTDATA unsigned value = 1 + width::value; +}; + +template +struct width +{ + static CONSTDATA unsigned value = 0; +}; + +template +struct static_pow10 +{ +private: + static CONSTDATA std::uint64_t h = static_pow10::value; +public: + static CONSTDATA std::uint64_t value = h * h * (exp % 2 ? 10 : 1); +}; + +template <> +struct static_pow10<0> +{ + static CONSTDATA std::uint64_t value = 1; +}; + +template +class decimal_format_seconds +{ + using CT = typename std::common_type::type; + using rep = typename CT::rep; + static unsigned CONSTDATA trial_width = + detail::width::value; +public: + static unsigned CONSTDATA width = trial_width < 19 ? trial_width : 6u; + using precision = std::chrono::duration::value>>; + +private: + std::chrono::seconds s_; + precision sub_s_; + +public: + CONSTCD11 decimal_format_seconds() + : s_() + , sub_s_() + {} + + CONSTCD11 explicit decimal_format_seconds(const Duration& d) NOEXCEPT + : s_(std::chrono::duration_cast(d)) + , sub_s_(std::chrono::duration_cast(d - s_)) + {} + + CONSTCD14 std::chrono::seconds& seconds() NOEXCEPT {return s_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_;} + CONSTCD11 precision subseconds() const NOEXCEPT {return sub_s_;} + + CONSTCD14 precision to_duration() const NOEXCEPT + { + return s_ + sub_s_; + } + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return sub_s_ < std::chrono::seconds{1} && s_ < std::chrono::minutes{1}; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const decimal_format_seconds& x) + { + return x.print(os, std::chrono::treat_as_floating_point{}); + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::true_type) const + { + date::detail::save_ostream _(os); + std::chrono::duration d = s_ + sub_s_; + if (d < std::chrono::seconds{10}) + os << '0'; + os.precision(width+6); + os << std::fixed << d.count(); + return os; + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::false_type) const + { + date::detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << s_.count(); + if (width > 0) + { +#if !ONLY_C_LOCALE + os << std::use_facet>(os.getloc()).decimal_point(); +#else + os << '.'; +#endif + date::detail::save_ostream _s(os); + os.imbue(std::locale::classic()); + os.width(width); + os << sub_s_.count(); + } + return os; + } +}; + +template +inline +CONSTCD11 +typename std::enable_if + < + std::numeric_limits::is_signed, + std::chrono::duration + >::type 
+abs(std::chrono::duration d) +{ + return d >= d.zero() ? +d : -d; +} + +template +inline +CONSTCD11 +typename std::enable_if + < + !std::numeric_limits::is_signed, + std::chrono::duration + >::type +abs(std::chrono::duration d) +{ + return d; +} + +} // namespace detail + +template +class hh_mm_ss +{ + using dfs = detail::decimal_format_seconds::type>; + + std::chrono::hours h_; + std::chrono::minutes m_; + dfs s_; + bool neg_; + +public: + static unsigned CONSTDATA fractional_width = dfs::width; + using precision = typename dfs::precision; + + CONSTCD11 hh_mm_ss() NOEXCEPT + : hh_mm_ss(Duration::zero()) + {} + + CONSTCD11 explicit hh_mm_ss(Duration d) NOEXCEPT + : h_(std::chrono::duration_cast(detail::abs(d))) + , m_(std::chrono::duration_cast(detail::abs(d)) - h_) + , s_(detail::abs(d) - h_ - m_) + , neg_(d < Duration::zero()) + {} + + CONSTCD11 std::chrono::hours hours() const NOEXCEPT {return h_;} + CONSTCD11 std::chrono::minutes minutes() const NOEXCEPT {return m_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_.seconds();} + CONSTCD14 std::chrono::seconds& + seconds(detail::undocumented) NOEXCEPT {return s_.seconds();} + CONSTCD11 precision subseconds() const NOEXCEPT {return s_.subseconds();} + CONSTCD11 bool is_negative() const NOEXCEPT {return neg_;} + + CONSTCD11 explicit operator precision() const NOEXCEPT {return to_duration();} + CONSTCD11 precision to_duration() const NOEXCEPT + {return (s_.to_duration() + m_ + h_) * (1-2*neg_);} + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return !neg_ && h_ < days{1} && m_ < std::chrono::hours{1} && + s_.in_conventional_range(); + } + +private: + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, hh_mm_ss const& tod) + { + if (tod.is_negative()) + os << '-'; + if (tod.h_ < std::chrono::hours{10}) + os << '0'; + os << tod.h_.count() << ':'; + if (tod.m_ < std::chrono::minutes{10}) + os << '0'; + os << tod.m_.count() << ':' << tod.s_; + return os; + } + + template + friend + std::basic_ostream& + date::to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec); + + template + friend + std::basic_istream& + date::from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, + std::basic_string* abbrev, std::chrono::minutes* offset); +}; + +inline +CONSTCD14 +bool +is_am(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{0} <= h && h < hours{12}; +} + +inline +CONSTCD14 +bool +is_pm(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{12} <= h && h < hours{24}; +} + +inline +CONSTCD14 +std::chrono::hours +make12(std::chrono::hours h) NOEXCEPT +{ + using std::chrono::hours; + if (h < hours{12}) + { + if (h == hours{0}) + h = hours{12}; + } + else + { + if (h != hours{12}) + h = h - hours{12}; + } + return h; +} + +inline +CONSTCD14 +std::chrono::hours +make24(std::chrono::hours h, bool is_pm) NOEXCEPT +{ + using std::chrono::hours; + if (is_pm) + { + if (h != hours{12}) + h = h + hours{12}; + } + else if (h == hours{12}) + h = hours{0}; + return h; +} + +template +using time_of_day = hh_mm_ss; + +template +CONSTCD11 +inline +hh_mm_ss> +make_time(const std::chrono::duration& d) +{ + return hh_mm_ss>(d); +} + +template +inline +typename std::enable_if +< + !std::is_convertible::value, + std::basic_ostream& +>::type +operator<<(std::basic_ostream& os, const sys_time& tp) +{ + auto const dp = date::floor(tp); + return os << 
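+ // Streams the calendar date first, then the time of day rebuilt from the
+ // remainder tp - dp; e.g. a sys_time with millisecond precision prints as
+ // "2024-02-29 12:34:56.789" (illustrative value).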
year_month_day(dp) << ' ' << make_time(tp-dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_days& dp) +{ + return os << year_month_day(dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_time& ut) +{ + return (os << sys_time{ut.time_since_epoch()}); +} + +namespace detail +{ + +template +class string_literal; + +template +inline +CONSTCD14 +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT; + +template +class string_literal +{ + CharT p_[N]; + + CONSTCD11 string_literal() NOEXCEPT + : p_{} + {} + +public: + using const_iterator = const CharT*; + + string_literal(string_literal const&) = default; + string_literal& operator=(string_literal const&) = delete; + + template ::type> + CONSTCD11 string_literal(CharT c) NOEXCEPT + : p_{c} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2) NOEXCEPT + : p_{c1, c2} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2, CharT c3) NOEXCEPT + : p_{c1, c2, c3} + { + } + + CONSTCD14 string_literal(const CharT(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::type> + CONSTCD14 string_literal(const char(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::value>::type> + CONSTCD14 string_literal(string_literal const& a) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + CONSTCD11 const CharT* data() const NOEXCEPT {return p_;} + CONSTCD11 std::size_t size() const NOEXCEPT {return N-1;} + + CONSTCD11 const_iterator begin() const NOEXCEPT {return p_;} + CONSTCD11 const_iterator end() const NOEXCEPT {return p_ + N-1;} + + CONSTCD11 CharT const& operator[](std::size_t n) const NOEXCEPT + { + return p_[n]; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const string_literal& s) + { + return os << s.p_; + } + + template + friend + CONSTCD14 + string_literal::type, + N1 + N2 - 1> + operator+(const string_literal& x, const string_literal& y) NOEXCEPT; +}; + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], y[0]); +} + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], x[1], y[0]); +} + +template +CONSTCD14 +inline +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + using CT = typename std::conditional::type; + + string_literal r; + std::size_t i = 0; + for (; i < N1-1; ++i) + r.p_[i] = CT(x.p_[i]); + for (std::size_t j = 0; j < N2; ++j, ++i) + r.p_[i] = CT(y.p_[j]); + + return r; +} + + +template +inline +std::basic_string +operator+(std::basic_string x, const string_literal& y) +{ + x.append(y.data(), y.size()); + return x; +} + +#if __cplusplus >= 201402 && (!defined(__EDG_VERSION__) || __EDG_VERSION__ > 411) \ + && (!defined(__SUNPRO_CC) || __SUNPRO_CC > 0x5150) + +template ::value || + std::is_same::value || + std::is_same::value || + std::is_same::value>> +CONSTCD14 +inline +string_literal +msl(CharT c) NOEXCEPT +{ + return string_literal{c}; +} + +CONSTCD14 +inline +std::size_t +to_string_len(std::intmax_t i) +{ + std::size_t r = 0; + do + { + i /= 10; + ++r; + } while (i > 0); + return r; +} + +template +CONSTCD14 +inline +std::enable_if_t +< + N < 
10, + string_literal +> +msl() NOEXCEPT +{ + return msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + 10 <= N, + string_literal +> +msl() NOEXCEPT +{ + return msl() + msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den != 1, + string_literal::type::num) + + to_string_len(std::ratio::type::den) + 4> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{'/'}) + + msl() + msl(CharT{']'}); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den == 1, + string_literal::type::num) + 3> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{']'}); +} + + +#else // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +inline +std::string +to_string(std::uint64_t x) +{ + return std::to_string(x); +} + +template +inline +std::basic_string +to_string(std::uint64_t x) +{ + auto y = std::to_string(x); + return std::basic_string(y.begin(), y.end()); +} + +template +inline +typename std::enable_if +< + std::ratio::type::den != 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{'/'} + + to_string(R::den) + CharT{']'}; +} + +template +inline +typename std::enable_if +< + std::ratio::type::den == 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{']'}; +} + +#endif // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +template +CONSTCD11 +inline +string_literal +msl(std::atto) NOEXCEPT +{ + return string_literal{'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::femto) NOEXCEPT +{ + return string_literal{'f'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::pico) NOEXCEPT +{ + return string_literal{'p'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::nano) NOEXCEPT +{ + return string_literal{'n'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{'\xC2', '\xB5'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + !std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{CharT{static_cast('\xB5')}}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::milli) NOEXCEPT +{ + return string_literal{'m'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::centi) NOEXCEPT +{ + return string_literal{'c'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deca) NOEXCEPT +{ + return string_literal{'d', 'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deci) NOEXCEPT +{ + return string_literal{'d'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::hecto) NOEXCEPT +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::kilo) NOEXCEPT +{ + return string_literal{'k'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::mega) NOEXCEPT +{ + return string_literal{'M'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::giga) NOEXCEPT +{ + return string_literal{'G'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::tera) NOEXCEPT +{ + return string_literal{'T'}; +} + +template +CONSTCD11 +inline +string_literal 
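+// The single-character SI-prefix spellings continue below (peta 'P', exa 'E').
+// get_units() then appends 's' to form the unit suffix printed by %q, e.g.
+// milli -> "ms", ratio<60> -> "min" (illustrative pairings).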
+msl(std::peta) NOEXCEPT +{ + return string_literal{'P'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::exa) NOEXCEPT +{ + return string_literal{'E'}; +} + +template +CONSTCD11 +inline +auto +get_units(Period p) + -> decltype(msl(p) + string_literal{'s'}) +{ + return msl(p) + string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<1>) +{ + return string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<3600>) +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<60>) +{ + return string_literal{'m', 'i', 'n'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<86400>) +{ + return string_literal{'d'}; +} + +template > +struct make_string; + +template <> +struct make_string +{ + template + static + std::string + from(Rep n) + { + return std::to_string(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_string(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +template <> +struct make_string +{ + template + static + std::wstring + from(Rep n) + { + return std::to_wstring(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_wstring(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +} // namespace detail + +// to_stream + +CONSTDATA year nanyear{-32768}; + +template +struct fields +{ + year_month_day ymd{nanyear/0/0}; + weekday wd{8u}; + hh_mm_ss tod{}; + bool has_tod = false; + +#if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ <= 409) + fields() : ymd{nanyear/0/0}, wd{8u}, tod{}, has_tod{false} {} +#else + fields() = default; +#endif + + fields(year_month_day ymd_) : ymd(ymd_) {} + fields(weekday wd_) : wd(wd_) {} + fields(hh_mm_ss tod_) : tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_) : ymd(ymd_), wd(wd_) {} + fields(year_month_day ymd_, hh_mm_ss tod_) : ymd(ymd_), tod(tod_), + has_tod(true) {} + + fields(weekday wd_, hh_mm_ss tod_) : wd(wd_), tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_, hh_mm_ss tod_) + : ymd(ymd_) + , wd(wd_) + , tod(tod_) + , has_tod(true) + {} +}; + +namespace detail +{ + +template +unsigned +extract_weekday(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.ok() && !fds.wd.ok()) + { + // fds does not contain a valid weekday + os.setstate(std::ios::failbit); + return 8; + } + weekday wd; + if (fds.ymd.ok()) + { + wd = weekday{sys_days(fds.ymd)}; + if (fds.wd.ok() && wd != fds.wd) + { + // fds.ymd and fds.wd are inconsistent + os.setstate(std::ios::failbit); + return 8; + } + } + else + wd = fds.wd; + return static_cast((wd - Sunday).count()); +} + +template +unsigned +extract_month(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.month().ok()) + { + // fds does not contain a valid month + os.setstate(std::ios::failbit); + return 0; + } + return static_cast(fds.ymd.month()); +} + +} // namespace detail + +#if ONLY_C_LOCALE + +namespace detail +{ + +inline +std::pair +weekday_names() +{ + static const std::string nm[] = + { + "Sunday", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sun", + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +month_names() +{ + static const std::string nm[] = + { + "January", + "February", + 
"March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +ampm_names() +{ + static const std::string nm[] = + { + "AM", + "PM" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +template +FwdIter +scan_keyword(std::basic_istream& is, FwdIter kb, FwdIter ke) +{ + size_t nkw = static_cast(std::distance(kb, ke)); + const unsigned char doesnt_match = '\0'; + const unsigned char might_match = '\1'; + const unsigned char does_match = '\2'; + unsigned char statbuf[100]; + unsigned char* status = statbuf; + std::unique_ptr stat_hold(0, free); + if (nkw > sizeof(statbuf)) + { + status = (unsigned char*)std::malloc(nkw); + if (status == nullptr) + throw std::bad_alloc(); + stat_hold.reset(status); + } + size_t n_might_match = nkw; // At this point, any keyword might match + size_t n_does_match = 0; // but none of them definitely do + // Initialize all statuses to might_match, except for "" keywords are does_match + unsigned char* st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (!ky->empty()) + *st = might_match; + else + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + // While there might be a match, test keywords against the next CharT + for (size_t indx = 0; is && n_might_match > 0; ++indx) + { + // Peek at the next CharT but don't consume it + auto ic = is.peek(); + if (ic == EOF) + { + is.setstate(std::ios::eofbit); + break; + } + auto c = static_cast(toupper(static_cast(ic))); + bool consume = false; + // For each keyword which might match, see if the indx character is c + // If a match if found, consume c + // If a match is found, and that is the last character in the keyword, + // then that keyword matches. + // If the keyword doesn't match this character, then change the keyword + // to doesn't match + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == might_match) + { + if (c == static_cast(toupper(static_cast((*ky)[indx])))) + { + consume = true; + if (ky->size() == indx+1) + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + else + { + *st = doesnt_match; + --n_might_match; + } + } + } + // consume if we matched a character + if (consume) + { + (void)is.get(); + // If we consumed a character and there might be a matched keyword that + // was marked matched on a previous iteration, then such keywords + // are now marked as not matching. + if (n_might_match + n_does_match > 1) + { + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == does_match && ky->size() != indx+1) + { + *st = doesnt_match; + --n_does_match; + } + } + } + } + } + // We've exited the loop because we hit eof and/or we have no more "might matches". 
+ // Return the first matching result + for (st = status; kb != ke; ++kb, ++st) + if (*st == does_match) + break; + if (kb == ke) + is.setstate(std::ios::failbit); + return kb; +} + +} // namespace detail + +#endif // ONLY_C_LOCALE + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec) +{ +#if ONLY_C_LOCALE + using detail::weekday_names; + using detail::month_names; + using detail::ampm_names; +#endif + using detail::save_ostream; + using detail::get_units; + using detail::extract_weekday; + using detail::extract_month; + using std::ios; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + date::detail::save_ostream ss(os); + os.fill(' '); + os.flags(std::ios::skipws | std::ios::dec); + os.width(0); + tm tm{}; + bool insert_negative = fds.has_tod && fds.tod.to_duration() < Duration::zero(); +#if !ONLY_C_LOCALE + auto& facet = std::use_facet>(os.getloc()); +#endif + const CharT* command = nullptr; + CharT modified = CharT{}; + for (; *fmt; ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + if (command) + { + if (modified == CharT{}) + { + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << weekday_names().first[tm.tm_wday+7*(*fmt == 'a')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + tm.tm_mon = static_cast(extract_month(os, fds)) - 1; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << month_names().first[tm.tm_mon+12*(*fmt != 'B')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'c': + case 'x': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + if (*fmt == 'c' && !fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + auto const& ymd = fds.ymd; + auto ld = local_days(ymd); + if (*fmt == 'c') + { + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + } + tm.tm_mday = static_cast(static_cast(ymd.day())); + tm.tm_mon = static_cast(extract_month(os, fds) - 1); + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else // ONLY_C_LOCALE + if (*fmt == 'c') + { + auto wd = static_cast(extract_weekday(os, fds)); + os << weekday_names().first[static_cast(wd)+7] + << ' '; + os << month_names().first[extract_month(os, fds)-1+12] << ' '; + auto d = static_cast(static_cast(fds.ymd.day())); + if (d < 10) + os << ' '; + os << d << ' ' + << 
make_time(duration_cast(fds.tod.to_duration())) + << ' ' << fds.ymd.year(); + + } + else // *fmt == 'x' + { + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } +#endif // ONLY_C_LOCALE + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'C': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + if (y >= 0) + { + os.width(2); + os << y/100; + } + else + { + os << CharT{'-'}; + os.width(2); + os << -(y-99)/100; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + tm.tm_year = y - 1900; + CharT f[3] = {'%', 'E', 'C'}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'd': + case 'e': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.day().ok()) + os.setstate(std::ios::failbit); + auto d = static_cast(static_cast(fds.ymd.day())); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + if (*fmt == CharT{'d'}) + os.fill('0'); + else + os.fill(' '); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << d; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + tm.tm_mday = d; + CharT f[3] = {'%', 'O', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'D': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'F': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.imbue(std::locale::classic()); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(4); + os << static_cast(ymd.year()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.month()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.day()); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'g': + case 'G': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); + auto y = year_month_day{ld + days{3}}.year(); + auto start = local_days((y-years{1})/December/Thursday[last]) + + (Monday-Thursday); + if (ld < start) + --y; + if (*fmt == CharT{'G'}) + os << y; + else + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | 
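+ // %g is the last two digits of the ISO week-based year computed above:
+ // ISO week 1 is the week containing January 4th, so earlier dates roll
+ // back a year; e.g. 2016-01-01 falls in 2015-W53, so %G gives 2015, %g 15.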
std::ios::right); + os.width(2); + os << std::abs(static_cast(y)) % 100; + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'H': + case 'I': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } + auto hms = fds.tod; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto h = *fmt == CharT{'I'} ? date::make12(hms.hours()) : hms.hours(); + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_hour = static_cast(hms.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'j': + if (command) + { + if (modified == CharT{}) + { + if (fds.ymd.ok() || fds.has_tod) + { + days doy; + if (fds.ymd.ok()) + { + auto ld = local_days(fds.ymd); + auto y = fds.ymd.year(); + doy = ld - local_days(y/January/1) + days{1}; + } + else + { + doy = duration_cast(fds.tod.to_duration()); + } + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(3); + os << doy.count(); + } + else + { + os.setstate(std::ios::failbit); + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'm': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.month().ok()) + os.setstate(std::ios::failbit); + auto m = static_cast(fds.ymd.month()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (m < 10) + os << CharT{'0'}; + os << m; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_mon = static_cast(m-1); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'M': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_min = static_cast(fds.tod.minutes().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'n': + if (command) + { + if (modified == CharT{}) + os << CharT{'\n'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + if (date::is_am(fds.tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + 
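+ // A modified "%p" (e.g. "%Op") is not a defined conversion, so the else
+ // branch echoes the sequence back literally instead of failing the stream.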
else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Q': + case 'q': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + auto d = fds.tod.to_duration(); + if (*fmt == 'q') + os << get_units(typename decltype(d)::period::type{}); + else + os << d.count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_sec = static_cast(fds.tod.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + hh_mm_ss tod(duration_cast(fds.tod.to_duration())); + save_ostream _(os); + os.fill('0'); + os.width(2); + os << date::make12(tod.hours()).count() << CharT{':'}; + os.width(2); + os << tod.minutes().count() << CharT{':'}; + os.width(2); + os << tod.seconds().count() << CharT{' '}; + if (date::is_am(tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (fds.tod.hours() < hours{10}) + os << CharT{'0'}; + os << fds.tod.hours().count() << CharT{':'}; + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'S': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << fds.tod.s_; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_sec = static_cast(fds.tod.s_.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 't': + if (command) + { + if (modified == CharT{}) + os << CharT{'\t'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + os << fds.tod; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'u': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto wd = extract_weekday(os, fds); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << (wd != 0 ? 
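+ // %u wants the ISO weekday (Monday=1..Sunday=7); extract_weekday returned
+ // the C convention (Sunday=0..Saturday=6), so only Sunday needs remapping: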
wd : 7u); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'U': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Sunday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } + #if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'V': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto y = year_month_day{ld + days{3}}.year(); + auto st = local_days((y-years{1})/12/Thursday[last]) + + (Monday-Thursday); + if (ld < st) + { + --y; + st = local_days((y - years{1})/12/Thursday[last]) + + (Monday-Thursday); + } + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + auto const& ymd = fds.ymd; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'w': + if (command) + { + auto wd = extract_weekday(os, fds); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + os << wd; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'W': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Monday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + 
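+ // Populate the std::tm that the locale's time_put facet expects before
+ // delegating the "%OU" conversion; weekday and day-of-year are derived
+ // from the supplied fields.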
tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'X': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else + os << fds.tod; +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'y': + if (command) + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + y = std::abs(y) % 100; + if (y < 10) + os << CharT{'0'}; + os << y; +#if !ONLY_C_LOCALE + } + else + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = y - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Y': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = fds.ymd.year(); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.imbue(std::locale::classic()); + os << y; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(y) - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'z': + if (command) + { + if (offset_sec == nullptr) + { + // Can not format %z with unknown offset + os.setstate(ios::failbit); + return os; + } + auto m = duration_cast(*offset_sec); + auto neg = m < minutes{0}; + m = date::abs(m); + auto h = duration_cast(m); + m -= h; + if (neg) + os << CharT{'-'}; + else + os << CharT{'+'}; + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + if (modified != CharT{}) + os << CharT{':'}; + if (m < minutes{10}) + os << CharT{'0'}; + os << m.count(); + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + if (abbrev == nullptr) + { + // Can not format %Z with unknown time_zone + os.setstate(ios::failbit); + return os; + } + for (auto c : *abbrev) + os << CharT(c); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + os << CharT{'%'} << modified << *fmt; + command = nullptr; + modified = CharT{}; + } + } + else + os << *fmt; + break; + case '%': + if (command) + { + if (modified == CharT{}) + { + os << CharT{'%'}; + command = nullptr; + } + else + { + os << CharT{'%'} << modified << CharT{'%'}; + command = nullptr; + modified = CharT{}; + } + } + else + command = fmt; + break; + default: + if 
(command) + { + os << CharT{'%'}; + command = nullptr; + } + if (modified != CharT{}) + { + os << modified; + modified = CharT{}; + } + os << *fmt; + break; + } + } + if (command) + os << CharT{'%'}; + if (modified != CharT{}) + os << modified; + return os; +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year& y) +{ + using CT = std::chrono::seconds; + fields fds{y/0/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month& m) +{ + using CT = std::chrono::seconds; + fields fds{m/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const day& d) +{ + using CT = std::chrono::seconds; + fields fds{d/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const weekday& wd) +{ + using CT = std::chrono::seconds; + fields fds{wd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year_month& ym) +{ + using CT = std::chrono::seconds; + fields fds{ym/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month_day& md) +{ + using CT = std::chrono::seconds; + fields fds{md/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const year_month_day& ymd) +{ + using CT = std::chrono::seconds; + fields fds{ymd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const std::chrono::duration& d) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + fields fds{hh_mm_ss{d}}; + return to_stream(os, fmt, fds); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const local_time& tp, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr) +{ + using CT = typename std::common_type::type; + auto ld = std::chrono::time_point_cast(tp); + fields fds; + if (ld <= tp) + fds = fields{year_month_day{ld}, hh_mm_ss{tp-local_seconds{ld}}}; + else + fds = fields{year_month_day{ld - days{1}}, + hh_mm_ss{days{1} - (local_seconds{ld} - tp)}}; + return to_stream(os, fmt, fds, abbrev, offset_sec); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const sys_time& tp) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto sd = std::chrono::time_point_cast(tp); + fields fds; + if (sd <= tp) + fds = fields{year_month_day{sd}, hh_mm_ss{tp-sys_seconds{sd}}}; + else + fds = fields{year_month_day{sd - days{1}}, + hh_mm_ss{days{1} - (sys_seconds{sd} - tp)}}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +// format + +template +auto +format(const std::locale& loc, const CharT* fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const CharT* fmt, const Streamable& tp) + -> 
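+// SFINAE: the comma operator inside decltype yields basic_string<CharT> while
+// removing this overload whenever no matching to_stream exists for Streamable.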
decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const std::locale& loc, const std::basic_string& fmt, + const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +template +auto +format(const std::basic_string& fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +// parse + +namespace detail +{ + +template +bool +read_char(std::basic_istream& is, CharT fmt, std::ios::iostate& err) +{ + auto ic = is.get(); + if (Traits::eq_int_type(ic, Traits::eof()) || + !Traits::eq(Traits::to_char_type(ic), fmt)) + { + err |= std::ios::failbit; + is.setstate(std::ios::failbit); + return false; + } + return true; +} + +template +unsigned +read_unsigned(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned x = 0; + unsigned count = 0; + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + (void)is.get(); + ++count; + x = 10*x + static_cast(c - '0'); + if (count == M) + break; + } + if (count < m) + is.setstate(std::ios::failbit); + return x; +} + +template +int +read_signed(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (('0' <= c && c <= '9') || c == '-' || c == '+') + { + if (c == '-' || c == '+') + (void)is.get(); + auto x = static_cast(read_unsigned(is, std::max(m, 1u), M)); + if (!is.fail()) + { + if (c == '-') + x = -x; + return x; + } + } + } + if (m > 0) + is.setstate(std::ios::failbit); + return 0; +} + +template +long double +read_long_double(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned count = 0; + unsigned fcount = 0; + unsigned long long i = 0; + unsigned long long f = 0; + bool parsing_fraction = false; +#if ONLY_C_LOCALE + typename Traits::int_type decimal_point = '.'; +#else + auto decimal_point = Traits::to_int_type( + std::use_facet>(is.getloc()).decimal_point()); +#endif + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + if (Traits::eq_int_type(ic, decimal_point)) + { + decimal_point = Traits::eof(); + parsing_fraction = true; + } + else + { + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + if (!parsing_fraction) + { + i = 10*i + static_cast(c - '0'); + } + else + { + f = 10*f + static_cast(c - '0'); + ++fcount; + } + } + (void)is.get(); + if (++count == M) + break; + } + if (count < m) + { + is.setstate(std::ios::failbit); + return 0; + } + return static_cast(i) + static_cast(f)/std::pow(10.L, fcount); +} + +struct rs +{ + int& i; + unsigned m; + unsigned M; +}; + +struct ru +{ + int& i; + unsigned m; + unsigned M; +}; + +struct rld +{ + long double& i; + unsigned m; + unsigned M; +}; + +template +void +read(std::basic_istream&) +{ +} + +template +void +read(std::basic_istream& is, CharT a0, 
Args&& ...args); + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args); + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args); + +template +void +read(std::basic_istream& is, int a0, Args&& ...args); + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args); + +template +void +read(std::basic_istream& is, CharT a0, Args&& ...args) +{ + // No-op if a0 == CharT{} + if (a0 != CharT{}) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + is.setstate(std::ios::failbit | std::ios::eofbit); + return; + } + if (!Traits::eq(Traits::to_char_type(ic), a0)) + { + is.setstate(std::ios::failbit); + return; + } + (void)is.get(); + } + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args) +{ + auto x = read_signed(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args) +{ + auto x = read_unsigned(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = static_cast(x); + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, int a0, Args&& ...args) +{ + if (a0 != -1) + { + auto u = static_cast(a0); + CharT buf[std::numeric_limits::digits10+2u] = {}; + auto e = buf; + do + { + *e++ = static_cast(CharT(u % 10) + CharT{'0'}); + u /= 10; + } while (u > 0); + std::reverse(buf, e); + for (auto p = buf; p != e && is.rdstate() == std::ios::goodbit; ++p) + read(is, *p); + } + if (is.rdstate() == std::ios::goodbit) + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args) +{ + auto x = read_long_double(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +inline +void +checked_set(T& value, T from, T not_a_value, std::basic_ios& is) +{ + if (!is.fail()) + { + if (value == not_a_value) + value = std::move(from); + else if (value != from) + is.setstate(std::ios::failbit); + } +} + +} // namespace detail; + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev, + std::chrono::minutes* offset) +{ + using std::numeric_limits; + using std::ios; + using std::chrono::duration; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + using detail::round_i; + typename std::basic_istream::sentry ok{is, true}; + if (ok) + { + date::detail::save_istream ss(is); + is.fill(' '); + is.flags(std::ios::skipws | std::ios::dec); + is.width(0); +#if !ONLY_C_LOCALE + auto& f = std::use_facet>(is.getloc()); + std::tm tm{}; +#endif + const CharT* command = nullptr; + auto modified = CharT{}; + auto width = -1; + + CONSTDATA int not_a_year = numeric_limits::min(); + CONSTDATA int not_a_2digit_year = 100; + CONSTDATA int not_a_century = not_a_year / 100; + CONSTDATA int not_a_month = 0; + CONSTDATA int not_a_day = 0; + CONSTDATA int not_a_hour = numeric_limits::min(); + CONSTDATA int not_a_hour_12_value = 0; + CONSTDATA int not_a_minute = not_a_hour; + CONSTDATA Duration not_a_second = Duration::min(); + CONSTDATA int not_a_doy = -1; + CONSTDATA int not_a_weekday = 8; + CONSTDATA int not_a_week_num = 100; + CONSTDATA int not_a_ampm = -1; + CONSTDATA minutes not_a_offset = minutes::min(); + + int Y = not_a_year; // c, F, Y * + int y = not_a_2digit_year; // D, x, y * + int g = not_a_2digit_year; // g * + int G = not_a_year; // G * + int C = 
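+ // Every parsed field starts at a distinguishable "not_a_*" sentinel;
+ // checked_set() fills a sentinel exactly once and sets failbit if two
+ // flags supply conflicting values (e.g. two %Y fields that disagree).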
not_a_century; // C * + int m = not_a_month; // b, B, h, m, c, D, F, x * + int d = not_a_day; // c, d, D, e, F, x * + int j = not_a_doy; // j * + int wd = not_a_weekday; // a, A, u, w * + int H = not_a_hour; // c, H, R, T, X * + int I = not_a_hour_12_value; // I, r * + int p = not_a_ampm; // p, r * + int M = not_a_minute; // c, M, r, R, T, X * + Duration s = not_a_second; // c, r, S, T, X * + int U = not_a_week_num; // U * + int V = not_a_week_num; // V * + int W = not_a_week_num; // W * + std::basic_string temp_abbrev; // Z * + minutes temp_offset = not_a_offset; // z * + + using detail::read; + using detail::rs; + using detail::ru; + using detail::rld; + using detail::checked_set; + for (; *fmt != CharT{} && !is.fail(); ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + case 'u': + case 'w': // wd: a, A, u, w + if (command) + { + int trial_wd = not_a_weekday; + if (*fmt == 'a' || *fmt == 'A') + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; +#else + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + trial_wd = i % 7; +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + } + else // *fmt == 'u' || *fmt == 'w' + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + read(is, ru{trial_wd, 1, width == -1 ? + 1u : static_cast(width)}); + if (!is.fail()) + { + if (*fmt == 'u') + { + if (!(1 <= trial_wd && trial_wd <= 7)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + else if (trial_wd == 7) + trial_wd = 0; + } + else // *fmt == 'w' + { + if (!(0 <= trial_wd && trial_wd <= 6)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + } + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + } + if (trial_wd != not_a_weekday) + checked_set(wd, trial_wd, not_a_weekday, is); + } + else // !command + read(is, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + int ttm = not_a_month; +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + ttm = tm.tm_mon + 1; + is.setstate(err); +#else + auto nm = detail::month_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + ttm = i % 12 + 1; +#endif + checked_set(m, ttm, not_a_month, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'c': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + 
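+ // ONLY_C_LOCALE: parse %c by hand against the classic locale's
+ // date-and-time picture, spelled out on the next line.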
// "%a %b %e %T %Y" + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(wd, static_cast(i % 7), not_a_weekday, is); + ws(is); + nm = detail::month_names(); + i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(m, static_cast(i % 12 + 1), not_a_month, is); + ws(is); + int td = not_a_day; + read(is, rs{td, 1, 2}); + checked_set(d, td, not_a_day, is); + ws(is); + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH; + int tM; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + int tY = not_a_year; + read(is, rs{tY, 1, 4u}); + checked_set(Y, tY, not_a_year, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'x': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + } + is.setstate(err); +#else + // "%m/%d/%y" + int ty = not_a_2digit_year; + int tm = not_a_month; + int td = not_a_day; + read(is, ru{tm, 1, 2}, CharT{'/'}, ru{td, 1, 2}, CharT{'/'}, + rs{ty, 1, 2}); + checked_set(y, ty, not_a_2digit_year, is); + checked_set(m, tm, not_a_month, is); + checked_set(d, td, not_a_day, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'X': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%T" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'C': + if (command) + { + int tC = not_a_century; +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + read(is, rs{tC, 1, width == -1 ? 2u : static_cast(width)}); +#if !ONLY_C_LOCALE + } + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + auto tY = tm.tm_year + 1900; + tC = (tY >= 0 ? 
tY : tY-99) / 100; + } + is.setstate(err); + } +#endif + checked_set(C, tC, not_a_century, is); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'D': + if (command) + { + if (modified == CharT{}) + { + int tn = not_a_month; + int td = not_a_day; + int ty = not_a_2digit_year; + read(is, ru{tn, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'}, + ru{td, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'}, + rs{ty, 1, 2}); + checked_set(y, ty, not_a_2digit_year, is); + checked_set(m, tn, not_a_month, is); + checked_set(d, td, not_a_day, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'F': + if (command) + { + if (modified == CharT{}) + { + int tY = not_a_year; + int tn = not_a_month; + int td = not_a_day; + read(is, rs{tY, 1, width == -1 ? 4u : static_cast(width)}, + CharT{'-'}, ru{tn, 1, 2}, CharT{'-'}, ru{td, 1, 2}); + checked_set(Y, tY, not_a_year, is); + checked_set(m, tn, not_a_month, is); + checked_set(d, td, not_a_day, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'd': + case 'e': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int td = not_a_day; + read(is, rs{td, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(d, td, not_a_day, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + command = nullptr; + width = -1; + modified = CharT{}; + if ((err & ios::failbit) == 0) + checked_set(d, tm.tm_mday, not_a_day, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'H': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tH = not_a_hour; + read(is, ru{tH, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(H, tH, not_a_hour, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(H, tm.tm_hour, not_a_hour, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'I': + if (command) + { + if (modified == CharT{}) + { + int tI = not_a_hour_12_value; + // reads in an hour into I, but most be in [1, 12] + read(is, rs{tI, 1, width == -1 ? 2u : static_cast(width)}); + if (!(1 <= tI && tI <= 12)) + is.setstate(ios::failbit); + checked_set(I, tI, not_a_hour_12_value, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'j': + if (command) + { + if (modified == CharT{}) + { + int tj = not_a_doy; + read(is, ru{tj, 1, width == -1 ? 
3u : static_cast(width)}); + checked_set(j, tj, not_a_doy, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'M': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tM = not_a_minute; + read(is, ru{tM, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(M, tM, not_a_minute, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(M, tm.tm_min, not_a_minute, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'm': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tn = not_a_month; + read(is, rs{tn, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(m, tn, not_a_month, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(m, tm.tm_mon + 1, not_a_month, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'n': + case 't': + if (command) + { + if (modified == CharT{}) + { + // %n matches a single white space character + // %t matches 0 or 1 white space characters + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + ios::iostate err = ios::eofbit; + if (*fmt == 'n') + err |= ios::failbit; + is.setstate(err); + break; + } + if (isspace(ic)) + { + (void)is.get(); + } + else if (*fmt == 'n') + is.setstate(ios::failbit); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + int tp = not_a_ampm; +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_hour = 1; + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (tm.tm_hour == 1) + tp = 0; + else if (tm.tm_hour == 13) + tp = 1; + else + is.setstate(err); +#else + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + tp = static_cast(i); +#endif + checked_set(p, tp, not_a_ampm, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_hour, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%I:%M:%S %p" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 
2 : 3 + dfs::width; + long double S{}; + int tI = not_a_hour_12_value; + int tM = not_a_minute; + read(is, ru{tI, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(I, tI, not_a_hour_12_value, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(p, static_cast(i), not_a_ampm, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + int tH = not_a_hour; + int tM = not_a_minute; + read(is, ru{tH, 1, 2}, CharT{'\0'}, CharT{':'}, CharT{'\0'}, + ru{tM, 1, 2}, CharT{'\0'}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'S': + if (command) + { + #if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + long double S{}; + read(is, rld{S, 1, width == -1 ? w : static_cast(width)}); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'O'}) +#endif + { + int tY = not_a_year; + read(is, rs{tY, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(Y, tY, not_a_year, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + int ty = not_a_2digit_year; + read(is, ru{ty, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(y, ty, not_a_2digit_year, is); + } +#if !ONLY_C_LOCALE + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'g': + if (command) + { + if (modified == CharT{}) + { + int tg = not_a_2digit_year; + read(is, ru{tg, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(g, tg, not_a_2digit_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'G': + if (command) + { + if (modified == CharT{}) + { + int tG = not_a_year; + read(is, rs{tG, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(G, tG, not_a_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'U': + if (command) + { + if (modified == CharT{}) + { + int tU = not_a_week_num; + read(is, ru{tU, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(U, tU, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'V': + if (command) + { + if (modified == CharT{}) + { + int tV = not_a_week_num; + read(is, ru{tV, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(V, tV, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'W': + if (command) + { + if (modified == CharT{}) + { + int tW = not_a_week_num; + read(is, ru{tW, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(W, tW, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else + read(is, *fmt); + break; + case '%': + if (command) + { + if (modified == CharT{}) + read(is, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + command = fmt; + break; + case 'z': + if (command) + { + int tH, tM; + minutes toff = not_a_offset; + bool neg = false; + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == '-') + neg = true; + } + if (modified == CharT{}) + { + read(is, rs{tH, 2, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if ('0' <= c && c <= '9') + { + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + else + { + read(is, rs{tH, 1, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == ':') + { + (void)is.get(); + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + if (neg) + toff = -toff; + checked_set(temp_offset, toff, not_a_offset, is); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + std::basic_string buf; + while (is.rdstate() == std::ios::goodbit) + { + auto i = is.rdbuf()->sgetc(); + if (Traits::eq_int_type(i, Traits::eof())) + { + is.setstate(ios::eofbit); + break; + } + auto wc = Traits::to_char_type(i); + auto c = static_cast(wc); + // is c a valid time zone name or abbreviation character? 
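+ // Usage sketch for the %z/%Z flags handled here (illustrative only; the
+ // parse() manipulator used below is defined near the end of this header).
+ // A %z offset is subtracted from the parsed fields when forming a
+ // sys_time, while %Z only captures the abbreviation without
+ // interpreting it:
+ //
+ //   namespace date = arrow_vendored::date;
+ //   std::istringstream in{"2021-03-14 15:09:26 -0430 XYZ"};
+ //   date::sys_seconds tp;
+ //   std::string abbrev;
+ //   in >> date::parse("%F %T %z %Z", tp, abbrev);
+ //   // tp == 2021-03-14 19:39:26 UTC; abbrev == "XYZ"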
+ if (!(CharT{1} < wc && wc < CharT{127}) || !(isalnum(c) || + c == '_' || c == '/' || c == '-' || c == '+')) + break; + buf.push_back(c); + is.rdbuf()->sbumpc(); + } + if (buf.empty()) + is.setstate(ios::failbit); + checked_set(temp_abbrev, buf, {}, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + default: + if (command) + { + if (width == -1 && modified == CharT{} && '0' <= *fmt && *fmt <= '9') + { + width = static_cast(*fmt) - '0'; + while ('0' <= fmt[1] && fmt[1] <= '9') + width = 10*width + static_cast(*++fmt) - '0'; + } + else + { + if (modified == CharT{}) + read(is, CharT{'%'}, width, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else // !command + { + if (isspace(static_cast(*fmt))) + { + // space matches 0 or more white space characters + if (is.good()) + ws(is); + } + else + read(is, *fmt); + } + break; + } + } + // is.fail() || *fmt == CharT{} + if (is.rdstate() == ios::goodbit && command) + { + if (modified == CharT{}) + read(is, CharT{'%'}, width); + else + read(is, CharT{'%'}, width, modified); + } + if (!is.fail()) + { + if (y != not_a_2digit_year) + { + // Convert y and an optional C to Y + if (!(0 <= y && y <= 99)) + goto broken; + if (C == not_a_century) + { + if (Y == not_a_year) + { + if (y >= 69) + C = 19; + else + C = 20; + } + else + { + C = (Y >= 0 ? Y : Y-100) / 100; + } + } + int tY; + if (C >= 0) + tY = 100*C + y; + else + tY = 100*(C+1) - (y == 0 ? 100 : y); + if (Y != not_a_year && Y != tY) + goto broken; + Y = tY; + } + if (g != not_a_2digit_year) + { + // Convert g and an optional C to G + if (!(0 <= g && g <= 99)) + goto broken; + if (C == not_a_century) + { + if (G == not_a_year) + { + if (g >= 69) + C = 19; + else + C = 20; + } + else + { + C = (G >= 0 ? G : G-100) / 100; + } + } + int tG; + if (C >= 0) + tG = 100*C + g; + else + tG = 100*(C+1) - (g == 0 ? 
100 : g); + if (G != not_a_year && G != tG) + goto broken; + G = tG; + } + if (Y < static_cast(year::min()) || Y > static_cast(year::max())) + Y = not_a_year; + bool computed = false; + if (G != not_a_year && V != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{G-1}/December/Thursday[last]) + + (Monday-Thursday) + weeks{V-1} + + (weekday{static_cast(wd)}-Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && U != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Sunday[1]) + + weeks{U-1} + + (weekday{static_cast(wd)} - Sunday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && W != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Monday[1]) + + weeks{W-1} + + (weekday{static_cast(wd)} - Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (j != not_a_doy && Y != not_a_year) + { + auto ymd_trial = year_month_day{local_days(year{Y}/1/1) + days{j-1}}; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + j = not_a_doy; + } + auto ymd = year{Y}/m/d; + if (ymd.ok()) + { + if (wd == not_a_weekday) + wd = static_cast((weekday(sys_days(ymd)) - Sunday).count()); + else if (wd != static_cast((weekday(sys_days(ymd)) - Sunday).count())) + goto broken; + if (!computed) + { + if (G != not_a_year || V != not_a_week_num) + { + sys_days sd = ymd; + auto G_trial = year_month_day{sd + days{3}}.year(); + auto start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + if (sd < start) + { + --G_trial; + if (V != not_a_week_num) + start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + } + if (G != not_a_year && G != static_cast(G_trial)) + goto broken; + if (V != not_a_week_num) + { + auto V_trial = duration_cast(sd - start).count() + 1; + if (V != V_trial) + goto broken; + } + } + if (U != not_a_week_num) + { + auto start = sys_days(Sunday[1]/January/ymd.year()); + auto U_trial = floor(sys_days(ymd) - start).count() + 1; + if (U != U_trial) + goto broken; + } + if (W != not_a_week_num) + { + auto start = 
sys_days(Monday[1]/January/ymd.year()); + auto W_trial = floor(sys_days(ymd) - start).count() + 1; + if (W != W_trial) + goto broken; + } + } + } + fds.ymd = ymd; + if (I != not_a_hour_12_value) + { + if (!(1 <= I && I <= 12)) + goto broken; + if (p != not_a_ampm) + { + // p is in [0, 1] == [AM, PM] + // Store trial H in I + if (I == 12) + --p; + I += p*12; + // Either set H from I or make sure H and I are consistent + if (H == not_a_hour) + H = I; + else if (I != H) + goto broken; + } + else // p == not_a_ampm + { + // if H, make sure H and I could be consistent + if (H != not_a_hour) + { + if (I == 12) + { + if (H != 0 && H != 12) + goto broken; + } + else if (!(I == H || I == H+12)) + { + goto broken; + } + } + else // I is ambiguous, AM or PM? + goto broken; + } + } + if (H != not_a_hour) + { + fds.has_tod = true; + fds.tod = hh_mm_ss{hours{H}}; + } + if (M != not_a_minute) + { + fds.has_tod = true; + fds.tod.m_ = minutes{M}; + } + if (s != not_a_second) + { + fds.has_tod = true; + fds.tod.s_ = detail::decimal_format_seconds{s}; + } + if (j != not_a_doy) + { + fds.has_tod = true; + fds.tod.h_ += hours{days{j}}; + } + if (wd != not_a_weekday) + fds.wd = weekday{static_cast(wd)}; + if (abbrev != nullptr) + *abbrev = std::move(temp_abbrev); + if (offset != nullptr && temp_offset != not_a_offset) + *offset = temp_offset; + } + return is; + } +broken: + is.setstate(ios::failbit); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year& y, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.year().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + y = fds.ymd.year(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month& m, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + m = fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, day& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, weekday& wd, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.wd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + wd = fds.wd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year_month& ym, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ym = fds.ymd.year()/fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month_day& md, + 
std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok() || !fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + md = fds.ymd.month()/fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + year_month_day& ymd, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ymd = fds.ymd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + sys_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + std::chrono::minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + local_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(local_seconds{local_days(fds.ymd)} + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + std::chrono::duration& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.has_tod) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = round_i(fds.tod.to_duration()); + return is; +} + +template , + class Alloc = std::allocator> +struct parse_manip +{ + const std::basic_string format_; + Parsable& tp_; + std::basic_string* abbrev_; + std::chrono::minutes* offset_; + +public: + parse_manip(std::basic_string format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(std::move(format)) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + +#if HAS_STRING_VIEW + parse_manip(const CharT* format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + + parse_manip(std::basic_string_view format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} +#endif // HAS_STRING_VIEW +}; + +template +std::basic_istream& +operator>>(std::basic_istream& is, + const parse_manip& x) +{ + return date::from_stream(is, x.format_.c_str(), x.tp_, x.abbrev_, x.offset_); +} + +template +inline +auto +parse(const 
std::basic_string& format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, + std::declval*>(), + &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// const CharT* formats + +template +inline +auto +parse(const CharT* format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), format, tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, std::declval*>(), &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// duration streaming + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, + const std::chrono::duration& d) +{ + return os << detail::make_string::from(d.count()) + + detail::get_units(typename Period::type{}); +} + +} // namespace date +} // namespace arrow_vendored + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif + +#endif // DATE_H diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h new file mode 100644 index 0000000000000000000000000000000000000000..acad28d13b558b9ce97ad2100e90d0835aaadc0e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h @@ -0,0 +1,53 @@ +// +// ios.h +// DateTimeLib +// +// The MIT License (MIT) +// +// Copyright (c) 2016 Alexander Kormanovsky +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright 
notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ios_hpp +#define ios_hpp + +#if __APPLE__ +# include +# if TARGET_OS_IPHONE +# include + + namespace arrow_vendored + { + namespace date + { + namespace iOSUtils + { + + std::string get_tzdata_path(); + std::string get_current_timezone(); + + } // namespace iOSUtils + } // namespace date + } // namespace arrow_vendored + +# endif // TARGET_OS_IPHONE +#else // !__APPLE__ +# define TARGET_OS_IPHONE 0 +#endif // !__APPLE__ +#endif // ios_hpp diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h new file mode 100644 index 0000000000000000000000000000000000000000..467db6d19979375f310374d5bc20eeb1d09a3034 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h @@ -0,0 +1,2801 @@ +#ifndef TZ_H +#define TZ_H + +// The MIT License (MIT) +// +// Copyright (c) 2015, 2016, 2017 Howard Hinnant +// Copyright (c) 2017 Jiangang Zhuang +// Copyright (c) 2017 Aaron Bishop +// Copyright (c) 2017 Tomasz Kamiński +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +// Get more recent database at http://www.iana.org/time-zones + +// The notion of "current timezone" is something the operating system is expected to "just +// know". How it knows this is system specific. It's often a value set by the user at OS +// installation time and recorded by the OS somewhere. On Linux and Mac systems the current +// timezone name is obtained by looking at the name or contents of a particular file on +// disk. On Windows the current timezone name comes from the registry. 
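+// For instance, with this library the resolved name is surfaced through
+// current_zone(), declared further down in this header. A minimal,
+// illustrative sketch:
+//
+//   const auto* tz = arrow_vendored::date::current_zone();
+//   std::cout << tz->name() << '\n';  // e.g. "America/New_York"
+//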
In either method, +// there is no guarantee that the "native" current timezone name obtained will match any +// of the "Standard" names in this library's "database". On Linux, the names usually do +// seem to match so mapping functions to map from native to "Standard" are typically not +// required. On Windows, the names are never "Standard" so mapping is always required. +// Technically any OS may use the mapping process but currently only Windows does use it. + +// NOTE(ARROW): If this is not set, then the library will attempt to +// use libcurl to obtain a timezone database, and we probably do not want this. +#ifndef _WIN32 +#define USE_OS_TZDB 1 +#endif + +#ifndef USE_OS_TZDB +# define USE_OS_TZDB 0 +#endif + +#ifndef HAS_REMOTE_API +# if USE_OS_TZDB == 0 +# ifdef _WIN32 +# define HAS_REMOTE_API 0 +# else +# define HAS_REMOTE_API 1 +# endif +# else // HAS_REMOTE_API makes no sense when using the OS timezone database +# define HAS_REMOTE_API 0 +# endif +#endif + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wconstant-logical-operand" +#endif + +static_assert(!(USE_OS_TZDB && HAS_REMOTE_API), + "USE_OS_TZDB and HAS_REMOTE_API can not be used together"); + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#ifndef AUTO_DOWNLOAD +# define AUTO_DOWNLOAD HAS_REMOTE_API +#endif + +static_assert(HAS_REMOTE_API == 0 ? AUTO_DOWNLOAD == 0 : true, + "AUTO_DOWNLOAD can not be turned on without HAS_REMOTE_API"); + +#ifndef USE_SHELL_API +# define USE_SHELL_API 1 +#endif + +#if USE_OS_TZDB +# ifdef _WIN32 +# error "USE_OS_TZDB can not be used on Windows" +# endif +#endif + +#ifndef HAS_DEDUCTION_GUIDES +# if __cplusplus >= 201703 +# define HAS_DEDUCTION_GUIDES 1 +# else +# define HAS_DEDUCTION_GUIDES 0 +# endif +#endif // HAS_DEDUCTION_GUIDES + +#include "date.h" + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz_private.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# ifdef DATE_BUILD_DLL +# define DATE_API __declspec(dllexport) +# elif defined(DATE_USE_DLL) +# define DATE_API __declspec(dllimport) +# else +# define DATE_API +# endif +#else +# ifdef DATE_BUILD_DLL +# define DATE_API __attribute__ ((visibility ("default"))) +# else +# define DATE_API +# endif +#endif + +namespace arrow_vendored +{ +namespace date +{ + +enum class choose {earliest, latest}; + +namespace detail +{ + struct undocumented; + + template + struct nodeduct + { + using type = T; + }; + + template + using nodeduct_t = typename nodeduct::type; +} + +struct sys_info +{ + sys_seconds begin; + sys_seconds end; + std::chrono::seconds offset; + std::chrono::minutes save; + std::string abbrev; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_info& r) +{ + os << r.begin << '\n'; + os << r.end << '\n'; + os << make_time(r.offset) << "\n"; + os << make_time(r.save) << "\n"; + os << r.abbrev << '\n'; + return os; +} + +struct local_info +{ + enum {unique, nonexistent, ambiguous} result; + sys_info first; + sys_info second; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_info& r) +{ + if (r.result == local_info::nonexistent) + os << "nonexistent between\n"; + else if (r.result == local_info::ambiguous) + os << "ambiguous between\n"; + os << r.first; + if (r.result != local_info::unique) + { + os << "and\n"; + os << r.second; + } + return os; +} + +class nonexistent_local_time + : 
public std::runtime_error +{ +public: + template + nonexistent_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +nonexistent_local_time::nonexistent_local_time(local_time tp, + const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +nonexistent_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::nonexistent); + std::ostringstream os; + os << tp << " is in a gap between\n" + << local_seconds{i.first.end.time_since_epoch()} + i.first.offset << ' ' + << i.first.abbrev << " and\n" + << local_seconds{i.second.begin.time_since_epoch()} + i.second.offset << ' ' + << i.second.abbrev + << " which are both equivalent to\n" + << i.first.end << " UTC"; + return os.str(); +} + +class ambiguous_local_time + : public std::runtime_error +{ +public: + template + ambiguous_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +ambiguous_local_time::ambiguous_local_time(local_time tp, const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +ambiguous_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::ambiguous); + std::ostringstream os; + os << tp << " is ambiguous. It could be\n" + << tp << ' ' << i.first.abbrev << " == " + << tp - i.first.offset << " UTC or\n" + << tp << ' ' << i.second.abbrev << " == " + << tp - i.second.offset << " UTC"; + return os.str(); +} + +class time_zone; + +#if HAS_STRING_VIEW +DATE_API const time_zone* locate_zone(std::string_view tz_name); +#else +DATE_API const time_zone* locate_zone(const std::string& tz_name); +#endif + +DATE_API const time_zone* current_zone(); + +template +struct zoned_traits +{ +}; + +template <> +struct zoned_traits +{ + static + const time_zone* + default_zone() + { + return date::locate_zone("Etc/UTC"); + } + +#if HAS_STRING_VIEW + + static + const time_zone* + locate_zone(std::string_view name) + { + return date::locate_zone(name); + } + +#else // !HAS_STRING_VIEW + + static + const time_zone* + locate_zone(const std::string& name) + { + return date::locate_zone(name); + } + + static + const time_zone* + locate_zone(const char* name) + { + return date::locate_zone(name); + } + +#endif // !HAS_STRING_VIEW +}; + +template +class zoned_time; + +template +bool +operator==(const zoned_time& x, + const zoned_time& y); + +template +class zoned_time +{ +public: + using duration = typename std::common_type::type; + +private: + TimeZonePtr zone_; + sys_time tp_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(const sys_time& st); + explicit zoned_time(TimeZonePtr z); + +#if HAS_STRING_VIEW + template ::locate_zone(std::string_view())) + >::value + >::type> + explicit zoned_time(std::string_view name); +#else +# if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())) + >::value + >::type> +# endif + explicit zoned_time(const std::string& name); +#endif + + template , + sys_time>::value + >::type> + zoned_time(const zoned_time& zt) NOEXCEPT; + + zoned_time(TimeZonePtr z, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{})), + sys_time + >::value + 
>::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{}, + choose::earliest)), + sys_time + >::value + >::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp, choose c); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt, choose); + +#if HAS_STRING_VIEW + + template ::locate_zone(std::string_view())), + sys_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> st); + + template ::locate_zone(std::string_view())), + local_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp); + + template ::locate_zone(std::string_view())), + local_time, + choose + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp, choose c); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time, + choose + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt, choose); + +#else // !HAS_STRING_VIEW + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const char* name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt, + choose); + +#if 
!defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt, + choose); + +#endif // !HAS_STRING_VIEW + + zoned_time& operator=(const sys_time& st); + zoned_time& operator=(const local_time& ut); + + explicit operator sys_time() const; + explicit operator local_time() const; + + TimeZonePtr get_time_zone() const; + local_time get_local_time() const; + sys_time get_sys_time() const; + sys_info get_info() const; + + template + friend + bool + operator==(const zoned_time& x, + const zoned_time& y); + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, + const zoned_time& t); + +private: + template friend class zoned_time; + + template + static + TimeZonePtr2&& + check(TimeZonePtr2&& p); +}; + +using zoned_seconds = zoned_time; + +#if HAS_DEDUCTION_GUIDES + +namespace detail +{ + template + using time_zone_representation = + std::conditional_t + < + std::is_convertible::value, + time_zone const*, + std::remove_cv_t> + >; +} + +zoned_time() + -> zoned_time; + +template +zoned_time(sys_time) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&, sys_time) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, local_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, zoned_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +#endif // HAS_DEDUCTION_GUIDES + +template +inline +bool +operator==(const zoned_time& x, + const zoned_time& y) +{ + return x.zone_ == y.zone_ && x.tp_ == y.tp_; +} + +template +inline +bool +operator!=(const zoned_time& x, + const zoned_time& y) +{ + return !(x == y); +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + +namespace detail +{ +# if USE_OS_TZDB + struct transition; + struct expanded_ttinfo; +# else // !USE_OS_TZDB + struct zonelet; + class Rule; +# endif // !USE_OS_TZDB +} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +class time_zone +{ +private: + std::string name_; +#if USE_OS_TZDB + std::vector transitions_; + std::vector ttinfos_; +#else // !USE_OS_TZDB + std::vector zonelets_; +#endif // !USE_OS_TZDB + std::unique_ptr adjusted_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + time_zone(time_zone&&) = default; + time_zone& operator=(time_zone&&) = default; +#else // defined(_MSC_VER) && (_MSC_VER < 1900) + time_zone(time_zone&& src); + time_zone& operator=(time_zone&& src); +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + + DATE_API explicit time_zone(const std::string& s, detail::undocumented); + + const std::string& name() const NOEXCEPT; + + template sys_info get_info(sys_time st) const; + template local_info get_info(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp, choose z) const; + + template + local_time::type> + to_local(sys_time tp) const; + + friend bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT; + friend bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT; + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone& z); + +#if !USE_OS_TZDB + DATE_API void add(const std::string& s); +#endif // !USE_OS_TZDB + 
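+
+ // Usage sketch (illustrative only, not part of the API): resolving an
+ // ambiguous local time when clocks fall back, using the choose overload.
+ // The zone name and date below are assumptions chosen for illustration;
+ // in "Australia/Sydney" DST ended at 03:00 on 2024-04-07, so the wall
+ // time 02:30 occurred twice that morning:
+ //
+ //   auto* tz = locate_zone("Australia/Sydney");
+ //   local_time<std::chrono::seconds> lt =
+ //       local_days{April/7/2024} + std::chrono::minutes{150};
+ //   auto first_utc  = tz->to_sys(lt, choose::earliest);
+ //   auto second_utc = tz->to_sys(lt, choose::latest);  // one hour later
+ //   // tz->to_sys(lt) with no choose argument throws
+ //   // ambiguous_local_time for this lt instead.
+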
+private: + DATE_API sys_info get_info_impl(sys_seconds tp) const; + DATE_API local_info get_info_impl(local_seconds tp) const; + + template + sys_time::type> + to_sys_impl(local_time tp, choose z, std::false_type) const; + template + sys_time::type> + to_sys_impl(local_time tp, choose, std::true_type) const; + +#if USE_OS_TZDB + DATE_API void init() const; + DATE_API void init_impl(); + DATE_API sys_info + load_sys_info(std::vector::const_iterator i) const; + + template + DATE_API void + load_data(std::istream& inf, std::int32_t tzh_leapcnt, std::int32_t tzh_timecnt, + std::int32_t tzh_typecnt, std::int32_t tzh_charcnt); +#else // !USE_OS_TZDB + DATE_API sys_info get_info_impl(sys_seconds tp, int tz_int) const; + DATE_API void adjust_infos(const std::vector& rules); + DATE_API void parse_info(std::istream& in); +#endif // !USE_OS_TZDB +}; + +#if defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +time_zone::time_zone(time_zone&& src) + : name_(std::move(src.name_)) + , zonelets_(std::move(src.zonelets_)) + , adjusted_(std::move(src.adjusted_)) + {} + +inline +time_zone& +time_zone::operator=(time_zone&& src) +{ + name_ = std::move(src.name_); + zonelets_ = std::move(src.zonelets_); + adjusted_ = std::move(src.adjusted_); + return *this; +} + +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +const std::string& +time_zone::name() const NOEXCEPT +{ + return name_; +} + +template +inline +sys_info +time_zone::get_info(sys_time st) const +{ + return get_info_impl(date::floor(st)); +} + +template +inline +local_info +time_zone::get_info(local_time tp) const +{ + return get_info_impl(date::floor(tp)); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp) const +{ + return to_sys_impl(tp, choose{}, std::true_type{}); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp, choose z) const +{ + return to_sys_impl(tp, z, std::false_type{}); +} + +template +inline +local_time::type> +time_zone::to_local(sys_time tp) const +{ + using LT = local_time::type>; + auto i = get_info(tp); + return LT{(tp + i.offset).time_since_epoch()}; +} + +inline bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ == y.name_;} +inline bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ < y.name_;} + +inline bool operator!=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x == y);} +inline bool operator> (const time_zone& x, const time_zone& y) NOEXCEPT {return y < x;} +inline bool operator<=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(y < x);} +inline bool operator>=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x < y);} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose z, std::false_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + { + return i.first.end; + } + else if (i.result == local_info::ambiguous) + { + if (z == choose::latest) + return sys_time{tp.time_since_epoch()} - i.second.offset; + } + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose, std::true_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + throw nonexistent_local_time(tp, i); + else if (i.result == local_info::ambiguous) + throw ambiguous_local_time(tp, i); + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +#if !USE_OS_TZDB + +class time_zone_link +{ +private: + std::string name_; + std::string target_; +public: + 
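+ // A time_zone_link models one tzdata "Link" line, i.e. an alternative
+ // name for a canonical zone. Illustrative example (the exact link set
+ // depends on the installed tzdata): the line
+ //
+ //   Link  Australia/Sydney  Australia/ACT
+ //
+ // yields name() == "Australia/ACT" and target() == "Australia/Sydney".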
DATE_API explicit time_zone_link(const std::string& s); + + const std::string& name() const {return name_;} + const std::string& target() const {return target_;} + + friend bool operator==(const time_zone_link& x, const time_zone_link& y) {return x.name_ == y.name_;} + friend bool operator< (const time_zone_link& x, const time_zone_link& y) {return x.name_ < y.name_;} + + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone_link& x); +}; + +using link = time_zone_link; + +inline bool operator!=(const time_zone_link& x, const time_zone_link& y) {return !(x == y);} +inline bool operator> (const time_zone_link& x, const time_zone_link& y) {return y < x;} +inline bool operator<=(const time_zone_link& x, const time_zone_link& y) {return !(y < x);} +inline bool operator>=(const time_zone_link& x, const time_zone_link& y) {return !(x < y);} + +#endif // !USE_OS_TZDB + +class leap_second +{ +private: + sys_seconds date_; + +public: +#if USE_OS_TZDB + DATE_API explicit leap_second(const sys_seconds& s, detail::undocumented); +#else + DATE_API explicit leap_second(const std::string& s, detail::undocumented); +#endif + + sys_seconds date() const {return date_;} + + friend bool operator==(const leap_second& x, const leap_second& y) {return x.date_ == y.date_;} + friend bool operator< (const leap_second& x, const leap_second& y) {return x.date_ < y.date_;} + + template + friend + bool + operator==(const leap_second& x, const sys_time& y) + { + return x.date_ == y; + } + + template + friend + bool + operator< (const leap_second& x, const sys_time& y) + { + return x.date_ < y; + } + + template + friend + bool + operator< (const sys_time& x, const leap_second& y) + { + return x < y.date_; + } + + friend DATE_API std::ostream& operator<<(std::ostream& os, const leap_second& x); +}; + +inline bool operator!=(const leap_second& x, const leap_second& y) {return !(x == y);} +inline bool operator> (const leap_second& x, const leap_second& y) {return y < x;} +inline bool operator<=(const leap_second& x, const leap_second& y) {return !(y < x);} +inline bool operator>=(const leap_second& x, const leap_second& y) {return !(x < y);} + +template +inline +bool +operator==(const sys_time& x, const leap_second& y) +{ + return y == x; +} + +template +inline +bool +operator!=(const leap_second& x, const sys_time& y) +{ + return !(x == y); +} + +template +inline +bool +operator!=(const sys_time& x, const leap_second& y) +{ + return !(x == y); +} + +template +inline +bool +operator> (const leap_second& x, const sys_time& y) +{ + return y < x; +} + +template +inline +bool +operator> (const sys_time& x, const leap_second& y) +{ + return y < x; +} + +template +inline +bool +operator<=(const leap_second& x, const sys_time& y) +{ + return !(y < x); +} + +template +inline +bool +operator<=(const sys_time& x, const leap_second& y) +{ + return !(y < x); +} + +template +inline +bool +operator>=(const leap_second& x, const sys_time& y) +{ + return !(x < y); +} + +template +inline +bool +operator>=(const sys_time& x, const leap_second& y) +{ + return !(x < y); +} + +using leap = leap_second; + +#ifdef _WIN32 + +namespace detail +{ + +// The time zone mapping is modelled after this data file: +// http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml +// and the field names match the element names from the mapZone element +// of windowsZones.xml. 
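+// For example, one mapZone element of windowsZones.xml reads (abbreviated
+// here for illustration):
+//
+//   <mapZone other="AUS Central Standard Time" territory="001"
+//            type="Australia/Darwin"/>
+//
+// and corresponds to
+// timezone_mapping{"AUS Central Standard Time", "001", "Australia/Darwin"}.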
+// The website displays this file here: +// http://www.unicode.org/cldr/charts/latest/supplemental/zone_tzid.html +// The html view is sorted before being displayed but is otherwise the same. +// There is a mapping between the os-centric view (in this case Windows) +// that the html display uses and the generic view that the xml file uses. +// That mapping is this: +// display column "windows" -> xml field "other". +// display column "region" -> xml field "territory". +// display column "tzid" -> xml field "type". +// This structure uses the generic terminology because it could be +// used to support other os/native name conversions, not just Windows, +// and using the same generic names helps retain the connection to the +// origin of the data that we are using. +struct timezone_mapping +{ + timezone_mapping(const char* other, const char* territory, const char* type) + : other(other), territory(territory), type(type) + { + } + timezone_mapping() = default; + std::string other; + std::string territory; + std::string type; +}; + +} // detail + +#endif // _WIN32 + +struct tzdb +{ + std::string version = "unknown"; + std::vector zones; +#if !USE_OS_TZDB + std::vector links; +#endif + std::vector leap_seconds; +#if !USE_OS_TZDB + std::vector rules; +#endif +#ifdef _WIN32 + std::vector mappings; +#endif + tzdb* next = nullptr; + + tzdb() = default; +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + tzdb(tzdb&&) = default; + tzdb& operator=(tzdb&&) = default; +#else // defined(_MSC_VER) && (_MSC_VER < 1900) + tzdb(tzdb&& src) + : version(std::move(src.version)) + , zones(std::move(src.zones)) + , links(std::move(src.links)) + , leap_seconds(std::move(src.leap_seconds)) + , rules(std::move(src.rules)) + , mappings(std::move(src.mappings)) + {} + + tzdb& operator=(tzdb&& src) + { + version = std::move(src.version); + zones = std::move(src.zones); + links = std::move(src.links); + leap_seconds = std::move(src.leap_seconds); + rules = std::move(src.rules); + mappings = std::move(src.mappings); + return *this; + } +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + +#if HAS_STRING_VIEW + const time_zone* locate_zone(std::string_view tz_name) const; +#else + const time_zone* locate_zone(const std::string& tz_name) const; +#endif + const time_zone* current_zone() const; +}; + +using TZ_DB = tzdb; + +DATE_API std::ostream& +operator<<(std::ostream& os, const tzdb& db); + +DATE_API const tzdb& get_tzdb(); + +class tzdb_list +{ + std::atomic head_{nullptr}; + +public: + ~tzdb_list(); + tzdb_list() = default; + tzdb_list(tzdb_list&& x) NOEXCEPT; + + const tzdb& front() const NOEXCEPT {return *head_;} + tzdb& front() NOEXCEPT {return *head_;} + + class const_iterator; + + const_iterator begin() const NOEXCEPT; + const_iterator end() const NOEXCEPT; + + const_iterator cbegin() const NOEXCEPT; + const_iterator cend() const NOEXCEPT; + + const_iterator erase_after(const_iterator p) NOEXCEPT; + + struct undocumented_helper; +private: + void push_front(tzdb* tzdb) NOEXCEPT; +}; + +class tzdb_list::const_iterator +{ + tzdb* p_ = nullptr; + + explicit const_iterator(tzdb* p) NOEXCEPT : p_{p} {} +public: + const_iterator() = default; + + using iterator_category = std::forward_iterator_tag; + using value_type = tzdb; + using reference = const value_type&; + using pointer = const value_type*; + using difference_type = std::ptrdiff_t; + + reference operator*() const NOEXCEPT {return *p_;} + pointer operator->() const NOEXCEPT {return p_;} + + const_iterator& operator++() NOEXCEPT {p_ = p_->next; return *this;} + const_iterator
operator++(int) NOEXCEPT {auto t = *this; ++(*this); return t;} + + friend + bool + operator==(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return x.p_ == y.p_;} + + friend + bool + operator!=(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return !(x == y);} + + friend class tzdb_list; +}; + +inline +tzdb_list::const_iterator +tzdb_list::begin() const NOEXCEPT +{ + return const_iterator{head_}; +} + +inline +tzdb_list::const_iterator +tzdb_list::end() const NOEXCEPT +{ + return const_iterator{nullptr}; +} + +inline +tzdb_list::const_iterator +tzdb_list::cbegin() const NOEXCEPT +{ + return begin(); +} + +inline +tzdb_list::const_iterator +tzdb_list::cend() const NOEXCEPT +{ + return end(); +} + +DATE_API tzdb_list& get_tzdb_list(); + +#if !USE_OS_TZDB + +DATE_API const tzdb& reload_tzdb(); +DATE_API void set_install(const std::string& install); + +#endif // !USE_OS_TZDB + +#if HAS_REMOTE_API + +DATE_API std::string remote_version(); +// if provided error_buffer size should be at least CURL_ERROR_SIZE +DATE_API bool remote_download(const std::string& version, char* error_buffer = nullptr); +DATE_API bool remote_install(const std::string& version); + +#endif + +// zoned_time + +namespace detail +{ + +template +inline +T* +to_raw_pointer(T* p) NOEXCEPT +{ + return p; +} + +template +inline +auto +to_raw_pointer(Pointer p) NOEXCEPT + -> decltype(detail::to_raw_pointer(p.operator->())) +{ + return detail::to_raw_pointer(p.operator->()); +} + +} // namespace detail + +template +template +inline +TimeZonePtr2&& +zoned_time::check(TimeZonePtr2&& p) +{ + if (detail::to_raw_pointer(p) == nullptr) + throw std::runtime_error( + "zoned_time constructed with a time zone pointer == nullptr"); + return std::forward(p); +} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time() + : zone_(check(zoned_traits::default_zone())) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const sys_time& st) + : zone_(check(zoned_traits::default_zone())) + , tp_(st) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z) + : zone_(check(std::move(z))) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#endif // !HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(const zoned_time& zt) NOEXCEPT + : zone_(zt.zone_) + , tp_(zt.tp_) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z, const sys_time& st) + : zone_(check(std::move(z))) + , tp_(st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t)) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t, + choose c) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t, c)) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt) + : zone_(check(std::move(z))) + , tp_(zt.tp_) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt, choose) + : 
zoned_time(std::move(z), zt) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#endif // HAS_STRING_VIEW + +template +inline +zoned_time& +zoned_time::operator=(const sys_time& st) +{ + tp_ = st; + return *this; +} + +template +inline +zoned_time& +zoned_time::operator=(const local_time& ut) +{ + tp_ = zone_->to_sys(ut); + return *this; +} + +template +inline +zoned_time::operator local_time::duration>() const +{ + return get_local_time(); +} + +template +inline +zoned_time::operator sys_time::duration>() 
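+// Note on the assignment operators above: assigning a sys_time or a
+// local_time replaces only the stored time point; the zone chosen at
+// construction is kept. A local_time on the right-hand side is first
+// mapped through zone_->to_sys(), so it can throw for nonexistent or
+// ambiguous local times, exactly like the corresponding constructor.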
const +{ + return get_sys_time(); +} + +template +inline +TimeZonePtr +zoned_time::get_time_zone() const +{ + return zone_; +} + +template +inline +local_time::duration> +zoned_time::get_local_time() const +{ + return zone_->to_local(tp_); +} + +template +inline +sys_time::duration> +zoned_time::get_sys_time() const +{ + return tp_; +} + +template +inline +sys_info +zoned_time::get_info() const +{ + return zone_->get_info(tp_); +} + +// make_zoned_time + +inline +zoned_time +make_zoned() +{ + return zoned_time(); +} + +template +inline +zoned_time::type> +make_zoned(const sys_time& tp) +{ + return zoned_time::type>(tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class + < + typename std::decay + < + decltype(*detail::to_raw_pointer(std::declval())) + >::type + >{} + >::type +#endif +#endif + > +inline +zoned_time +make_zoned(TimeZonePtr z) +{ + return zoned_time(std::move(z)); +} + +inline +zoned_seconds +make_zoned(const std::string& name) +{ + return zoned_seconds(name); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp, choose c) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp, c); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp) +{ + return zoned_time::type>(name, tp); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp, choose c) +{ + return zoned_time::type>(name, tp, c); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt) +{ + return zoned_time(std::move(zone), zt); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt) +{ + return zoned_time(name, zt); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt, choose c) +{ + return zoned_time(std::move(zone), zt, c); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt, choose c) +{ + return zoned_time(name, zt, c); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const sys_time& st) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), st); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const sys_time& st) +{ + return zoned_time::type>(name, st); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const zoned_time& tp) +{ + using duration = typename zoned_time::duration; + using LT = local_time; + auto const st = tp.get_sys_time(); + auto const info = tp.get_time_zone()->get_info(st); + return to_stream(os, fmt, LT{(st+info.offset).time_since_epoch()}, + &info.abbrev, &info.offset); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, 
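+// Usage sketch for the make_zoned() factories above (C++11-era deduction
+// helpers predating class template argument deduction; the zone name is
+// illustrative):
+//
+//     auto zt = make_zoned("Australia/Sydney",
+//                          floor<std::chrono::seconds>(system_clock::now()));
+//     auto lt = zt.get_local_time();  // wall-clock reading in Sydney
+//     auto st = zt.get_sys_time();    // the same instant on the sys clock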
const zoned_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', ' ', '%', 'Z', CharT{}}; + return to_stream(os, fmt, t); +} + +class utc_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static CONSTDATA bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&); +}; + +template + using utc_time = std::chrono::time_point; + +using utc_seconds = utc_time; + +template +utc_time::type> +utc_clock::from_sys(const sys_time& st) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), st); + return utc_time{st.time_since_epoch() + seconds{lt-leaps.begin()}}; +} + +// Return pair +// first is true if ut is during a leap second insertion, otherwise false. +// If ut is during a leap second insertion, that leap second is included in the count +template +std::pair +is_leap_second(date::utc_time const& ut) +{ + using std::chrono::seconds; + using duration = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto tp = sys_time{ut.time_since_epoch()}; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), tp); + auto ds = seconds{lt-leaps.begin()}; + tp -= ds; + auto ls = false; + if (lt > leaps.begin()) + { + if (tp < lt[-1]) + { + if (tp >= lt[-1].date() - seconds{1}) + ls = true; + else + --ds; + } + } + return {ls, ds}; +} + +struct leap_second_info +{ + bool is_leap_second; + std::chrono::seconds elapsed; +}; + +template +leap_second_info +get_leap_second_info(date::utc_time const& ut) +{ + auto p = is_leap_second(ut); + return {p.first, p.second}; +} + +template +sys_time::type> +utc_clock::to_sys(const utc_time& ut) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto ls = is_leap_second(ut); + auto tp = sys_time{ut.time_since_epoch() - ls.second}; + if (ls.first) + tp = floor(tp) + seconds{1} - CD{1}; + return tp; +} + +inline +utc_clock::time_point +utc_clock::now() +{ + return from_sys(std::chrono::system_clock::now()); +} + +template +utc_time::type> +utc_clock::from_local(const local_time& st) +{ + return from_sys(sys_time{st.time_since_epoch()}); +} + +template +local_time::type> +utc_clock::to_local(const utc_time& ut) +{ + using CD = typename std::common_type::type; + return local_time{to_sys(ut).time_since_epoch()}; +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const utc_time& t) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto ls = is_leap_second(t); + auto tp = sys_time{t.time_since_epoch() - ls.second}; + auto const sd = floor(tp); + year_month_day ymd = sd; + auto time = make_time(tp - sys_seconds{sd}); + time.seconds(detail::undocumented{}) += seconds{ls.first}; + fields fds{ymd, time}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const 
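+// Worked example for the leap-second logic above: the tzdb lists 27 leap
+// second insertions through 2016-12-31, so utc_clock::from_sys() of any
+// sys_time from 2017 onward adds 27s on top of the epoch difference, and
+// is_leap_second() yields {true, 27s} only for instants falling inside
+// 2016-12-31 23:59:60 UTC (the second that from_stream below will also
+// accept when parsing).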
utc_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + utc_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using std::chrono::seconds; + using std::chrono::minutes; + using CT = typename std::common_type::type; + minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + { + bool is_60_sec = fds.tod.seconds() == seconds{60}; + if (is_60_sec) + fds.tod.seconds(detail::undocumented{}) -= seconds{1}; + auto tmp = utc_clock::from_sys(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + if (is_60_sec) + tmp += seconds{1}; + if (is_60_sec != is_leap_second(tmp).first || !fds.tod.in_conventional_range()) + { + is.setstate(std::ios::failbit); + return is; + } + tp = std::chrono::time_point_cast(tmp); + } + return is; +} + +// tai_clock + +class tai_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using tai_time = std::chrono::time_point; + +using tai_seconds = tai_time; + +template +inline +utc_time::type> +tai_clock::to_utc(const tai_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} - + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +template +inline +tai_time::type> +tai_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +inline +tai_clock::time_point +tai_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +tai_clock::to_local(const tai_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} - + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +inline +tai_time::type> +tai_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const tai_time& t) +{ + const std::string abbrev("TAI"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, tai_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const tai_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > 
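+// Arithmetic check for the TAI<->UTC conversions above: TAI's epoch is
+// 1958-01-01, and TAI was already 10s ahead of UTC when leap-second
+// accounting began in 1972; that is what the constant
+//     sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}
+// encodes. For example, "2016-12-31 23:59:60" parsed as a utc_time (a real
+// leap second) converts to a TAI time-of-day ending in :36, since TAI-UTC
+// was 36s until that second completed.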
+std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + tai_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = tai_clock::from_local(lp); + return is; +} + +// gps_clock + +class gps_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using gps_time = std::chrono::time_point; + +using gps_seconds = gps_time; + +template +inline +utc_time::type> +gps_clock::to_utc(const gps_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} + + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +template +inline +gps_time::type> +gps_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +inline +gps_clock::time_point +gps_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +gps_clock::to_local(const gps_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} + + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + +template +inline +gps_time::type> +gps_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const gps_time& t) +{ + const std::string abbrev("GPS"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, gps_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const gps_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + gps_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = gps_clock::from_local(lp); + return is; +} + +// clock_time_conversion + +template +struct clock_time_conversion +{}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + sys_time + operator()(const sys_time& st) const + { + return st; + } +}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + utc_time + operator()(const utc_time& ut) const + { + return ut; + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + local_time 
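+// Arithmetic check for the GPS conversions above: the GPS epoch is
+// 1980-01-06, i.e. year{1980}/January/Sunday[1], and GPS time matched UTC
+// at that epoch while ticking without leap seconds afterwards; the
+// seconds{9} term accounts for the nine leap seconds inserted between
+// 1972 and the GPS epoch, which this library's utc_time representation
+// already contains.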
+    operator()(const local_time& lt) const
+    {
+        return lt;
+    }
+};
+
+template <>
+struct clock_time_conversion
+{
+    template
+    utc_time::type>
+    operator()(const sys_time& st) const
+    {
+        return utc_clock::from_sys(st);
+    }
+};
+
+template <>
+struct clock_time_conversion
+{
+    template
+    sys_time::type>
+    operator()(const utc_time& ut) const
+    {
+        return utc_clock::to_sys(ut);
+    }
+};
+
+template<>
+struct clock_time_conversion
+{
+    template
+    CONSTCD14
+    local_time
+    operator()(const sys_time& st) const
+    {
+        return local_time{st.time_since_epoch()};
+    }
+};
+
+template<>
+struct clock_time_conversion
+{
+    template
+    CONSTCD14
+    sys_time
+    operator()(const local_time& lt) const
+    {
+        return sys_time{lt.time_since_epoch()};
+    }
+};
+
+template<>
+struct clock_time_conversion
+{
+    template
+    utc_time::type>
+    operator()(const local_time& lt) const
+    {
+        return utc_clock::from_local(lt);
+    }
+};
+
+template<>
+struct clock_time_conversion
+{
+    template
+    local_time::type>
+    operator()(const utc_time& ut) const
+    {
+        return utc_clock::to_local(ut);
+    }
+};
+
+template
+struct clock_time_conversion
+{
+    template
+    CONSTCD14
+    std::chrono::time_point
+    operator()(const std::chrono::time_point& tp) const
+    {
+        return tp;
+    }
+};
+
+namespace ctc_detail
+{
+
+template
+    using time_point = std::chrono::time_point;
+
+using std::declval;
+using std::chrono::system_clock;
+
+// Check if TimePoint is a time_point for the given clock;
+// if not, emit a hard error.
+template
+struct return_clock_time
+{
+    using clock_time_point = time_point;
+    using type = TimePoint;
+
+    static_assert(std::is_same::value,
+                  "time point with appropriate clock shall be returned");
+};
+
+// Check if Clock has a to_sys method that accepts a TimePoint with the given
+// duration const& and returns a sys_time. If so, this trait has a nested type
+// member equal to the return type of to_sys.
+template +struct return_to_sys +{}; + +template +struct return_to_sys + < + Clock, Duration, + decltype(Clock::to_sys(declval const&>()), void()) + > + : return_clock_time + < + system_clock, + decltype(Clock::to_sys(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_from_sys +{}; + +template +struct return_from_sys + < + Clock, Duration, + decltype(Clock::from_sys(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_sys(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_to_utc +{}; + +template +struct return_to_utc + < + Clock, Duration, + decltype(Clock::to_utc(declval const&>()), void()) + > + : return_clock_time + < + utc_clock, + decltype(Clock::to_utc(declval const&>()))> +{}; + +// Similiar to above +template +struct return_from_utc +{}; + +template +struct return_from_utc + < + Clock, Duration, + decltype(Clock::from_utc(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_utc(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_to_local +{}; + +template +struct return_to_local + < + Clock, Duration, + decltype(Clock::to_local(declval const&>()), + void()) + > + : return_clock_time + < + local_t, + decltype(Clock::to_local(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_from_local +{}; + +template +struct return_from_local + < + Clock, Duration, + decltype(Clock::from_local(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_local(declval const&>())) + > +{}; + +} // namespace ctc_detail + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_sys::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_sys(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_sys::type + operator()(const sys_time& st) const + { + return DstClock::from_sys(st); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_utc::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_utc(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_utc::type + operator()(const utc_time& ut) const + { + return DstClock::from_utc(ut); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_local::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_local(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_local::type + operator()(const local_time& lt) const + { + return DstClock::from_local(lt); + } +}; + +namespace clock_cast_detail +{ + +template + using time_point = std::chrono::time_point; +using std::chrono::system_clock; + +template +CONSTCD14 +auto +conv_clock(const time_point& t) + -> decltype(std::declval>()(t)) +{ + return clock_time_conversion{}(t); +} + +//direct trait conversion, 1st candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, const time_point*) + -> decltype(conv_clock(t)) +{ + return conv_clock(t); +} + +//conversion through sys, 2nd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, const void*) + -> decltype(conv_clock(conv_clock(t))) +{ + return conv_clock(conv_clock(t)); +} + +//conversion through utc, 2nd candidate 
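+// Sketch of what the traits above enable (my_clock is hypothetical): a
+// user clock only needs to_sys()/from_sys() (or the utc/local variants)
+// for clock_cast below to find a conversion path:
+//
+//     struct my_clock
+//     {
+//         using duration   = std::chrono::seconds;
+//         using time_point = std::chrono::time_point<my_clock, duration>;
+//
+//         template <class D>
+//         static sys_time<D> to_sys(const std::chrono::time_point<my_clock, D>& tp)
+//             { return sys_time<D>{tp.time_since_epoch()}; }
+//
+//         template <class D>
+//         static std::chrono::time_point<my_clock, D> from_sys(const sys_time<D>& tp)
+//             { return std::chrono::time_point<my_clock, D>{tp.time_since_epoch()}; }
+//     };
+//
+//     auto ut = clock_cast<utc_clock>(my_clock::time_point{});  // routed via sys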
+template +CONSTCD14 +auto +cc_impl(const time_point& t, const void*) + -> decltype(0, // MSVC_WORKAROUND + conv_clock(conv_clock(t))) +{ + return conv_clock(conv_clock(t)); +} + +//conversion through sys and utc, 3rd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, ...) + -> decltype(conv_clock(conv_clock(conv_clock(t)))) +{ + return conv_clock(conv_clock(conv_clock(t))); +} + +//conversion through utc and sys, 3rd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, ...) + -> decltype(0, // MSVC_WORKAROUND + conv_clock(conv_clock(conv_clock(t)))) +{ + return conv_clock(conv_clock(conv_clock(t))); +} + +} // namespace clock_cast_detail + +template +CONSTCD14 +auto +clock_cast(const std::chrono::time_point& tp) + -> decltype(clock_cast_detail::cc_impl(tp, &tp)) +{ + return clock_cast_detail::cc_impl(tp, &tp); +} + +// Deprecated API + +template +inline +sys_time::type> +to_sys_time(const utc_time& t) +{ + return utc_clock::to_sys(t); +} + +template +inline +sys_time::type> +to_sys_time(const tai_time& t) +{ + return utc_clock::to_sys(tai_clock::to_utc(t)); +} + +template +inline +sys_time::type> +to_sys_time(const gps_time& t) +{ + return utc_clock::to_sys(gps_clock::to_utc(t)); +} + + +template +inline +utc_time::type> +to_utc_time(const sys_time& t) +{ + return utc_clock::from_sys(t); +} + +template +inline +utc_time::type> +to_utc_time(const tai_time& t) +{ + return tai_clock::to_utc(t); +} + +template +inline +utc_time::type> +to_utc_time(const gps_time& t) +{ + return gps_clock::to_utc(t); +} + + +template +inline +tai_time::type> +to_tai_time(const sys_time& t) +{ + return tai_clock::from_utc(utc_clock::from_sys(t)); +} + +template +inline +tai_time::type> +to_tai_time(const utc_time& t) +{ + return tai_clock::from_utc(t); +} + +template +inline +tai_time::type> +to_tai_time(const gps_time& t) +{ + return tai_clock::from_utc(gps_clock::to_utc(t)); +} + + +template +inline +gps_time::type> +to_gps_time(const sys_time& t) +{ + return gps_clock::from_utc(utc_clock::from_sys(t)); +} + +template +inline +gps_time::type> +to_gps_time(const utc_time& t) +{ + return gps_clock::from_utc(t); +} + +template +inline +gps_time::type> +to_gps_time(const tai_time& t) +{ + return gps_clock::from_utc(tai_clock::to_utc(t)); +} + +} // namespace date +} // namespace arrow_vendored + +#endif // TZ_H diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h new file mode 100644 index 0000000000000000000000000000000000000000..6b7a91493e1033a7c2808040ee65b72cef5efc6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h @@ -0,0 +1,319 @@ +#ifndef TZ_PRIVATE_H +#define TZ_PRIVATE_H + +// The MIT License (MIT) +// +// Copyright (c) 2015, 2016 Howard Hinnant +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +#include "tz.h" +#else +#include "date.h" +#include +#endif + +namespace arrow_vendored +{ +namespace date +{ + +namespace detail +{ + +#if !USE_OS_TZDB + +enum class tz {utc, local, standard}; + +//forward declare to avoid warnings in gcc 6.2 +class MonthDayTime; +std::istream& operator>>(std::istream& is, MonthDayTime& x); +std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); + + +class MonthDayTime +{ +private: + struct pair + { +#if defined(_MSC_VER) && (_MSC_VER < 1900) + pair() : month_day_(date::jan / 1), weekday_(0U) {} + + pair(const date::month_day& month_day, const date::weekday& weekday) + : month_day_(month_day), weekday_(weekday) {} +#endif + + date::month_day month_day_; + date::weekday weekday_; + }; + + enum Type {month_day, month_last_dow, lteq, gteq}; + + Type type_{month_day}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + date::month_day month_day_; + date::month_weekday_last month_weekday_last_; + pair month_day_weekday_; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + U() : month_day_{date::jan/1} {} +#else + U() : + month_day_(date::jan/1), + month_weekday_last_(date::month(0U), date::weekday_last(date::weekday(0U))) + {} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + + U& operator=(const date::month_day& x); + U& operator=(const date::month_weekday_last& x); + U& operator=(const pair& x); + } u; + + std::chrono::hours h_{0}; + std::chrono::minutes m_{0}; + std::chrono::seconds s_{0}; + tz zone_{tz::local}; + +public: + MonthDayTime() = default; + MonthDayTime(local_seconds tp, tz timezone); + MonthDayTime(const date::month_day& md, tz timezone); + + date::day day() const; + date::month month() const; + tz zone() const {return zone_;} + + void canonicalize(date::year y); + + sys_seconds + to_sys(date::year y, std::chrono::seconds offset, std::chrono::seconds save) const; + sys_days to_sys_days(date::year y) const; + + sys_seconds to_time_point(date::year y) const; + int compare(date::year y, const MonthDayTime& x, date::year yx, + std::chrono::seconds offset, std::chrono::minutes prev_save) const; + + friend std::istream& operator>>(std::istream& is, MonthDayTime& x); + friend std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); +}; + +// A Rule specifies one or more set of datetimes without using an offset. +// Multiple dates are specified with multiple years. The years in effect +// go from starting_year_ to ending_year_, inclusive. starting_year_ <= +// ending_year_. save_ is in effect for times from the specified time +// onward, including the specified time. When the specified time is +// local, it uses the save_ from the chronologically previous Rule, or if +// there is none, 0. 
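+// For reference, a real tzdata line such as
+//
+//     Rule  US  2007  max  -  Mar  Sun>=8  2:00  1:00  D
+//
+// becomes a Rule with name() == "US", years 2007 through max, a
+// MonthDayTime of "Mar Sun>=8 2:00" (type gteq, tz::local because the
+// time carries no 's'/'u' suffix), save() == minutes{60} and
+// abbrev() == "D".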
+ +//forward declare to avoid warnings in gcc 6.2 +class Rule; +bool operator==(const Rule& x, const Rule& y); +bool operator<(const Rule& x, const Rule& y); +bool operator==(const Rule& x, const date::year& y); +bool operator<(const Rule& x, const date::year& y); +bool operator==(const date::year& x, const Rule& y); +bool operator<(const date::year& x, const Rule& y); +bool operator==(const Rule& x, const std::string& y); +bool operator<(const Rule& x, const std::string& y); +bool operator==(const std::string& x, const Rule& y); +bool operator<(const std::string& x, const Rule& y); +std::ostream& operator<<(std::ostream& os, const Rule& r); + +class Rule +{ +private: + std::string name_; + date::year starting_year_{0}; + date::year ending_year_{0}; + MonthDayTime starting_at_; + std::chrono::minutes save_{0}; + std::string abbrev_; + +public: + Rule() = default; + explicit Rule(const std::string& s); + Rule(const Rule& r, date::year starting_year, date::year ending_year); + + const std::string& name() const {return name_;} + const std::string& abbrev() const {return abbrev_;} + + const MonthDayTime& mdt() const {return starting_at_;} + const date::year& starting_year() const {return starting_year_;} + const date::year& ending_year() const {return ending_year_;} + const std::chrono::minutes& save() const {return save_;} + + static void split_overlaps(std::vector& rules); + + friend bool operator==(const Rule& x, const Rule& y); + friend bool operator<(const Rule& x, const Rule& y); + friend bool operator==(const Rule& x, const date::year& y); + friend bool operator<(const Rule& x, const date::year& y); + friend bool operator==(const date::year& x, const Rule& y); + friend bool operator<(const date::year& x, const Rule& y); + friend bool operator==(const Rule& x, const std::string& y); + friend bool operator<(const Rule& x, const std::string& y); + friend bool operator==(const std::string& x, const Rule& y); + friend bool operator<(const std::string& x, const Rule& y); + + friend std::ostream& operator<<(std::ostream& os, const Rule& r); + +private: + date::day day() const; + date::month month() const; + static void split_overlaps(std::vector& rules, std::size_t i, std::size_t& e); + static bool overlaps(const Rule& x, const Rule& y); + static void split(std::vector& rules, std::size_t i, std::size_t k, + std::size_t& e); +}; + +inline bool operator!=(const Rule& x, const Rule& y) {return !(x == y);} +inline bool operator> (const Rule& x, const Rule& y) {return y < x;} +inline bool operator<=(const Rule& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const date::year& y) {return !(x == y);} +inline bool operator> (const Rule& x, const date::year& y) {return y < x;} +inline bool operator<=(const Rule& x, const date::year& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const date::year& y) {return !(x < y);} + +inline bool operator!=(const date::year& x, const Rule& y) {return !(x == y);} +inline bool operator> (const date::year& x, const Rule& y) {return y < x;} +inline bool operator<=(const date::year& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const date::year& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const std::string& y) {return !(x == y);} +inline bool operator> (const Rule& x, const std::string& y) {return y < x;} +inline bool operator<=(const Rule& x, const std::string& y) {return !(y < x);} +inline bool 
operator>=(const Rule& x, const std::string& y) {return !(x < y);} + +inline bool operator!=(const std::string& x, const Rule& y) {return !(x == y);} +inline bool operator> (const std::string& x, const Rule& y) {return y < x;} +inline bool operator<=(const std::string& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const std::string& x, const Rule& y) {return !(x < y);} + +struct zonelet +{ + enum tag {has_rule, has_save, is_empty}; + + std::chrono::seconds gmtoff_; + tag tag_ = has_rule; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + std::string rule_; + std::chrono::minutes save_; + + ~U() {} + U() {} + U(const U&) {} + U& operator=(const U&) = delete; + } u; + + std::string format_; + date::year until_year_{0}; + MonthDayTime until_date_; + sys_seconds until_utc_; + local_seconds until_std_; + local_seconds until_loc_; + std::chrono::minutes initial_save_{0}; + std::string initial_abbrev_; + std::pair first_rule_{nullptr, date::year::min()}; + std::pair last_rule_{nullptr, date::year::max()}; + + ~zonelet(); + zonelet(); + zonelet(const zonelet& i); + zonelet& operator=(const zonelet&) = delete; +}; + +#else // USE_OS_TZDB + +struct ttinfo +{ + std::int32_t tt_gmtoff; + unsigned char tt_isdst; + unsigned char tt_abbrind; + unsigned char pad[2]; +}; + +static_assert(sizeof(ttinfo) == 8, ""); + +struct expanded_ttinfo +{ + std::chrono::seconds offset; + std::string abbrev; + bool is_dst; +}; + +struct transition +{ + sys_seconds timepoint; + const expanded_ttinfo* info; + + transition(sys_seconds tp, const expanded_ttinfo* i = nullptr) + : timepoint(tp) + , info(i) + {} + + friend + std::ostream& + operator<<(std::ostream& os, const transition& t) + { + using date::operator<<; + os << t.timepoint << "Z "; + if (t.info->offset >= std::chrono::seconds{0}) + os << '+'; + os << make_time(t.info->offset); + if (t.info->is_dst > 0) + os << " daylight "; + else + os << " standard "; + os << t.info->abbrev; + return os; + } +}; + +#endif // USE_OS_TZDB + +} // namespace detail + +} // namespace date +} // namespace arrow_vendored + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz.h" +#endif + +#endif // TZ_PRIVATE_H diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..ae031238d85ac81f91bdaccc8c519c250a6fa056 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(ARROW_STATIC) +// intentially empty +#elif defined(ARROW_EXPORTING) +#define DATE_BUILD_DLL +#else +#define DATE_USE_DLL +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd0f04dcf02c8222b24e8e5c55654e80d127684 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h @@ -0,0 +1,90 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_ +#define DOUBLE_CONVERSION_FAST_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum FastDtoaMode { + // Computes the shortest representation of the given input. The returned + // result will be the most accurate number of this length. Longer + // representations might be more accurate. + FAST_DTOA_SHORTEST, + // Same as FAST_DTOA_SHORTEST but for single-precision floats. + FAST_DTOA_SHORTEST_SINGLE, + // Computes a representation where the precision (number of digits) is + // given as input. The precision is independent of the decimal point. + FAST_DTOA_PRECISION +}; + +// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not +// include the terminating '\0' character. +static const int kFastDtoaMaximalLength = 17; +// Same for single-precision numbers. +static const int kFastDtoaMaximalSingleLength = 9; + +// Provides a decimal representation of v. +// The result should be interpreted as buffer * 10^(point - length). +// +// Precondition: +// * v must be a strictly positive finite double. +// +// Returns true if it succeeds, otherwise the result can not be trusted. +// There will be *length digits inside the buffer followed by a null terminator. 
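+// For example (illustrative), FastDtoa(0.5, FAST_DTOA_SHORTEST, 0, buffer,
+// &length, &point) fills buffer with "5" and sets length = 1, point = 0,
+// since 0.5 == 5 * 10^(0 - 1).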
+// If the function returns true and mode equals +// - FAST_DTOA_SHORTEST, then +// the parameter requested_digits is ignored. +// The result satisfies +// v == (double) (buffer * 10^(point - length)). +// The digits in the buffer are the shortest representation possible. E.g. +// if 0.099999999999 and 0.1 represent the same double then "1" is returned +// with point = 0. +// The last digit will be closest to the actual v. That is, even if several +// digits might correctly yield 'v' when read again, the buffer will contain +// the one closest to v. +// - FAST_DTOA_PRECISION, then +// the buffer contains requested_digits digits. +// the difference v - (buffer * 10^(point-length)) is closest to zero for +// all possible representations of requested_digits digits. +// If there are two values that are equally close, then FastDtoa returns +// false. +// For both modes the buffer must be large enough to hold the result. +bool FastDtoa(double d, + FastDtoaMode mode, + int requested_digits, + Vector buffer, + int* length, + int* decimal_point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_FAST_DTOA_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..cf2a59a9805193063997ae621e6e3367d3cc029d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h @@ -0,0 +1,58 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_ +#define DOUBLE_CONVERSION_FIXED_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// Produces digits necessary to print a given number with +// 'fractional_count' digits after the decimal point. 
+// The buffer must be big enough to hold the result plus one terminating null +// character. +// +// The produced digits might be too short in which case the caller has to fill +// the gaps with '0's. +// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and +// decimal_point = -2. +// Halfway cases are rounded towards +/-Infinity (away from 0). The call +// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0. +// The returned buffer may contain digits that would be truncated from the +// shortest representation of the input. +// +// This method only works for some parameters. If it can't handle the input it +// returns false. The output is null-terminated when the function succeeds. +bool FastFixedDtoa(double v, int fractional_count, + Vector buffer, int* length, int* decimal_point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp new file mode 100644 index 0000000000000000000000000000000000000000..36576cfa91d8c903576d56fb807f594f23a5bf7b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp @@ -0,0 +1,649 @@ +/* + * PCG Random Number Generation for C++ + * + * Copyright 2014-2017 Melissa O'Neill , + * and the PCG Project contributors. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + * Licensed under the Apache License, Version 2.0 (provided in + * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) + * or under the MIT license (provided in LICENSE-MIT.txt and at + * http://opensource.org/licenses/MIT), at your option. This file may not + * be copied, modified, or distributed except according to those terms. + * + * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either + * express or implied. See your chosen license for details. + * + * For additional information about the PCG random number generation scheme, + * visit http://www.pcg-random.org/. + */ + +/* + * This file provides support code that is useful for random-number generation + * but not specific to the PCG generation scheme, including: + * - 128-bit int support for platforms where it isn't available natively + * - bit twiddling operations + * - I/O of 128-bit and 8-bit integers + * - Handling the evilness of SeedSeq + * - Support for efficiently producing random numbers less than a given + * bound + */ + +#ifndef PCG_EXTRAS_HPP_INCLUDED +#define PCG_EXTRAS_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __GNUC__ + #include +#endif + +/* + * Abstractions for compiler-specific directives + */ + +#ifdef __GNUC__ + #define PCG_NOINLINE __attribute__((noinline)) +#else + #define PCG_NOINLINE +#endif + +/* + * Some members of the PCG library use 128-bit math. When compiling on 64-bit + * platforms, both GCC and Clang provide 128-bit integer types that are ideal + * for the job. + * + * On 32-bit platforms (or with other compilers), we fall back to a C++ + * class that provides 128-bit unsigned integers instead. It may seem + * like we're reinventing the wheel here, because libraries already exist + * that support large integers, but most existing libraries provide a very + * generic multiprecision code, but here we're operating at a fixed size. 
+ * Also, most other libraries are fairly heavyweight. So we use a direct + * implementation. Sadly, it's much slower than hand-coded assembly or + * direct CPU support. + * + */ +#if __SIZEOF_INT128__ && !PCG_FORCE_EMULATED_128BIT_MATH + namespace arrow_vendored { + namespace pcg_extras { + typedef __uint128_t pcg128_t; + } + } + #define PCG_128BIT_CONSTANT(high,low) \ + ((pcg_extras::pcg128_t(high) << 64) + low) +#else + #include "pcg_uint128.hpp" + namespace arrow_vendored { + namespace pcg_extras { + typedef pcg_extras::uint_x4 pcg128_t; + } + } + #define PCG_128BIT_CONSTANT(high,low) \ + pcg_extras::pcg128_t(high,low) + #define PCG_EMULATED_128BIT_MATH 1 +#endif + + +namespace arrow_vendored { +namespace pcg_extras { + +/* + * We often need to represent a "number of bits". When used normally, these + * numbers are never greater than 128, so an unsigned char is plenty. + * If you're using a nonstandard generator of a larger size, you can set + * PCG_BITCOUNT_T to have it define it as a larger size. (Some compilers + * might produce faster code if you set it to an unsigned int.) + */ + +#ifndef PCG_BITCOUNT_T + typedef uint8_t bitcount_t; +#else + typedef PCG_BITCOUNT_T bitcount_t; +#endif + +/* + * C++ requires us to be able to serialize RNG state by printing or reading + * it from a stream. Because we use 128-bit ints, we also need to be able + * ot print them, so here is code to do so. + * + * This code provides enough functionality to print 128-bit ints in decimal + * and zero-padded in hex. It's not a full-featured implementation. + */ + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, pcg128_t value) +{ + auto desired_base = out.flags() & out.basefield; + bool want_hex = desired_base == out.hex; + + if (want_hex) { + uint64_t highpart = uint64_t(value >> 64); + uint64_t lowpart = uint64_t(value); + auto desired_width = out.width(); + if (desired_width > 16) { + out.width(desired_width - 16); + } + if (highpart != 0 || desired_width > 16) + out << highpart; + CharT oldfill = '\0'; + if (highpart != 0) { + out.width(16); + oldfill = out.fill('0'); + } + auto oldflags = out.setf(decltype(desired_base){}, out.showbase); + out << lowpart; + out.setf(oldflags); + if (highpart != 0) { + out.fill(oldfill); + } + return out; + } + constexpr size_t MAX_CHARS_128BIT = 40; + + char buffer[MAX_CHARS_128BIT]; + char* pos = buffer+sizeof(buffer); + *(--pos) = '\0'; + constexpr auto BASE = pcg128_t(10ULL); + do { + auto div = value / BASE; + auto mod = uint32_t(value - (div * BASE)); + *(--pos) = '0' + char(mod); + value = div; + } while(value != pcg128_t(0ULL)); + return out << pos; +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, pcg128_t& value) +{ + typename std::basic_istream::sentry s(in); + + if (!s) + return in; + + constexpr auto BASE = pcg128_t(10ULL); + pcg128_t current(0ULL); + bool did_nothing = true; + bool overflow = false; + for(;;) { + CharT wide_ch = in.get(); + if (!in.good()) + break; + auto ch = in.narrow(wide_ch, '\0'); + if (ch < '0' || ch > '9') { + in.unget(); + break; + } + did_nothing = false; + pcg128_t digit(uint32_t(ch - '0')); + pcg128_t timesbase = current*BASE; + overflow = overflow || timesbase < current; + current = timesbase + digit; + overflow = overflow || current < digit; + } + + if (did_nothing || overflow) { + in.setstate(std::ios::failbit); + if (overflow) + current = ~pcg128_t(0ULL); + } + + value = current; + + return in; +} + +/* + * Likewise, if people use tiny rngs, we'll be serializing uint8_t. 
+ * If we just used the provided IO operators, they'd read/write chars, + * not ints, so we need to define our own. We *can* redefine this operator + * here because we're in our own namespace. + */ + +template +std::basic_ostream& +operator<<(std::basic_ostream&out, uint8_t value) +{ + return out << uint32_t(value); +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, uint8_t& target) +{ + uint32_t value = 0xdecea5edU; + in >> value; + if (!in && value == 0xdecea5edU) + return in; + if (value > uint8_t(~0)) { + in.setstate(std::ios::failbit); + value = ~0U; + } + target = uint8_t(value); + return in; +} + +/* Unfortunately, the above functions don't get found in preference to the + * built in ones, so we create some more specific overloads that will. + * Ugh. + */ + +inline std::ostream& operator<<(std::ostream& out, uint8_t value) +{ + return pcg_extras::operator<< (out, value); +} + +inline std::istream& operator>>(std::istream& in, uint8_t& value) +{ + return pcg_extras::operator>> (in, value); +} + + + +/* + * Useful bitwise operations. + */ + +/* + * XorShifts are invertable, but they are someting of a pain to invert. + * This function backs them out. It's used by the whacky "inside out" + * generator defined later. + */ + +template +inline itype unxorshift(itype x, bitcount_t bits, bitcount_t shift) +{ + if (2*shift >= bits) { + return x ^ (x >> shift); + } + itype lowmask1 = (itype(1U) << (bits - shift*2)) - 1; + itype highmask1 = ~lowmask1; + itype top1 = x; + itype bottom1 = x & lowmask1; + top1 ^= top1 >> shift; + top1 &= highmask1; + x = top1 | bottom1; + itype lowmask2 = (itype(1U) << (bits - shift)) - 1; + itype bottom2 = x & lowmask2; + bottom2 = unxorshift(bottom2, bits - shift, shift); + bottom2 &= lowmask1; + return top1 | bottom2; +} + +/* + * Rotate left and right. + * + * In ideal world, compilers would spot idiomatic rotate code and convert it + * to a rotate instruction. Of course, opinions vary on what the correct + * idiom is and how to spot it. For clang, sometimes it generates better + * (but still crappy) code if you define PCG_USE_ZEROCHECK_ROTATE_IDIOM. + */ + +template +inline itype rotl(itype value, bitcount_t rot) +{ + constexpr bitcount_t bits = sizeof(itype) * 8; + constexpr bitcount_t mask = bits - 1; +#if PCG_USE_ZEROCHECK_ROTATE_IDIOM + return rot ? (value << rot) | (value >> (bits - rot)) : value; +#else + return (value << rot) | (value >> ((- rot) & mask)); +#endif +} + +template +inline itype rotr(itype value, bitcount_t rot) +{ + constexpr bitcount_t bits = sizeof(itype) * 8; + constexpr bitcount_t mask = bits - 1; +#if PCG_USE_ZEROCHECK_ROTATE_IDIOM + return rot ? (value >> rot) | (value << (bits - rot)) : value; +#else + return (value >> rot) | (value << ((- rot) & mask)); +#endif +} + +/* Unfortunately, both Clang and GCC sometimes perform poorly when it comes + * to properly recognizing idiomatic rotate code, so for we also provide + * assembler directives (enabled with PCG_USE_INLINE_ASM). Boo, hiss. + * (I hope that these compilers get better so that this code can die.) + * + * These overloads will be preferred over the general template code above. 
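+ * A quick sanity check of the fallback idiom: for a uint8_t,
+ * rotr(0b10110001, 3) == 0b00110110, i.e. (value >> 3) | (value << 5).
+ * The ((-rot) & mask) form keeps the left shift in range even when
+ * rot == 0, where the naive (bits - rot) would be an out-of-range shift.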
+ */ +#if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__) + +inline uint8_t rotr(uint8_t value, bitcount_t rot) +{ + asm ("rorb %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +inline uint16_t rotr(uint16_t value, bitcount_t rot) +{ + asm ("rorw %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +inline uint32_t rotr(uint32_t value, bitcount_t rot) +{ + asm ("rorl %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +#if __x86_64__ +inline uint64_t rotr(uint64_t value, bitcount_t rot) +{ + asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} +#endif // __x86_64__ + +#elif defined(_MSC_VER) + // Use MSVC++ bit rotation intrinsics + +#pragma intrinsic(_rotr, _rotr64, _rotr8, _rotr16) + +inline uint8_t rotr(uint8_t value, bitcount_t rot) +{ + return _rotr8(value, rot); +} + +inline uint16_t rotr(uint16_t value, bitcount_t rot) +{ + return _rotr16(value, rot); +} + +inline uint32_t rotr(uint32_t value, bitcount_t rot) +{ + return _rotr(value, rot); +} + +inline uint64_t rotr(uint64_t value, bitcount_t rot) +{ + return _rotr64(value, rot); +} + +#endif // PCG_USE_INLINE_ASM + + +/* + * The C++ SeedSeq concept (modelled by seed_seq) can fill an array of + * 32-bit integers with seed data, but sometimes we want to produce + * larger or smaller integers. + * + * The following code handles this annoyance. + * + * uneven_copy will copy an array of 32-bit ints to an array of larger or + * smaller ints (actually, the code is general it only needing forward + * iterators). The copy is identical to the one that would be performed if + * we just did memcpy on a standard little-endian machine, but works + * regardless of the endian of the machine (or the weirdness of the ints + * involved). + * + * generate_to initializes an array of integers using a SeedSeq + * object. It is given the size as a static constant at compile time and + * tries to avoid memory allocation. If we're filling in 32-bit constants + * we just do it directly. If we need a separate buffer and it's small, + * we allocate it on the stack. Otherwise, we fall back to heap allocation. + * Ugh. + * + * generate_one produces a single value of some integral type using a + * SeedSeq object. + */ + + /* uneven_copy helper, case where destination ints are less than 32 bit. */ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::true_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bitcount_t SRC_SIZE = sizeof(src_t); + constexpr bitcount_t DEST_SIZE = sizeof(dest_t); + constexpr bitcount_t DEST_BITS = DEST_SIZE * 8; + constexpr bitcount_t SCALE = SRC_SIZE / DEST_SIZE; + + size_t count = 0; + src_t value = 0; + + while (dest_first != dest_last) { + if ((count++ % SCALE) == 0) + value = *src_first++; // Get more bits + else + value >>= DEST_BITS; // Move down bits + + *dest_first++ = dest_t(value); // Truncates, ignores high bits. + } + return src_first; +} + + /* uneven_copy helper, case where destination ints are more than 32 bit. 
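+   For example, two 32-bit source words {0x11111111, 0x22222222} become
+   the single 64-bit value 0x2222222211111111: the first word lands in the
+   low bits, matching the little-endian memcpy semantics promised above.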
*/ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::false_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr auto SRC_SIZE = sizeof(src_t); + constexpr auto SRC_BITS = SRC_SIZE * 8; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto SCALE = (DEST_SIZE+SRC_SIZE-1) / SRC_SIZE; + + while (dest_first != dest_last) { + dest_t value(0UL); + unsigned int shift = 0; + + for (size_t i = 0; i < SCALE; ++i) { + value |= dest_t(*src_first++) << shift; + shift += SRC_BITS; + } + + *dest_first++ = value; + } + return src_first; +} + +/* uneven_copy, call the right code for larger vs. smaller */ + +template +inline SrcIter uneven_copy(SrcIter src_first, + DestIter dest_first, DestIter dest_last) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bool DEST_IS_SMALLER = sizeof(dest_t) < sizeof(src_t); + + return uneven_copy_impl(src_first, dest_first, dest_last, + std::integral_constant{}); +} + +/* generate_to, fill in a fixed-size array of integral type using a SeedSeq + * (actually works for any random-access iterator) + */ + +template +inline void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::true_type) +{ + generator.generate(dest, dest+size); +} + +template +void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::false_type) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto GEN_SIZE = sizeof(uint32_t); + + constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; + constexpr size_t FROM_ELEMS = + GEN_IS_SMALLER + ? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) + : (size + (GEN_SIZE / DEST_SIZE) - 1) + / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); + // this odd code ^^^^^^^^^^^^^^^^^ is work-around for + // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 + + if (FROM_ELEMS <= 1024) { + uint32_t buffer[FROM_ELEMS]; + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + } else { + uint32_t* buffer = static_cast(malloc(GEN_SIZE * FROM_ELEMS)); + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + free(static_cast(buffer)); + } +} + +template +inline void generate_to(SeedSeq&& generator, DestIter dest) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); + + generate_to_impl(std::forward(generator), dest, + std::integral_constant{}); +} + +/* generate_one, produce a value of integral type using a SeedSeq + * (optionally, we can have it produce more than one and pick which one + * we want) + */ + +template +inline UInt generate_one(SeedSeq&& generator) +{ + UInt result[N]; + generate_to(std::forward(generator), result); + return result[i]; +} + +template +auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) + -> typename RngType::result_type +{ + typedef typename RngType::result_type rtype; + rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) + % upper_bound; + for (;;) { + rtype r = rng() - RngType::min(); + if (r >= threshold) + return r % upper_bound; + } +} + +template +void shuffle(Iter from, Iter to, RandType&& rng) +{ + typedef typename std::iterator_traits::difference_type delta_t; + typedef typename std::remove_reference::type::result_type result_t; + auto count = to - from; 
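+    // Fisher-Yates: repeatedly draw an unbiased index in [0, count) using
+    // bounded_rand (which rejects low values to avoid modulo bias), then
+    // swap that element into the shrinking tail of the range.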
+ while (count > 1) { + delta_t chosen = delta_t(bounded_rand(rng, result_t(count))); + --count; + --to; + using std::swap; + swap(*(from + chosen), *to); + } +} + +/* + * Although std::seed_seq is useful, it isn't everything. Often we want to + * initialize a random-number generator some other way, such as from a random + * device. + * + * Technically, it does not meet the requirements of a SeedSequence because + * it lacks some of the rarely-used member functions (some of which would + * be impossible to provide). However the C++ standard is quite specific + * that actual engines only called the generate method, so it ought not to be + * a problem in practice. + */ + +template +class seed_seq_from { +private: + RngType rng_; + + typedef uint_least32_t result_type; + +public: + template + seed_seq_from(Args&&... args) : + rng_(std::forward(args)...) + { + // Nothing (else) to do... + } + + template + void generate(Iter start, Iter finish) + { + for (auto i = start; i != finish; ++i) + *i = result_type(rng_()); + } + + constexpr size_t size() const + { + return (sizeof(typename RngType::result_type) > sizeof(result_type) + && RngType::max() > ~size_t(0UL)) + ? ~size_t(0UL) + : size_t(RngType::max()); + } +}; + +// Sometimes, when debugging or testing, it's handy to be able print the name +// of a (in human-readable form). This code allows the idiom: +// +// cout << printable_typename() +// +// to print out my_foo_type_t (or its concrete type if it is a synonym) + +#if __cpp_rtti || __GXX_RTTI + +template +struct printable_typename {}; + +template +std::ostream& operator<<(std::ostream& out, printable_typename) { + const char *implementation_typename = typeid(T).name(); +#ifdef __GNUC__ + int status; + char* pretty_name = + abi::__cxa_demangle(implementation_typename, nullptr, nullptr, &status); + if (status == 0) + out << pretty_name; + free(static_cast(pretty_name)); + if (status == 0) + return out; +#endif + out << implementation_typename; + return out; +} + +#endif // __cpp_rtti || __GXX_RTTI + +} // namespace pcg_extras +} // namespace arrow_vendored + +#endif // PCG_EXTRAS_HPP_INCLUDED diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp new file mode 100644 index 0000000000000000000000000000000000000000..e39e61e908a2a30863673fecd0f19e5090b40179 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp @@ -0,0 +1,1954 @@ +/* + * PCG Random Number Generation for C++ + * + * Copyright 2014-2019 Melissa O'Neill , + * and the PCG Project contributors. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + * Licensed under the Apache License, Version 2.0 (provided in + * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) + * or under the MIT license (provided in LICENSE-MIT.txt and at + * http://opensource.org/licenses/MIT), at your option. This file may not + * be copied, modified, or distributed except according to those terms. + * + * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either + * express or implied. See your chosen license for details. + * + * For additional information about the PCG random number generation scheme, + * visit http://www.pcg-random.org/. + */ + +/* + * This code provides the reference implementation of the PCG family of + * random number generators. 
The code is complex because it implements + * + * - several members of the PCG family, specifically members corresponding + * to the output functions: + * - XSH RR (good for 64-bit state, 32-bit output) + * - XSH RS (good for 64-bit state, 32-bit output) + * - XSL RR (good for 128-bit state, 64-bit output) + * - RXS M XS (statistically most powerful generator) + * - XSL RR RR (good for 128-bit state, 128-bit output) + * - and RXS, RXS M, XSH, XSL (mostly for testing) + * - at potentially *arbitrary* bit sizes + * - with four different techniques for random streams (MCG, one-stream + * LCG, settable-stream LCG, unique-stream LCG) + * - and the extended generation schemes allowing arbitrary periods + * - with all features of C++11 random number generation (and more), + * some of which are somewhat painful, including + * - initializing with a SeedSequence which writes 32-bit values + * to memory, even though the state of the generator may not + * use 32-bit values (it might use smaller or larger integers) + * - I/O for RNGs and a prescribed format, which needs to handle + * the issue that 8-bit and 128-bit integers don't have working + * I/O routines (e.g., normally 8-bit = char, not integer) + * - equality and inequality for RNGs + * - and a number of convenience typedefs to mask all the complexity + * + * The code employes a fairly heavy level of abstraction, and has to deal + * with various C++ minutia. If you're looking to learn about how the PCG + * scheme works, you're probably best of starting with one of the other + * codebases (see www.pcg-random.org). But if you're curious about the + * constants for the various output functions used in those other, simpler, + * codebases, this code shows how they are calculated. + * + * On the positive side, at least there are convenience typedefs so that you + * can say + * + * pcg32 myRNG; + * + * rather than: + * + * pcg_detail::engine< + * uint32_t, // Output Type + * uint64_t, // State Type + * pcg_detail::xsh_rr_mixin, true, // Output Func + * pcg_detail::specific_stream, // Stream Kind + * pcg_detail::default_multiplier // LCG Mult + * > myRNG; + * + */ + +#ifndef PCG_RAND_HPP_INCLUDED +#define PCG_RAND_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(disable:4146) +#endif + +#ifdef _MSC_VER + #define PCG_ALWAYS_INLINE __forceinline +#elif __GNUC__ + #define PCG_ALWAYS_INLINE __attribute__((always_inline)) +#else + #define PCG_ALWAYS_INLINE inline +#endif + +/* + * The pcg_extras namespace contains some support code that is likley to + * be useful for a variety of RNGs, including: + * - 128-bit int support for platforms where it isn't available natively + * - bit twiddling operations + * - I/O of 128-bit and 8-bit integers + * - Handling the evilness of SeedSeq + * - Support for efficiently producing random numbers less than a given + * bound + */ + +#include "pcg_extras.hpp" + +namespace arrow_vendored { +namespace pcg_detail { + +using namespace pcg_extras; + +/* + * The LCG generators need some constants to function. This code lets you + * look up the constant by *type*. For example + * + * default_multiplier::multiplier() + * + * gives you the default multipler for 32-bit integers. We use the name + * of the constant and not a generic word like value to allow these classes + * to be used as mixins. 
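+ *
+ * (For example, default_multiplier<uint64_t>::multiplier() below is
+ * 6364136223846793005, the LCG multiplier popularized by Knuth's MMIX.)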
+ */ + +template +struct default_multiplier { + // Not defined for an arbitrary type +}; + +template +struct default_increment { + // Not defined for an arbitrary type +}; + +#define PCG_DEFINE_CONSTANT(type, what, kind, constant) \ + template <> \ + struct what ## _ ## kind { \ + static constexpr type kind() { \ + return constant; \ + } \ + }; + +PCG_DEFINE_CONSTANT(uint8_t, default, multiplier, 141U) +PCG_DEFINE_CONSTANT(uint8_t, default, increment, 77U) + +PCG_DEFINE_CONSTANT(uint16_t, default, multiplier, 12829U) +PCG_DEFINE_CONSTANT(uint16_t, default, increment, 47989U) + +PCG_DEFINE_CONSTANT(uint32_t, default, multiplier, 747796405U) +PCG_DEFINE_CONSTANT(uint32_t, default, increment, 2891336453U) + +PCG_DEFINE_CONSTANT(uint64_t, default, multiplier, 6364136223846793005ULL) +PCG_DEFINE_CONSTANT(uint64_t, default, increment, 1442695040888963407ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, default, multiplier, + PCG_128BIT_CONSTANT(2549297995355413924ULL,4865540595714422341ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, default, increment, + PCG_128BIT_CONSTANT(6364136223846793005ULL,1442695040888963407ULL)) + +/* Alternative (cheaper) multipliers for 128-bit */ + +template +struct cheap_multiplier : public default_multiplier { + // For most types just use the default. +}; + +template <> +struct cheap_multiplier { + static constexpr uint64_t multiplier() { + return 0xda942042e4dd58b5ULL; + } +}; + + +/* + * Each PCG generator is available in four variants, based on how it applies + * the additive constant for its underlying LCG; the variations are: + * + * single stream - all instances use the same fixed constant, thus + * the RNG always somewhere in same sequence + * mcg - adds zero, resulting in a single stream and reduced + * period + * specific stream - the constant can be changed at any time, selecting + * a different random sequence + * unique stream - the constant is based on the memory address of the + * object, thus every RNG has its own unique sequence + * + * This variation is provided though mixin classes which define a function + * value called increment() that returns the nesessary additive constant. + */ + + + +/* + * unique stream + */ + + +template +class unique_stream { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + constexpr itype increment() const { + return itype(reinterpret_cast(this) | 1); + } + + constexpr itype stream() const + { + return increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype) < sizeof(size_t) ? sizeof(itype) + : sizeof(size_t))*8 - 1u; + } + +protected: + constexpr unique_stream() = default; +}; + + +/* + * no stream (mcg) + */ + +template +class no_stream { +protected: + static constexpr bool is_mcg = true; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) 
+ { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype increment() { + return 0; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr no_stream() = default; +}; + + +/* + * single stream/sequence (oneseq) + */ + +template +class oneseq_stream : public default_increment { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype stream() + { + return default_increment::increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr oneseq_stream() = default; +}; + + +/* + * specific stream + */ + +template +class specific_stream { +protected: + static constexpr bool is_mcg = false; + + itype inc_ = default_increment::increment(); + +public: + typedef itype state_type; + typedef itype stream_state; + + constexpr itype increment() const { + return inc_; + } + + itype stream() + { + return inc_ >> 1; + } + + void set_stream(itype specific_seq) + { + inc_ = (specific_seq << 1) | 1; + } + + static constexpr bool can_specify_stream = true; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype)*8) - 1u; + } + +protected: + specific_stream() = default; + + specific_stream(itype specific_seq) + : inc_(itype(specific_seq << 1) | itype(1U)) + { + // Nothing (else) to do. + } +}; + + +/* + * This is where it all comes together. This function joins together three + * mixin classes which define + * - the LCG additive constant (the stream) + * - the LCG multiplier + * - the output function + * in addition, we specify the type of the LCG state, and the result type, + * and whether to use the pre-advance version of the state for the output + * (increasing instruction-level parallelism) or the post-advance version + * (reducing register pressure). + * + * Given the high level of parameterization, the code has to use some + * template-metaprogramming tricks to handle some of the suble variations + * involved. + */ + +template , + typename multiplier_mixin = default_multiplier > +class engine : protected output_mixin, + public stream_mixin, + protected multiplier_mixin { +protected: + itype state_; + + struct can_specify_stream_tag {}; + struct no_specifiable_stream_tag {}; + + using stream_mixin::increment; + using multiplier_mixin::multiplier; + +public: + typedef xtype result_type; + typedef itype state_type; + + static constexpr size_t period_pow2() + { + return sizeof(state_type)*8 - 2*stream_mixin::is_mcg; + } + + // It would be nice to use std::numeric_limits for these, but + // we can't be sure that it'd be defined for the 128-bit types. 
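+    // (In particular, pcg128_t may be the emulated 128-bit class from
+    // pcg_uint128.hpp rather than __uint128_t, and the emulated class has
+    // no std::numeric_limits specialization.)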
+ + static constexpr result_type min() + { + return result_type(0UL); + } + + static constexpr result_type max() + { + return result_type(~result_type(0UL)); + } + +protected: + itype bump(itype state) + { + return state * multiplier() + increment(); + } + + itype base_generate() + { + return state_ = bump(state_); + } + + itype base_generate0() + { + itype old_state = state_; + state_ = bump(state_); + return old_state; + } + +public: + result_type operator()() + { + if (output_previous) + return this->output(base_generate0()); + else + return this->output(base_generate()); + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + +protected: + static itype advance(itype state, itype delta, + itype cur_mult, itype cur_plus); + + static itype distance(itype cur_state, itype newstate, itype cur_mult, + itype cur_plus, itype mask = ~itype(0U)); + + itype distance(itype newstate, itype mask = itype(~itype(0U))) const + { + return distance(state_, newstate, multiplier(), increment(), mask); + } + +public: + void advance(itype delta) + { + state_ = advance(state_, delta, this->multiplier(), this->increment()); + } + + void backstep(itype delta) + { + advance(-delta); + } + + void discard(itype delta) + { + advance(delta); + } + + bool wrapped() + { + if (stream_mixin::is_mcg) { + // For MCGs, the low order two bits never change. In this + // implementation, we keep them fixed at 3 to make this test + // easier. + return state_ == 3; + } else { + return state_ == 0; + } + } + + engine(itype state = itype(0xcafef00dd15ea5e5ULL)) + : state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + engine(itype state, typename sm::stream_state stream_seed) + : stream_mixin(stream_seed), + state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + !stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + no_specifiable_stream_tag>::type = {}) + : engine(generate_one(std::forward(seedSeq))) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + can_specify_stream_tag>::type = {}) + { + itype seeddata[2]; + generate_to<2>(std::forward(seedSeq), seeddata); + seed(seeddata[1], seeddata[0]); + } + + + template + void seed(Args&&... 
args) + { + new (this) engine(std::forward(args)...); + } + + template + friend bool operator==(const engine&, + const engine&); + + template + friend itype1 operator-(const engine&, + const engine&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const engine&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + engine& rng); +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const engine& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + + +template +std::basic_istream& +operator>>(std::basic_istream& in, + engine& rng) +{ + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + itype multiplier, increment, state; + in >> multiplier >> increment >> state; + + if (!in.fail()) { + bool good = true; + if (multiplier != rng.multiplier()) { + good = false; + } else if (rng.can_specify_stream) { + rng.set_stream(increment >> 1); + } else if (increment != rng.increment()) { + good = false; + } + if (good) { + rng.state_ = state; + } else { + in.clear(std::ios::failbit); + } + } + + in.flags(orig_flags); + return in; +} + + +template +itype engine::advance( + itype state, itype delta, itype cur_mult, itype cur_plus) +{ + // The method used here is based on Brown, "Random Number Generation + // with Arbitrary Stride,", Transactions of the American Nuclear + // Society (Nov. 1994). The algorithm is very similar to fast + // exponentiation. + // + // Even though delta is an unsigned integer, we can pass a + // signed integer to go backwards, it just goes "the long way round". + + constexpr itype ZERO = 0u; // itype may be a non-trivial types, so + constexpr itype ONE = 1u; // we define some ugly constants. + itype acc_mult = 1; + itype acc_plus = 0; + while (delta > ZERO) { + if (delta & ONE) { + acc_mult *= cur_mult; + acc_plus = acc_plus*cur_mult + cur_plus; + } + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + delta >>= 1; + } + return acc_mult * state + acc_plus; +} + +template +itype engine::distance( + itype cur_state, itype newstate, itype cur_mult, itype cur_plus, itype mask) +{ + constexpr itype ONE = 1u; // itype could be weird, so use constant + bool is_mcg = cur_plus == itype(0); + itype the_bit = is_mcg ? itype(4u) : itype(1u); + itype distance = 0u; + while ((cur_state & mask) != (newstate & mask)) { + if ((cur_state & the_bit) != (newstate & the_bit)) { + cur_state = cur_state * cur_mult + cur_plus; + distance |= the_bit; + } + assert((cur_state & the_bit) == (newstate & the_bit)); + the_bit <<= 1; + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + } + return is_mcg ? 
distance >> 2 : distance; +} + +template +itype operator-(const engine& lhs, + const engine& rhs) +{ + static_assert( + std::is_same::value && + std::is_same::value, + "Incomparable generators"); + if (lhs.increment() == rhs.increment()) { + return rhs.distance(lhs.state_); + } else { + constexpr itype ONE = 1u; + itype lhs_diff = lhs.increment() + (lhs.multiplier()-ONE) * lhs.state_; + itype rhs_diff = rhs.increment() + (rhs.multiplier()-ONE) * rhs.state_; + if ((lhs_diff & itype(3u)) != (rhs_diff & itype(3u))) { + rhs_diff = -rhs_diff; + } + return rhs.distance(rhs_diff, lhs_diff, rhs.multiplier(), itype(0u)); + } +} + + +template +bool operator==(const engine& lhs, + const engine& rhs) +{ + return (lhs.multiplier() == rhs.multiplier()) + && (lhs.increment() == rhs.increment()) + && (lhs.state_ == rhs.state_); +} + +template +inline bool operator!=(const engine& lhs, + const engine& rhs) +{ + return !operator==(lhs,rhs); +} + + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using oneseq_base = engine, output_previous, + oneseq_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using unique_base = engine, output_previous, + unique_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using setseq_base = engine, output_previous, + specific_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using mcg_base = engine, output_previous, + no_stream, + multiplier_mixin >; + +/* + * OUTPUT FUNCTIONS. + * + * These are the core of the PCG generation scheme. They specify how to + * turn the base LCG's internal state into the output value of the final + * generator. + * + * They're implemented as mixin classes. + * + * All of the classes have code that is written to allow it to be applied + * at *arbitrary* bit sizes, although in practice they'll only be used at + * standard sizes supported by C++. + */ + +/* + * XSH RS -- high xorshift, followed by a random shift + * + * Fast. A good performer. + */ + +template +struct xsh_rs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t opbits = + sparebits-5 >= 64 ? 5 + : sparebits-4 >= 32 ? 4 + : sparebits-3 >= 16 ? 3 + : sparebits-2 >= 4 ? 2 + : sparebits-1 >= 1 ? 1 + : 0; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t maxrandshift = mask; + constexpr bitcount_t topspare = opbits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = topspare + (xtypebits+maxrandshift)/2; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> xshift; + xtype result = xtype(internal >> (bottomspare - maxrandshift + rshift)); + return result; + } +}; + +/* + * XSH RR -- high xorshift, followed by a random rotate + * + * Fast. A good performer. Slightly better statistically than XSH RS. 
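+ *
+ * (Concretely, in the 64-to-32-bit case used by pcg32: the top 5 bits of
+ * state select the rotation, the state is xorshifted right by 18, and
+ * bits 58..27 of the result are rotated right by that amount.)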
+ */
+
+template <typename xtype, typename itype>
+struct xsh_rr_mixin {
+    static xtype output(itype internal)
+    {
+        constexpr bitcount_t bits        = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t xtypebits   = bitcount_t(sizeof(xtype)*8);
+        constexpr bitcount_t sparebits   = bits - xtypebits;
+        constexpr bitcount_t wantedopbits =
+                              xtypebits >= 128 ? 7
+                            : xtypebits >=  64 ? 6
+                            : xtypebits >=  32 ? 5
+                            : xtypebits >=  16 ? 4
+                            :                    3;
+        constexpr bitcount_t opbits =
+                              sparebits >= wantedopbits ? wantedopbits
+                                                        : sparebits;
+        constexpr bitcount_t amplifier = wantedopbits - opbits;
+        constexpr bitcount_t mask = (1 << opbits) - 1;
+        constexpr bitcount_t topspare = opbits;
+        constexpr bitcount_t bottomspare = sparebits - topspare;
+        constexpr bitcount_t xshift = (topspare + xtypebits)/2;
+        bitcount_t rot = opbits ? bitcount_t(internal >> (bits - opbits)) & mask
+                                : 0;
+        bitcount_t amprot = (rot << amplifier) & mask;
+        internal ^= internal >> xshift;
+        xtype result = xtype(internal >> bottomspare);
+        result = rotr(result, amprot);
+        return result;
+    }
+};
+
+/*
+ * RXS -- random xorshift
+ */
+
+template <typename xtype, typename itype>
+struct rxs_mixin {
+    static xtype output_rxs(itype internal)
+    {
+        constexpr bitcount_t bits       = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t xtypebits  = bitcount_t(sizeof(xtype)*8);
+        constexpr bitcount_t shift      = bits - xtypebits;
+        constexpr bitcount_t extrashift = (xtypebits - shift)/2;
+        bitcount_t rshift = shift > 64+8 ? (internal >> (bits - 6)) & 63
+                          : shift > 32+4 ? (internal >> (bits - 5)) & 31
+                          : shift > 16+2 ? (internal >> (bits - 4)) & 15
+                          : shift >  8+1 ? (internal >> (bits - 3)) & 7
+                          : shift >  4+1 ? (internal >> (bits - 2)) & 3
+                          : shift >  2+1 ? (internal >> (bits - 1)) & 1
+                          :                0;
+        internal ^= internal >> (shift + extrashift - rshift);
+        xtype result = internal >> rshift;
+        return result;
+    }
+};
+
+/*
+ * RXS M XS -- random xorshift, mcg multiply, fixed xorshift
+ *
+ * The most statistically powerful generator, but all those steps
+ * make it slower than some of the others.  We give it the rottenest jobs.
+ *
+ * Because it's usually used in contexts where the state type and the
+ * result type are the same, it is a permutation and is thus invertible.
+ * We thus provide a function to invert it.  This function is used for
+ * the "inside out" generator used by the extended generator.
+ */
+
+/* Type-specific constants for the multiplication step.  They're all
+ * derived by truncating the 128-bit constant, which was computed to be
+ * a good "universal" constant.
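+ *
+ * (Sanity check: each multiplier/unmultiplier pair below are inverses
+ * modulo 2^bits; at 8 bits, for instance, 217 * 105 == 22785 ==
+ * 89 * 256 + 1.)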
+ */ + +template +struct mcg_multiplier { + // Not defined for an arbitrary type +}; + +template +struct mcg_unmultiplier { + // Not defined for an arbitrary type +}; + +PCG_DEFINE_CONSTANT(uint8_t, mcg, multiplier, 217U) +PCG_DEFINE_CONSTANT(uint8_t, mcg, unmultiplier, 105U) + +PCG_DEFINE_CONSTANT(uint16_t, mcg, multiplier, 62169U) +PCG_DEFINE_CONSTANT(uint16_t, mcg, unmultiplier, 28009U) + +PCG_DEFINE_CONSTANT(uint32_t, mcg, multiplier, 277803737U) +PCG_DEFINE_CONSTANT(uint32_t, mcg, unmultiplier, 2897767785U) + +PCG_DEFINE_CONSTANT(uint64_t, mcg, multiplier, 12605985483714917081ULL) +PCG_DEFINE_CONSTANT(uint64_t, mcg, unmultiplier, 15009553638781119849ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, mcg, multiplier, + PCG_128BIT_CONSTANT(17766728186571221404ULL, 12605985483714917081ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, mcg, unmultiplier, + PCG_128BIT_CONSTANT(14422606686972528997ULL, 15009553638781119849ULL)) + + +template +struct rxs_m_xs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + result ^= result >> ((2U*xtypebits+2U)/3U); + return result; + } + + static itype unoutput(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = bits >= 128 ? 6 + : bits >= 64 ? 5 + : bits >= 32 ? 4 + : bits >= 16 ? 3 + : 2; + constexpr bitcount_t mask = (1 << opbits) - 1; + + internal = unxorshift(internal, bits, (2U*bits+2U)/3U); + + internal *= mcg_unmultiplier::unmultiplier(); + + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal = unxorshift(internal, bits, opbits + rshift); + + return internal; + } +}; + + +/* + * RXS M -- random xorshift, mcg multiply + */ + +template +struct rxs_m_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + return result; + } +}; + + +/* + * DXSM -- double xorshift multiply + * + * This is a new, more powerful output permutation (added in 2019). It's + * a more comprehensive scrambling than RXS M, but runs faster on 128-bit + * types. Although primarily intended for use at large sizes, also works + * at smaller sizes as well. + * + * This permutation is similar to xorshift multiply hash functions, except + * that one of the multipliers is the LCG multiplier (to avoid needing to + * have a second constant) and the other is based on the low-order bits. 
+ * This latter aspect means that the scrambling applied to the high bits + * depends on the low bits, and makes it (to my eye) impractical to back + * out the permutation without having the low-order bits. + */ + +template +struct dxsm_mixin { + inline xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t itypebits = bitcount_t(sizeof(itype) * 8); + static_assert(xtypebits <= itypebits/2, + "Output type must be half the size of the state type."); + + xtype hi = xtype(internal >> (itypebits - xtypebits)); + xtype lo = xtype(internal); + + lo |= 1; + hi ^= hi >> (xtypebits/2); + hi *= xtype(cheap_multiplier::multiplier()); + hi ^= hi >> (3*(xtypebits/4)); + hi *= lo; + return hi; + } +}; + + +/* + * XSL RR -- fixed xorshift (to low bits), random rotate + * + * Useful for 128-bit types that are split across two CPU registers. + */ + +template +struct xsl_rr_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t wantedopbits = xtypebits >= 128 ? 7 + : xtypebits >= 64 ? 6 + : xtypebits >= 32 ? 5 + : xtypebits >= 16 ? 4 + : 3; + constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits + : sparebits; + constexpr bitcount_t amplifier = wantedopbits - opbits; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + bitcount_t rot = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + xtype result = xtype(internal >> bottomspare); + result = rotr(result, amprot); + return result; + } +}; + + +/* + * XSL RR RR -- fixed xorshift (to low bits), random rotate (both parts) + * + * Useful for 128-bit types that are split across two CPU registers. + * If you really want an invertable 128-bit RNG, I guess this is the one. + */ + +template struct halfsize_trait {}; +template <> struct halfsize_trait { typedef uint64_t type; }; +template <> struct halfsize_trait { typedef uint32_t type; }; +template <> struct halfsize_trait { typedef uint16_t type; }; +template <> struct halfsize_trait { typedef uint8_t type; }; + +template +struct xsl_rr_rr_mixin { + typedef typename halfsize_trait::type htype; + + static itype output(itype internal) + { + constexpr bitcount_t htypebits = bitcount_t(sizeof(htype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - htypebits; + constexpr bitcount_t wantedopbits = htypebits >= 128 ? 7 + : htypebits >= 64 ? 6 + : htypebits >= 32 ? 5 + : htypebits >= 16 ? 4 + : 3; + constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits + : sparebits; + constexpr bitcount_t amplifier = wantedopbits - opbits; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t xshift = (topspare + htypebits) / 2; + + bitcount_t rot = + opbits ? 
bitcount_t(internal >> (bits - opbits)) & mask : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + htype lowbits = htype(internal); + lowbits = rotr(lowbits, amprot); + htype highbits = htype(internal >> topspare); + bitcount_t rot2 = lowbits & mask; + bitcount_t amprot2 = (rot2 << amplifier) & mask; + highbits = rotr(highbits, amprot2); + return (itype(highbits) << topspare) ^ itype(lowbits); + } +}; + + +/* + * XSH -- fixed xorshift (to high bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsh_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = 0; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + +/* + * XSL -- fixed xorshift (to low bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsl_mixin { + inline xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + + +/* ---- End of Output Functions ---- */ + + +template +struct inside_out : private baseclass { + inside_out() = delete; + + typedef typename baseclass::result_type result_type; + typedef typename baseclass::state_type state_type; + static_assert(sizeof(result_type) == sizeof(state_type), + "Require a RNG whose output function is a permutation"); + + static bool external_step(result_type& randval, size_t i) + { + state_type state = baseclass::unoutput(randval); + state = state * baseclass::multiplier() + baseclass::increment() + + state_type(i*2); + result_type result = baseclass::output(state); + randval = result; + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + return result == zero; + } + + static bool external_advance(result_type& randval, size_t i, + result_type delta, bool forwards = true) + { + state_type state = baseclass::unoutput(randval); + state_type mult = baseclass::multiplier(); + state_type inc = baseclass::increment() + state_type(i*2); + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + state_type dist_to_zero = baseclass::distance(state, zero, mult, inc); + bool crosses_zero = + forwards ? 
dist_to_zero <= delta + : (-dist_to_zero) <= delta; + if (!forwards) + delta = -delta; + state = baseclass::advance(state, delta, mult, inc); + randval = baseclass::output(state); + return crosses_zero; + } +}; + + +template +class extended : public baseclass { +public: + typedef typename baseclass::state_type state_type; + typedef typename baseclass::result_type result_type; + typedef inside_out insideout; + +private: + static constexpr bitcount_t rtypebits = sizeof(result_type)*8; + static constexpr bitcount_t stypebits = sizeof(state_type)*8; + + static constexpr bitcount_t tick_limit_pow2 = 64U; + + static constexpr size_t table_size = 1UL << table_pow2; + static constexpr size_t table_shift = stypebits - table_pow2; + static constexpr state_type table_mask = + (state_type(1U) << table_pow2) - state_type(1U); + + static constexpr bool may_tick = + (advance_pow2 < stypebits) && (advance_pow2 < tick_limit_pow2); + static constexpr size_t tick_shift = stypebits - advance_pow2; + static constexpr state_type tick_mask = + may_tick ? state_type( + (uint64_t(1) << (advance_pow2*may_tick)) - 1) + // ^-- stupidity to appease GCC warnings + : ~state_type(0U); + + static constexpr bool may_tock = stypebits < tick_limit_pow2; + + result_type data_[table_size]; + + PCG_NOINLINE void advance_table(); + + PCG_NOINLINE void advance_table(state_type delta, bool isForwards = true); + + result_type& get_extended_value() + { + state_type state = this->state_; + if (kdd && baseclass::is_mcg) { + // The low order bits of an MCG are constant, so drop them. + state >>= 2; + } + size_t index = kdd ? state & table_mask + : state >> table_shift; + + if (may_tick) { + bool tick = kdd ? (state & tick_mask) == state_type(0u) + : (state >> tick_shift) == state_type(0u); + if (tick) + advance_table(); + } + if (may_tock) { + bool tock = state == state_type(0u); + if (tock) + advance_table(); + } + return data_[index]; + } + +public: + static constexpr size_t period_pow2() + { + return baseclass::period_pow2() + table_size*extvalclass::period_pow2(); + } + + PCG_ALWAYS_INLINE result_type operator()() + { + result_type rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + return lhs ^ rhs; + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + + void set(result_type wanted) + { + result_type& rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + rhs = lhs ^ wanted; + } + + void advance(state_type distance, bool forwards = true); + + void backstep(state_type distance) + { + advance(distance, false); + } + + extended(const result_type* data) + : baseclass() + { + datainit(data); + } + + extended(const result_type* data, state_type seed) + : baseclass(seed) + { + datainit(data); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + extended(const result_type* data, state_type seed, + typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + datainit(data); + } + + extended() + : baseclass() + { + selfinit(); + } + + extended(state_type seed) + : baseclass(seed) + { + selfinit(); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. 
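+    // (SFINAE detail: this constructor takes part in overload resolution
+    // only when the stream mixin defines a stream_state type, i.e. only
+    // for the settable-stream variants.)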
+ + template + extended(state_type seed, typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + selfinit(); + } + +private: + void selfinit(); + void datainit(const result_type* data); + +public: + + template::value + && !std::is_convertible::value>::type> + extended(SeedSeq&& seedSeq) + : baseclass(seedSeq) + { + generate_to(seedSeq, data_); + } + + template + void seed(Args&&... args) + { + new (this) extended(std::forward(args)...); + } + + template + friend bool operator==(const extended&, + const extended&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const extended&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + extended&); + +}; + + +template +void extended::datainit( + const result_type* data) +{ + for (size_t i = 0; i < table_size; ++i) + data_[i] = data[i]; +} + +template +void extended::selfinit() +{ + // We need to fill the extended table with something, and we have + // very little provided data, so we use the base generator to + // produce values. Although not ideal (use a seed sequence, folks!), + // unexpected correlations are mitigated by + // - using XOR differences rather than the number directly + // - the way the table is accessed, its values *won't* be accessed + // in the same order the were written. + // - any strange correlations would only be apparent if we + // were to backstep the generator so that the base generator + // was generating the same values again + result_type lhs = baseclass::operator()(); + result_type rhs = baseclass::operator()(); + result_type xdiff = lhs - rhs; + for (size_t i = 0; i < table_size; ++i) { + data_[i] = baseclass::operator()() ^ xdiff; + } +} + +template +bool operator==(const extended& lhs, + const extended& rhs) +{ + auto& base_lhs = static_cast(lhs); + auto& base_rhs = static_cast(rhs); + return base_lhs == base_rhs + && std::equal( + std::begin(lhs.data_), std::end(lhs.data_), + std::begin(rhs.data_) + ); +} + +template +inline bool operator!=(const extended& lhs, + const extended& rhs) +{ + return !operator==(lhs, rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const extended& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + for (const auto& datum : rng.data_) + out << space << datum; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, + extended& rng) +{ + extended new_rng; + auto& base_rng = static_cast(new_rng); + in >> base_rng; + + if (in.fail()) + return in; + + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + for (auto& datum : new_rng.data_) { + in >> datum; + if (in.fail()) + goto bail; + } + + rng = new_rng; + +bail: + in.flags(orig_flags); + return in; +} + + + +template +void +extended::advance_table() +{ + bool carry = false; + for (size_t i = 0; i < table_size; ++i) { + if (carry) { + carry = insideout::external_step(data_[i],i+1); + } + bool carry2 = insideout::external_step(data_[i],i+1); + carry = carry || carry2; + } +} + +template +void +extended::advance_table( + state_type delta, bool isForwards) +{ + typedef typename baseclass::state_type base_state_t; + typedef typename extvalclass::state_type ext_state_t; + constexpr 
bitcount_t basebits = sizeof(base_state_t)*8; + constexpr bitcount_t extbits = sizeof(ext_state_t)*8; + static_assert(basebits <= extbits || advance_pow2 > 0, + "Current implementation might overflow its carry"); + + base_state_t carry = 0; + for (size_t i = 0; i < table_size; ++i) { + base_state_t total_delta = carry + delta; + ext_state_t trunc_delta = ext_state_t(total_delta); + if (basebits > extbits) { + carry = total_delta >> extbits; + } else { + carry = 0; + } + carry += + insideout::external_advance(data_[i],i+1, trunc_delta, isForwards); + } +} + +template +void extended::advance( + state_type distance, bool forwards) +{ + static_assert(kdd, + "Efficient advance is too hard for non-kdd extension. " + "For a weak advance, cast to base class"); + state_type zero = + baseclass::is_mcg ? this->state_ & state_type(3U) : state_type(0U); + if (may_tick) { + state_type ticks = distance >> (advance_pow2*may_tick); + // ^-- stupidity to appease GCC + // warnings + state_type adv_mask = + baseclass::is_mcg ? tick_mask << 2 : tick_mask; + state_type next_advance_distance = this->distance(zero, adv_mask); + if (!forwards) + next_advance_distance = (-next_advance_distance) & tick_mask; + if (next_advance_distance < (distance & tick_mask)) { + ++ticks; + } + if (ticks) + advance_table(ticks, forwards); + } + if (forwards) { + if (may_tock && this->distance(zero) <= distance) + advance_table(); + baseclass::advance(distance); + } else { + if (may_tock && -(this->distance(zero)) <= distance) + advance_table(state_type(1U), false); + baseclass::advance(-distance); + } +} + +} // namespace pcg_detail + +namespace pcg_engines { + +using namespace pcg_detail; + +/* Predefined types for XSH RS */ + +typedef oneseq_base oneseq_xsh_rs_16_8; +typedef oneseq_base oneseq_xsh_rs_32_16; +typedef oneseq_base oneseq_xsh_rs_64_32; +typedef oneseq_base oneseq_xsh_rs_128_64; +typedef oneseq_base + cm_oneseq_xsh_rs_128_64; + +typedef unique_base unique_xsh_rs_16_8; +typedef unique_base unique_xsh_rs_32_16; +typedef unique_base unique_xsh_rs_64_32; +typedef unique_base unique_xsh_rs_128_64; +typedef unique_base + cm_unique_xsh_rs_128_64; + +typedef setseq_base setseq_xsh_rs_16_8; +typedef setseq_base setseq_xsh_rs_32_16; +typedef setseq_base setseq_xsh_rs_64_32; +typedef setseq_base setseq_xsh_rs_128_64; +typedef setseq_base + cm_setseq_xsh_rs_128_64; + +typedef mcg_base mcg_xsh_rs_16_8; +typedef mcg_base mcg_xsh_rs_32_16; +typedef mcg_base mcg_xsh_rs_64_32; +typedef mcg_base mcg_xsh_rs_128_64; +typedef mcg_base + cm_mcg_xsh_rs_128_64; + +/* Predefined types for XSH RR */ + +typedef oneseq_base oneseq_xsh_rr_16_8; +typedef oneseq_base oneseq_xsh_rr_32_16; +typedef oneseq_base oneseq_xsh_rr_64_32; +typedef oneseq_base oneseq_xsh_rr_128_64; +typedef oneseq_base + cm_oneseq_xsh_rr_128_64; + +typedef unique_base unique_xsh_rr_16_8; +typedef unique_base unique_xsh_rr_32_16; +typedef unique_base unique_xsh_rr_64_32; +typedef unique_base unique_xsh_rr_128_64; +typedef unique_base + cm_unique_xsh_rr_128_64; + +typedef setseq_base setseq_xsh_rr_16_8; +typedef setseq_base setseq_xsh_rr_32_16; +typedef setseq_base setseq_xsh_rr_64_32; +typedef setseq_base setseq_xsh_rr_128_64; +typedef setseq_base + cm_setseq_xsh_rr_128_64; + +typedef mcg_base mcg_xsh_rr_16_8; +typedef mcg_base mcg_xsh_rr_32_16; +typedef mcg_base mcg_xsh_rr_64_32; +typedef mcg_base mcg_xsh_rr_128_64; +typedef mcg_base + cm_mcg_xsh_rr_128_64; + + +/* Predefined types for RXS M XS */ + +typedef oneseq_base oneseq_rxs_m_xs_8_8; +typedef oneseq_base 
oneseq_rxs_m_xs_16_16; +typedef oneseq_base oneseq_rxs_m_xs_32_32; +typedef oneseq_base oneseq_rxs_m_xs_64_64; +typedef oneseq_base + oneseq_rxs_m_xs_128_128; +typedef oneseq_base + cm_oneseq_rxs_m_xs_128_128; + +typedef unique_base unique_rxs_m_xs_8_8; +typedef unique_base unique_rxs_m_xs_16_16; +typedef unique_base unique_rxs_m_xs_32_32; +typedef unique_base unique_rxs_m_xs_64_64; +typedef unique_base unique_rxs_m_xs_128_128; +typedef unique_base + cm_unique_rxs_m_xs_128_128; + +typedef setseq_base setseq_rxs_m_xs_8_8; +typedef setseq_base setseq_rxs_m_xs_16_16; +typedef setseq_base setseq_rxs_m_xs_32_32; +typedef setseq_base setseq_rxs_m_xs_64_64; +typedef setseq_base setseq_rxs_m_xs_128_128; +typedef setseq_base + cm_setseq_rxs_m_xs_128_128; + + // MCG versions don't make sense here, so aren't defined. + +/* Predefined types for RXS M */ + +typedef oneseq_base oneseq_rxs_m_16_8; +typedef oneseq_base oneseq_rxs_m_32_16; +typedef oneseq_base oneseq_rxs_m_64_32; +typedef oneseq_base oneseq_rxs_m_128_64; +typedef oneseq_base + cm_oneseq_rxs_m_128_64; + +typedef unique_base unique_rxs_m_16_8; +typedef unique_base unique_rxs_m_32_16; +typedef unique_base unique_rxs_m_64_32; +typedef unique_base unique_rxs_m_128_64; +typedef unique_base + cm_unique_rxs_m_128_64; + +typedef setseq_base setseq_rxs_m_16_8; +typedef setseq_base setseq_rxs_m_32_16; +typedef setseq_base setseq_rxs_m_64_32; +typedef setseq_base setseq_rxs_m_128_64; +typedef setseq_base + cm_setseq_rxs_m_128_64; + +typedef mcg_base mcg_rxs_m_16_8; +typedef mcg_base mcg_rxs_m_32_16; +typedef mcg_base mcg_rxs_m_64_32; +typedef mcg_base mcg_rxs_m_128_64; +typedef mcg_base + cm_mcg_rxs_m_128_64; + +/* Predefined types for DXSM */ + +typedef oneseq_base oneseq_dxsm_16_8; +typedef oneseq_base oneseq_dxsm_32_16; +typedef oneseq_base oneseq_dxsm_64_32; +typedef oneseq_base oneseq_dxsm_128_64; +typedef oneseq_base + cm_oneseq_dxsm_128_64; + +typedef unique_base unique_dxsm_16_8; +typedef unique_base unique_dxsm_32_16; +typedef unique_base unique_dxsm_64_32; +typedef unique_base unique_dxsm_128_64; +typedef unique_base + cm_unique_dxsm_128_64; + +typedef setseq_base setseq_dxsm_16_8; +typedef setseq_base setseq_dxsm_32_16; +typedef setseq_base setseq_dxsm_64_32; +typedef setseq_base setseq_dxsm_128_64; +typedef setseq_base + cm_setseq_dxsm_128_64; + +typedef mcg_base mcg_dxsm_16_8; +typedef mcg_base mcg_dxsm_32_16; +typedef mcg_base mcg_dxsm_64_32; +typedef mcg_base mcg_dxsm_128_64; +typedef mcg_base + cm_mcg_dxsm_128_64; + +/* Predefined types for XSL RR (only defined for "large" types) */ + +typedef oneseq_base oneseq_xsl_rr_64_32; +typedef oneseq_base oneseq_xsl_rr_128_64; +typedef oneseq_base + cm_oneseq_xsl_rr_128_64; + +typedef unique_base unique_xsl_rr_64_32; +typedef unique_base unique_xsl_rr_128_64; +typedef unique_base + cm_unique_xsl_rr_128_64; + +typedef setseq_base setseq_xsl_rr_64_32; +typedef setseq_base setseq_xsl_rr_128_64; +typedef setseq_base + cm_setseq_xsl_rr_128_64; + +typedef mcg_base mcg_xsl_rr_64_32; +typedef mcg_base mcg_xsl_rr_128_64; +typedef mcg_base + cm_mcg_xsl_rr_128_64; + + +/* Predefined types for XSL RR RR (only defined for "large" types) */ + +typedef oneseq_base + oneseq_xsl_rr_rr_64_64; +typedef oneseq_base + oneseq_xsl_rr_rr_128_128; +typedef oneseq_base + cm_oneseq_xsl_rr_rr_128_128; + +typedef unique_base + unique_xsl_rr_rr_64_64; +typedef unique_base + unique_xsl_rr_rr_128_128; +typedef unique_base + cm_unique_xsl_rr_rr_128_128; + +typedef setseq_base + setseq_xsl_rr_rr_64_64; +typedef setseq_base + 
setseq_xsl_rr_rr_128_128; +typedef setseq_base + cm_setseq_xsl_rr_rr_128_128; + + // MCG versions don't make sense here, so aren't defined. + +/* Extended generators */ + +template +using ext_std8 = extended; + +template +using ext_std16 = extended; + +template +using ext_std32 = extended; + +template +using ext_std64 = extended; + + +template +using ext_oneseq_rxs_m_xs_32_32 = + ext_std32; + +template +using ext_mcg_xsh_rs_64_32 = + ext_std32; + +template +using ext_oneseq_xsh_rs_64_32 = + ext_std32; + +template +using ext_setseq_xsh_rr_64_32 = + ext_std32; + +template +using ext_mcg_xsl_rr_128_64 = + ext_std64; + +template +using ext_oneseq_xsl_rr_128_64 = + ext_std64; + +template +using ext_setseq_xsl_rr_128_64 = + ext_std64; + +} // namespace pcg_engines + +typedef pcg_engines::setseq_xsh_rr_64_32 pcg32; +typedef pcg_engines::oneseq_xsh_rr_64_32 pcg32_oneseq; +typedef pcg_engines::unique_xsh_rr_64_32 pcg32_unique; +typedef pcg_engines::mcg_xsh_rs_64_32 pcg32_fast; + +typedef pcg_engines::setseq_xsl_rr_128_64 pcg64; +typedef pcg_engines::oneseq_xsl_rr_128_64 pcg64_oneseq; +typedef pcg_engines::unique_xsl_rr_128_64 pcg64_unique; +typedef pcg_engines::mcg_xsl_rr_128_64 pcg64_fast; + +typedef pcg_engines::setseq_rxs_m_xs_8_8 pcg8_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_16_16 pcg16_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_32_32 pcg32_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_64_64 pcg64_once_insecure; +typedef pcg_engines::setseq_xsl_rr_rr_128_128 pcg128_once_insecure; + +typedef pcg_engines::oneseq_rxs_m_xs_8_8 pcg8_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_16_16 pcg16_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_32_32 pcg32_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_64_64 pcg64_oneseq_once_insecure; +typedef pcg_engines::oneseq_xsl_rr_rr_128_128 pcg128_oneseq_once_insecure; + + +// These two extended RNGs provide two-dimensionally equidistributed +// 32-bit generators. pcg32_k2_fast occupies the same space as pcg64, +// and can be called twice to generate 64 bits, but does not required +// 128-bit math; on 32-bit systems, it's faster than pcg64 as well. 
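+
+// A minimal usage sketch (illustrative only; it assumes the convenience
+// typedefs above plus the seed_seq_from helper from pcg_extras.hpp):
+//
+//     pcg_extras::seed_seq_from<std::random_device> seed_source;
+//     pcg32 rng(seed_source);    // stream-settable 32-bit generator
+//     uint32_t roll = rng(6);    // unbiased draw in [0, 6) via bounded_rand
+//     rng.advance(1000);         // jump ahead 1000 steps in O(log n) time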
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<1,16,true> pcg32_k2;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<1,32,true> pcg32_k2_fast;
+
+// These eight extended RNGs have about as much state as arc4random
+//
+//  - the k variants are k-dimensionally equidistributed
+//  - the c variants offer better cryptographic security
+//
+//  (just how good the cryptographic security is remains an open question)
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,true> pcg32_k64;
+typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,true> pcg32_k64_oneseq;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,true> pcg32_k64_fast;
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,false> pcg32_c64;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,false> pcg32_c64_oneseq;
+typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,false> pcg32_c64_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,true> pcg64_k32;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,true> pcg64_k32_oneseq;
+typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,true> pcg64_k32_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,false> pcg64_c32;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,false> pcg64_c32_oneseq;
+typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,false> pcg64_c32_fast;
+
+// These eight extended RNGs have more state than the Mersenne twister
+//
+//  - the k variants are k-dimensionally equidistributed
+//  - the c variants offer better cryptographic security
+//
+//  (just how good the cryptographic security is remains an open question)
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,true> pcg32_k1024;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,true> pcg32_k1024_fast;
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,false> pcg32_c1024;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,false> pcg32_c1024_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,true> pcg64_k1024;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,true> pcg64_k1024_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,false> pcg64_c1024;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,false> pcg64_c1024_fast;
+
+// These generators have an insanely huge period (2^524352), and are suitable
+// for silly party tricks, such as dumping out 64 KB ZIP files at an arbitrary
+// point in the future. [Actually, over its full period, each generator will
+// produce every 64 KB ZIP file 2^64 times!]
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<14,16,true> pcg32_k16384;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<14,32,true> pcg32_k16384_fast;
+
+} // namespace arrow_vendored
+
+#ifdef _MSC_VER
+    #pragma warning(default:4146)
+#endif
+
+#endif // PCG_RAND_HPP_INCLUDED
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0181e69e4ef37fc279e4d067fd42d0cedacb9126
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp
@@ -0,0 +1,1008 @@
+/*
+ * PCG Random Number Generation for C++
+ *
+ * Copyright 2014-2021 Melissa O'Neill <oneill@pcg-random.org>,
+ * and the PCG Project contributors.
+ *
+ * SPDX-License-Identifier: (Apache-2.0 OR MIT)
+ *
+ * Licensed under the Apache License, Version 2.0 (provided in
+ * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0)
+ * or under the MIT license (provided in LICENSE-MIT.txt and at
+ * http://opensource.org/licenses/MIT), at your option. This file may not
+ * be copied, modified, or distributed except according to those terms.
+ *
+ * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied. See your chosen license for details.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * visit http://www.pcg-random.org/.
+ */
+
+/*
+ * This code provides a C++ class that can represent 128-bit (or higher)
+ * integers. To produce 2K-bit integers, it uses two K-bit integers,
+ * placed in a union that allows the code to also see them as four K/2-bit
+ * integers (and access them either directly by name, or by index).
+ *
+ * It may seem like we're reinventing the wheel here, because several
+ * libraries already exist that support large integers, but most of them
+ * provide very generic multiprecision code, whereas here we're operating
+ * at a fixed size. Also, most other libraries are fairly heavyweight.
+ * So we use a direct implementation. Sadly, it's much slower than
+ * hand-coded assembly or direct CPU support.
+ */
+
+#ifndef PCG_UINT128_HPP_INCLUDED
+#define PCG_UINT128_HPP_INCLUDED 1
+
+#include <cstdint>
+#include <cstdio>
+#include <cassert>
+#include <climits>
+#include <utility>
+#include <initializer_list>
+#include <type_traits>
+
+#if defined(_MSC_VER)  // Use MSVC++ intrinsics
+#include <intrin.h>
+#endif
+
+/*
+ * We want to lay the type out the same way that a native type would be laid
+ * out, which means we must know the machine's endianness at compile time.
+ * This ugliness attempts to do so.
+ */
+
+#ifndef PCG_LITTLE_ENDIAN
+    #if defined(__BYTE_ORDER__)
+        #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+            #define PCG_LITTLE_ENDIAN 1
+        #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+            #define PCG_LITTLE_ENDIAN 0
+        #else
+            #error __BYTE_ORDER__ does not match a standard endian, pick a side
+        #endif
+    #elif __LITTLE_ENDIAN__ || _LITTLE_ENDIAN
+        #define PCG_LITTLE_ENDIAN 1
+    #elif __BIG_ENDIAN__ || _BIG_ENDIAN
+        #define PCG_LITTLE_ENDIAN 0
+    #elif __x86_64 || __x86_64__ || _M_X64 || __i386 || __i386__ || _M_IX86
+        #define PCG_LITTLE_ENDIAN 1
+    #elif __powerpc__ || __POWERPC__ || __ppc__ || __PPC__ \
+          || __m68k__ || __mc68000__
+        #define PCG_LITTLE_ENDIAN 0
+    #else
+        #error Unable to determine target endianness
+    #endif
+#endif
+
+#if INTPTR_MAX == INT64_MAX && !defined(PCG_64BIT_SPECIALIZATIONS)
+    #define PCG_64BIT_SPECIALIZATIONS 1
+#endif
+
+namespace arrow_vendored {
+namespace pcg_extras {
+
+// Recent versions of GCC have intrinsics we can use to quickly calculate
+// the number of leading and trailing zeros in a number. If possible, we
+// use them, otherwise we fall back to old-fashioned bit twiddling to figure
+// them out.
+
+#ifndef PCG_BITCOUNT_T
+    typedef uint8_t bitcount_t;
+#else
+    typedef PCG_BITCOUNT_T bitcount_t;
+#endif
+
+/*
+ * Provide some useful helper functions
+ *      * flog2                 floor(log2(x))
+ *      * trailingzeros         number of trailing zero bits
+ */
+
+#if defined(__GNUC__)   // Any GNU-compatible compiler supporting C++11 has
+                        // some useful intrinsics we can use.
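+
+// Worked example (added for exposition): flog2(12) == 3, since
+// 2^3 <= 12 < 2^4, and trailingzeros(12) == 2, since 12 == 0b1100.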
+ +inline bitcount_t flog2(uint32_t v) +{ + return 31 - __builtin_clz(v); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + return __builtin_ctz(v); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return 63 - __builtin_clzl(v); +#elif UINT64_MAX == ULLONG_MAX + return 63 - __builtin_clzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return __builtin_ctzl(v); +#elif UINT64_MAX == ULLONG_MAX + return __builtin_ctzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +#elif defined(_MSC_VER) // Use MSVC++ intrinsics + +#pragma intrinsic(_BitScanReverse, _BitScanForward) +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) +#pragma intrinsic(_BitScanReverse64, _BitScanForward64) +#endif + +inline bitcount_t flog2(uint32_t v) +{ + unsigned long i; + _BitScanReverse(&i, v); + return bitcount_t(i); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + unsigned long i; + _BitScanForward(&i, v); + return bitcount_t(i); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanReverse64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return high ? 32+flog2(high) : flog2(low); +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanForward64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return low ? trailingzeros(low) : trailingzeros(high)+32; +#endif +} + +#else // Otherwise, we fall back to bit twiddling + // implementations + +inline bitcount_t flog2(uint32_t v) +{ + // Based on code by Eric Cole and Mark Dickinson, which appears at + // https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn + + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + + v |= v >> 1; // first round down to one less than a power of 2 + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + + return multiplyDeBruijnBitPos[(uint32_t)(v * 0x07C4ACDDU) >> 27]; +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + + return multiplyDeBruijnBitPos[((uint32_t)((v & -v) * 0x077CB531U)) >> 27]; +} + +inline bitcount_t flog2(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return high ? 32+flog2(high) : flog2(low); +} + +inline bitcount_t trailingzeros(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return low ? trailingzeros(low) : trailingzeros(high)+32; +} + +#endif + +inline bitcount_t flog2(uint8_t v) +{ + return flog2(uint32_t(v)); +} + +inline bitcount_t flog2(uint16_t v) +{ + return flog2(uint32_t(v)); +} + +#if __SIZEOF_INT128__ +inline bitcount_t flog2(__uint128_t v) +{ + uint64_t high = uint64_t(v >> 64); + uint64_t low = uint64_t(v); + + return high ? 
64+flog2(high) : flog2(low); +} +#endif + +inline bitcount_t trailingzeros(uint8_t v) +{ + return trailingzeros(uint32_t(v)); +} + +inline bitcount_t trailingzeros(uint16_t v) +{ + return trailingzeros(uint32_t(v)); +} + +#if __SIZEOF_INT128__ +inline bitcount_t trailingzeros(__uint128_t v) +{ + uint64_t high = uint64_t(v >> 64); + uint64_t low = uint64_t(v); + return low ? trailingzeros(low) : trailingzeros(high)+64; +} +#endif + +template +inline bitcount_t clog2(UInt v) +{ + return flog2(v) + ((v & (-v)) != v); +} + +template +inline UInt addwithcarry(UInt x, UInt y, bool carryin, bool* carryout) +{ + UInt half_result = y + carryin; + UInt result = x + half_result; + *carryout = (half_result < y) || (result < x); + return result; +} + +template +inline UInt subwithcarry(UInt x, UInt y, bool carryin, bool* carryout) +{ + UInt half_result = y + carryin; + UInt result = x - half_result; + *carryout = (half_result < y) || (result > x); + return result; +} + + +template +class uint_x4 { +// private: + static constexpr unsigned int UINT_BITS = sizeof(UInt) * CHAR_BIT; +public: + union { +#if PCG_LITTLE_ENDIAN + struct { + UInt v0, v1, v2, v3; + } w; + struct { + UIntX2 v01, v23; + } d; +#else + struct { + UInt v3, v2, v1, v0; + } w; + struct { + UIntX2 v23, v01; + } d; +#endif + // For the array access versions, the code that uses the array + // must handle endian itself. Yuck. + UInt wa[4]; + }; + +public: + uint_x4() = default; + + constexpr uint_x4(UInt v3, UInt v2, UInt v1, UInt v0) +#if PCG_LITTLE_ENDIAN + : w{v0, v1, v2, v3} +#else + : w{v3, v2, v1, v0} +#endif + { + // Nothing (else) to do + } + + constexpr uint_x4(UIntX2 v23, UIntX2 v01) +#if PCG_LITTLE_ENDIAN + : d{v01,v23} +#else + : d{v23,v01} +#endif + { + // Nothing (else) to do + } + + constexpr uint_x4(UIntX2 v01) +#if PCG_LITTLE_ENDIAN + : d{v01, UIntX2(0)} +#else + : d{UIntX2(0),v01} +#endif + { + // Nothing (else) to do + } + + template::value + && sizeof(Integral) <= sizeof(UIntX2)) + >::type* = nullptr> + constexpr uint_x4(Integral v01) +#if PCG_LITTLE_ENDIAN + : d{UIntX2(v01), UIntX2(0)} +#else + : d{UIntX2(0), UIntX2(v01)} +#endif + { + // Nothing (else) to do + } + + explicit constexpr operator UIntX2() const + { + return d.v01; + } + + template::value + && sizeof(Integral) <= sizeof(UIntX2)) + >::type* = nullptr> + explicit constexpr operator Integral() const + { + return Integral(d.v01); + } + + explicit constexpr operator bool() const + { + return d.v01 || d.v23; + } + + template + friend uint_x4 operator*(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator*(const uint_x4&, V); + + template + friend std::pair< uint_x4,uint_x4 > + divmod(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator+(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator-(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator<<(const uint_x4&, const bitcount_t shift); + + template + friend uint_x4 operator>>(const uint_x4&, const bitcount_t shift); + +#if PCG_64BIT_SPECIALIZATIONS + template + friend uint_x4 operator<<(const uint_x4&, const bitcount_t shift); + + template + friend uint_x4 operator>>(const uint_x4&, const bitcount_t shift); +#endif + + template + friend uint_x4 operator&(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator|(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator^(const uint_x4&, const uint_x4&); + + template + friend bool operator==(const uint_x4&, const uint_x4&); + + template + friend bool 
operator!=(const uint_x4&, const uint_x4&); + + template + friend bool operator<(const uint_x4&, const uint_x4&); + + template + friend bool operator<=(const uint_x4&, const uint_x4&); + + template + friend bool operator>(const uint_x4&, const uint_x4&); + + template + friend bool operator>=(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator~(const uint_x4&); + + template + friend uint_x4 operator-(const uint_x4&); + + template + friend bitcount_t flog2(const uint_x4&); + + template + friend bitcount_t trailingzeros(const uint_x4&); + +#if PCG_64BIT_SPECIALIZATIONS + template + friend bitcount_t flog2(const uint_x4&); + + template + friend bitcount_t trailingzeros(const uint_x4&); +#endif + + uint_x4& operator*=(const uint_x4& rhs) + { + uint_x4 result = *this * rhs; + return *this = result; + } + + uint_x4& operator*=(UIntX2 rhs) + { + uint_x4 result = *this * rhs; + return *this = result; + } + + uint_x4& operator/=(const uint_x4& rhs) + { + uint_x4 result = *this / rhs; + return *this = result; + } + + uint_x4& operator%=(const uint_x4& rhs) + { + uint_x4 result = *this % rhs; + return *this = result; + } + + uint_x4& operator+=(const uint_x4& rhs) + { + uint_x4 result = *this + rhs; + return *this = result; + } + + uint_x4& operator-=(const uint_x4& rhs) + { + uint_x4 result = *this - rhs; + return *this = result; + } + + uint_x4& operator&=(const uint_x4& rhs) + { + uint_x4 result = *this & rhs; + return *this = result; + } + + uint_x4& operator|=(const uint_x4& rhs) + { + uint_x4 result = *this | rhs; + return *this = result; + } + + uint_x4& operator^=(const uint_x4& rhs) + { + uint_x4 result = *this ^ rhs; + return *this = result; + } + + uint_x4& operator>>=(bitcount_t shift) + { + uint_x4 result = *this >> shift; + return *this = result; + } + + uint_x4& operator<<=(bitcount_t shift) + { + uint_x4 result = *this << shift; + return *this = result; + } + +}; + +template +bitcount_t flog2(const uint_x4& v) +{ +#if PCG_LITTLE_ENDIAN + for (uint8_t i = 4; i !=0; /* dec in loop */) { + --i; +#else + for (uint8_t i = 0; i < 4; ++i) { +#endif + if (v.wa[i] == 0) + continue; + return flog2(v.wa[i]) + uint_x4::UINT_BITS*i; + } + abort(); +} + +template +bitcount_t trailingzeros(const uint_x4& v) +{ +#if PCG_LITTLE_ENDIAN + for (uint8_t i = 0; i < 4; ++i) { +#else + for (uint8_t i = 4; i !=0; /* dec in loop */) { + --i; +#endif + if (v.wa[i] != 0) + return trailingzeros(v.wa[i]) + uint_x4::UINT_BITS*i; + } + return uint_x4::UINT_BITS*4; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +bitcount_t flog2(const uint_x4& v) +{ + return v.d.v23 > 0 ? flog2(v.d.v23) + uint_x4::UINT_BITS*2 + : flog2(v.d.v01); +} + +template +bitcount_t trailingzeros(const uint_x4& v) +{ + return v.d.v01 == 0 ? trailingzeros(v.d.v23) + uint_x4::UINT_BITS*2 + : trailingzeros(v.d.v01); +} +#endif + +template +std::pair< uint_x4, uint_x4 > + divmod(const uint_x4& orig_dividend, + const uint_x4& divisor) +{ + // If the dividend is less than the divisor, the answer is always zero. + // This takes care of boundary cases like 0/x (which would otherwise be + // problematic because we can't take the log of zero. (The boundary case + // of division by zero is undefined.) 
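+    //
+    // The algorithm below is shift-and-subtract long division: after the
+    // early-out for a small dividend, it subtracts divisor << k from the
+    // running dividend while adding 1 << k to the quotient, shrinking k
+    // whenever the shifted divisor no longer fits, until the remainder is
+    // smaller than the divisor.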
+    if (orig_dividend < divisor)
+        return { uint_x4<UInt,UIntX2>(UIntX2(0)), orig_dividend };
+
+    auto dividend = orig_dividend;
+
+    auto log2_divisor  = flog2(divisor);
+    auto log2_dividend = flog2(dividend);
+    // assert(log2_dividend >= log2_divisor);
+    bitcount_t logdiff = log2_dividend - log2_divisor;
+
+    constexpr uint_x4<UInt,UIntX2> ONE(UIntX2(1));
+    if (logdiff == 0)
+        return { ONE, dividend - divisor };
+
+    // Now we change the log difference to
+    //     floor(log2(dividend)) - ceil(log2(divisor))
+    // to ensure that we *underestimate* the result.
+    logdiff -= 1;
+
+    uint_x4<UInt,UIntX2> quotient(UIntX2(0));
+
+    auto qfactor = ONE << logdiff;
+    auto factor  = divisor << logdiff;
+
+    do {
+        dividend -= factor;
+        quotient += qfactor;
+        while (dividend < factor) {
+            factor  >>= 1;
+            qfactor >>= 1;
+        }
+    } while (dividend >= divisor);
+
+    return { quotient, dividend };
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator/(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).first;
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator%(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).second;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(b.w.v0);
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(b.w.v0);
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout);
+
+    r.d.v23 += a.d.v01 * b.d.v23 + a.d.v23 * b.d.v01;
+
+    return r;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               UIntX2 b01)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(UInt(b01));
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(UInt(b01));
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b01 >> UINT_BITS);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin,
&carryout); + carryin = carryout; + r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout); + + UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b01 >> UINT_BITS); + carryin = false; + r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout); + carryin = carryout; + r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout); + + r.d.v23 += a.d.v23 * b01; + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +#if defined(_MSC_VER) +#pragma intrinsic(_umul128) +#endif + +#if defined(_MSC_VER) || __SIZEOF_INT128__ +template +uint_x4 operator*(const uint_x4& a, + const uint_x4& b) +{ +#if defined(_MSC_VER) + uint64_t hi; + uint64_t lo = _umul128(a.d.v01, b.d.v01, &hi); +#else + __uint128_t r = __uint128_t(a.d.v01) * __uint128_t(b.d.v01); + uint64_t lo = uint64_t(r); + uint64_t hi = r >> 64; +#endif + hi += a.d.v23 * b.d.v01 + a.d.v01 * b.d.v23; + return {hi, lo}; +} +#endif +#endif + + +template +uint_x4 operator+(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + + bool carryin = false; + bool carryout; + r.w.v0 = addwithcarry(a.w.v0, b.w.v0, carryin, &carryout); + carryin = carryout; + r.w.v1 = addwithcarry(a.w.v1, b.w.v1, carryin, &carryout); + carryin = carryout; + r.w.v2 = addwithcarry(a.w.v2, b.w.v2, carryin, &carryout); + carryin = carryout; + r.w.v3 = addwithcarry(a.w.v3, b.w.v3, carryin, &carryout); + + return r; +} + +template +uint_x4 operator-(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + + bool carryin = false; + bool carryout; + r.w.v0 = subwithcarry(a.w.v0, b.w.v0, carryin, &carryout); + carryin = carryout; + r.w.v1 = subwithcarry(a.w.v1, b.w.v1, carryin, &carryout); + carryin = carryout; + r.w.v2 = subwithcarry(a.w.v2, b.w.v2, carryin, &carryout); + carryin = carryout; + r.w.v3 = subwithcarry(a.w.v3, b.w.v3, carryin, &carryout); + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +uint_x4 operator+(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {uint64_t(0u), uint64_t(0u)}; + + bool carryin = false; + bool carryout; + r.d.v01 = addwithcarry(a.d.v01, b.d.v01, carryin, &carryout); + carryin = carryout; + r.d.v23 = addwithcarry(a.d.v23, b.d.v23, carryin, &carryout); + + return r; +} + +template +uint_x4 operator-(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {uint64_t(0u), uint64_t(0u)}; + + bool carryin = false; + bool carryout; + r.d.v01 = subwithcarry(a.d.v01, b.d.v01, carryin, &carryout); + carryin = carryout; + r.d.v23 = subwithcarry(a.d.v23, b.d.v23, carryin, &carryout); + + return r; +} +#endif + +template +uint_x4 operator&(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 & b.d.v23, a.d.v01 & b.d.v01); +} + +template +uint_x4 operator|(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 | b.d.v23, a.d.v01 | b.d.v01); +} + +template +uint_x4 operator^(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 ^ b.d.v23, a.d.v01 ^ b.d.v01); +} + +template +uint_x4 operator~(const uint_x4& v) +{ + return uint_x4(~v.d.v23, ~v.d.v01); +} + +template +uint_x4 operator-(const uint_x4& v) +{ + return uint_x4(0UL,0UL) - v; +} + +template +bool operator==(const uint_x4& a, const uint_x4& b) +{ + return (a.d.v01 == b.d.v01) && (a.d.v23 == b.d.v23); +} + +template +bool operator!=(const uint_x4& a, const uint_x4& b) +{ + return !operator==(a,b); +} + + +template +bool operator<(const uint_x4& a, const uint_x4& b) +{ + return (a.d.v23 < b.d.v23) + || ((a.d.v23 == b.d.v23) && (a.d.v01 < b.d.v01)); +} + +template +bool operator>(const uint_x4& a, const uint_x4& 
b) +{ + return operator<(b,a); +} + +template +bool operator<=(const uint_x4& a, const uint_x4& b) +{ + return !(operator<(b,a)); +} + +template +bool operator>=(const uint_x4& a, const uint_x4& b) +{ + return !(operator<(a,b)); +} + + + +template +uint_x4 operator<<(const uint_x4& v, + const bitcount_t shift) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + const bitcount_t bits = uint_x4::UINT_BITS; + const bitcount_t bitmask = bits - 1; + const bitcount_t shiftdiv = shift / bits; + const bitcount_t shiftmod = shift & bitmask; + + if (shiftmod) { + UInt carryover = 0; +#if PCG_LITTLE_ENDIAN + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#else + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#endif + r.wa[out] = (v.wa[in] << shiftmod) | carryover; + carryover = (v.wa[in] >> (bits - shiftmod)); + } + } else { +#if PCG_LITTLE_ENDIAN + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#else + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#endif + r.wa[out] = v.wa[in]; + } + } + + return r; +} + +template +uint_x4 operator>>(const uint_x4& v, + const bitcount_t shift) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + const bitcount_t bits = uint_x4::UINT_BITS; + const bitcount_t bitmask = bits - 1; + const bitcount_t shiftdiv = shift / bits; + const bitcount_t shiftmod = shift & bitmask; + + if (shiftmod) { + UInt carryover = 0; +#if PCG_LITTLE_ENDIAN + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#else + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#endif + r.wa[out] = (v.wa[in] >> shiftmod) | carryover; + carryover = (v.wa[in] << (bits - shiftmod)); + } + } else { +#if PCG_LITTLE_ENDIAN + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#else + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#endif + r.wa[out] = v.wa[in]; + } + } + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +uint_x4 operator<<(const uint_x4& v, + const bitcount_t shift) +{ + constexpr bitcount_t bits2 = uint_x4::UINT_BITS * 2; + + if (shift >= bits2) { + return {v.d.v01 << (shift-bits2), uint64_t(0u)}; + } else { + return {shift ? (v.d.v23 << shift) | (v.d.v01 >> (bits2-shift)) + : v.d.v23, + v.d.v01 << shift}; + } +} + +template +uint_x4 operator>>(const uint_x4& v, + const bitcount_t shift) +{ + constexpr bitcount_t bits2 = uint_x4::UINT_BITS * 2; + + if (shift >= bits2) { + return {uint64_t(0u), v.d.v23 >> (shift-bits2)}; + } else { + return {v.d.v23 >> shift, + shift ? (v.d.v01 >> shift) | (v.d.v23 << (bits2-shift)) + : v.d.v01}; + } +} +#endif + +} // namespace pcg_extras +} // namespace arrow_vendored + +#endif // PCG_UINT128_HPP_INCLUDED diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h new file mode 100644 index 0000000000000000000000000000000000000000..6d039064d6ab3f56a2215ae50d1b0286db61ddfe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h @@ -0,0 +1,83 @@ +/* Debugging assertions and traps + * Portable Snippets - https://github.com/nemequ/portable-snippets + * Created by Evan Nemerson + * + * To the extent possible under law, the authors have waived all + * copyright and related or neighboring rights to this code. 
For + * details, see the Creative Commons Zero 1.0 Universal license at + * https://creativecommons.org/publicdomain/zero/1.0/ + */ + +#if !defined(PSNIP_DEBUG_TRAP_H) +#define PSNIP_DEBUG_TRAP_H + +#if !defined(PSNIP_NDEBUG) && defined(NDEBUG) && !defined(PSNIP_DEBUG) +# define PSNIP_NDEBUG 1 +#endif + +#if defined(__has_builtin) && !defined(__ibmxl__) +# if __has_builtin(__builtin_debugtrap) +# define psnip_trap() __builtin_debugtrap() +# elif __has_builtin(__debugbreak) +# define psnip_trap() __debugbreak() +# endif +#endif +#if !defined(psnip_trap) +# if defined(_MSC_VER) || defined(__INTEL_COMPILER) +# define psnip_trap() __debugbreak() +# elif defined(__ARMCC_VERSION) +# define psnip_trap() __breakpoint(42) +# elif defined(__ibmxl__) || defined(__xlC__) +# include +# define psnip_trap() __trap(42) +# elif defined(__DMC__) && defined(_M_IX86) + static inline void psnip_trap(void) { __asm int 3h; } +# elif defined(__i386__) || defined(__x86_64__) + static inline void psnip_trap(void) { __asm__ __volatile__("int $03"); } +# elif defined(__thumb__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xde01"); } +# elif defined(__aarch64__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xd4200000"); } +# elif defined(__arm__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xe7f001f0"); } +# elif defined (__alpha__) && !defined(__osf__) + static inline void psnip_trap(void) { __asm__ __volatile__("bpt"); } +# elif defined(_54_) + static inline void psnip_trap(void) { __asm__ __volatile__("ESTOP"); } +# elif defined(_55_) + static inline void psnip_trap(void) { __asm__ __volatile__(";\n .if (.MNEMONIC)\n ESTOP_1\n .else\n ESTOP_1()\n .endif\n NOP"); } +# elif defined(_64P_) + static inline void psnip_trap(void) { __asm__ __volatile__("SWBP 0"); } +# elif defined(_6x_) + static inline void psnip_trap(void) { __asm__ __volatile__("NOP\n .word 0x10000000"); } +# elif defined(__STDC_HOSTED__) && (__STDC_HOSTED__ == 0) && defined(__GNUC__) +# define psnip_trap() __builtin_trap() +# else +# include +# if defined(SIGTRAP) +# define psnip_trap() raise(SIGTRAP) +# else +# define psnip_trap() raise(SIGABRT) +# endif +# endif +#endif + +#if defined(HEDLEY_LIKELY) +# define PSNIP_DBG_LIKELY(expr) HEDLEY_LIKELY(expr) +#elif defined(__GNUC__) && (__GNUC__ >= 3) +# define PSNIP_DBG_LIKELY(expr) __builtin_expect(!!(expr), 1) +#else +# define PSNIP_DBG_LIKELY(expr) (!!(expr)) +#endif + +#if !defined(PSNIP_NDEBUG) || (PSNIP_NDEBUG == 0) +# define psnip_dbg_assert(expr) do { \ + if (!PSNIP_DBG_LIKELY(expr)) { \ + psnip_trap(); \ + } \ + } while (0) +#else +# define psnip_dbg_assert(expr) +#endif + +#endif /* !defined(PSNIP_DEBUG_TRAP_H) */ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6426ac7657192f6e0aa92bc25a828f106b4f7c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h @@ -0,0 +1,1072 @@ +/* Overflow-safe math functions + * Portable Snippets - https://github.com/nemequ/portable-snippets + * Created by Evan Nemerson + * + * To the extent possible under law, the authors have waived all + * copyright and related or neighboring rights to this code. 
For + * details, see the Creative Commons Zero 1.0 Universal license at + * https://creativecommons.org/publicdomain/zero/1.0/ + */ + +#if !defined(PSNIP_SAFE_H) +#define PSNIP_SAFE_H + +#if !defined(PSNIP_SAFE_FORCE_PORTABLE) +# if defined(__has_builtin) +# if __has_builtin(__builtin_add_overflow) && !defined(__ibmxl__) +# define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__INTEL_COMPILER) +# define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW +# endif +# if defined(__has_include) +# if __has_include() +# define PSNIP_SAFE_HAVE_INTSAFE_H +# endif +# elif defined(_WIN32) +# define PSNIP_SAFE_HAVE_INTSAFE_H +# endif +#endif /* !defined(PSNIP_SAFE_FORCE_PORTABLE) */ + +#if defined(__GNUC__) +# define PSNIP_SAFE_LIKELY(expr) __builtin_expect(!!(expr), 1) +# define PSNIP_SAFE_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#else +# define PSNIP_SAFE_LIKELY(expr) !!(expr) +# define PSNIP_SAFE_UNLIKELY(expr) !!(expr) +#endif /* defined(__GNUC__) */ + +#if !defined(PSNIP_SAFE_STATIC_INLINE) +# if defined(__GNUC__) +# define PSNIP_SAFE__COMPILER_ATTRIBUTES __attribute__((__unused__)) +# else +# define PSNIP_SAFE__COMPILER_ATTRIBUTES +# endif + +# if defined(HEDLEY_INLINE) +# define PSNIP_SAFE__INLINE HEDLEY_INLINE +# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define PSNIP_SAFE__INLINE inline +# elif defined(__GNUC_STDC_INLINE__) +# define PSNIP_SAFE__INLINE __inline__ +# elif defined(_MSC_VER) && _MSC_VER >= 1200 +# define PSNIP_SAFE__INLINE __inline +# else +# define PSNIP_SAFE__INLINE +# endif + +# define PSNIP_SAFE__FUNCTION PSNIP_SAFE__COMPILER_ATTRIBUTES static PSNIP_SAFE__INLINE +#endif + +// !defined(__cplusplus) added for Solaris support +#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define psnip_safe_bool _Bool +#else +# define psnip_safe_bool int +#endif + +#if !defined(PSNIP_SAFE_NO_FIXED) +/* For maximum portability include the exact-int module from + portable snippets. */ +# if \ + !defined(psnip_int64_t) || !defined(psnip_uint64_t) || \ + !defined(psnip_int32_t) || !defined(psnip_uint32_t) || \ + !defined(psnip_int16_t) || !defined(psnip_uint16_t) || \ + !defined(psnip_int8_t) || !defined(psnip_uint8_t) +# include +# if !defined(psnip_int64_t) +# define psnip_int64_t int64_t +# endif +# if !defined(psnip_uint64_t) +# define psnip_uint64_t uint64_t +# endif +# if !defined(psnip_int32_t) +# define psnip_int32_t int32_t +# endif +# if !defined(psnip_uint32_t) +# define psnip_uint32_t uint32_t +# endif +# if !defined(psnip_int16_t) +# define psnip_int16_t int16_t +# endif +# if !defined(psnip_uint16_t) +# define psnip_uint16_t uint16_t +# endif +# if !defined(psnip_int8_t) +# define psnip_int8_t int8_t +# endif +# if !defined(psnip_uint8_t) +# define psnip_uint8_t uint8_t +# endif +# endif +#endif /* !defined(PSNIP_SAFE_NO_FIXED) */ +#include +#include + +#if !defined(PSNIP_SAFE_SIZE_MAX) +# if defined(__SIZE_MAX__) +# define PSNIP_SAFE_SIZE_MAX __SIZE_MAX__ +# elif defined(PSNIP_EXACT_INT_HAVE_STDINT) +# include +# endif +#endif + +#if defined(PSNIP_SAFE_SIZE_MAX) +# define PSNIP_SAFE__SIZE_MAX_RT PSNIP_SAFE_SIZE_MAX +#else +# define PSNIP_SAFE__SIZE_MAX_RT (~((size_t) 0)) +#endif + +#if defined(PSNIP_SAFE_HAVE_INTSAFE_H) +/* In VS 10, stdint.h and intsafe.h both define (U)INTN_MIN/MAX, which + triggers warning C4005 (level 1). 
*/ +# if defined(_MSC_VER) && (_MSC_VER == 1600) +# pragma warning(push) +# pragma warning(disable:4005) +# endif +# include +# if defined(_MSC_VER) && (_MSC_VER == 1600) +# pragma warning(pop) +# endif +#endif /* defined(PSNIP_SAFE_HAVE_INTSAFE_H) */ + +/* If there is a type larger than the one we're concerned with it's + * likely much faster to simply promote the operands, perform the + * requested operation, verify that the result falls within the + * original type, then cast the result back to the original type. */ + +#if !defined(PSNIP_SAFE_NO_PROMOTIONS) + +#define PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, op_name, op) \ + PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \ + psnip_safe_larger_##name##_##op_name (T a, T b) { \ + return ((psnip_safe_##name##_larger) a) op ((psnip_safe_##name##_larger) b); \ + } + +#define PSNIP_SAFE_DEFINE_LARGER_UNARY_OP(T, name, op_name, op) \ + PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \ + psnip_safe_larger_##name##_##op_name (T value) { \ + return (op ((psnip_safe_##name##_larger) value)); \ + } + +#define PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(T, name) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %) \ + PSNIP_SAFE_DEFINE_LARGER_UNARY_OP (T, name, neg, -) + +#define PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(T, name) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %) + +#define PSNIP_SAFE_IS_LARGER(ORIG_MAX, DEST_MAX) ((DEST_MAX / ORIG_MAX) >= ORIG_MAX) + +#if defined(__GNUC__) && ((__GNUC__ >= 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__SIZEOF_INT128__) && !defined(__ibmxl__) +#define PSNIP_SAFE_HAVE_128 +typedef __int128 psnip_safe_int128_t; +typedef unsigned __int128 psnip_safe_uint128_t; +#endif /* defined(__GNUC__) */ + +#if !defined(PSNIP_SAFE_NO_FIXED) +#define PSNIP_SAFE_HAVE_INT8_LARGER +#define PSNIP_SAFE_HAVE_UINT8_LARGER +typedef psnip_int16_t psnip_safe_int8_larger; +typedef psnip_uint16_t psnip_safe_uint8_larger; + +#define PSNIP_SAFE_HAVE_INT16_LARGER +typedef psnip_int32_t psnip_safe_int16_larger; +typedef psnip_uint32_t psnip_safe_uint16_larger; + +#define PSNIP_SAFE_HAVE_INT32_LARGER +typedef psnip_int64_t psnip_safe_int32_larger; +typedef psnip_uint64_t psnip_safe_uint32_larger; + +#if defined(PSNIP_SAFE_HAVE_128) +#define PSNIP_SAFE_HAVE_INT64_LARGER +typedef psnip_safe_int128_t psnip_safe_int64_larger; +typedef psnip_safe_uint128_t psnip_safe_uint64_larger; +#endif /* defined(PSNIP_SAFE_HAVE_128) */ +#endif /* !defined(PSNIP_SAFE_NO_FIXED) */ + +#define PSNIP_SAFE_HAVE_LARGER_SCHAR +#if PSNIP_SAFE_IS_LARGER(SCHAR_MAX, SHRT_MAX) +typedef short psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, INT_MAX) +typedef int psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LONG_MAX) +typedef long psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LLONG_MAX) +typedef long long psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffLL) +typedef psnip_int32_t 
psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SCHAR_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_schar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SCHAR +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UCHAR +#if PSNIP_SAFE_IS_LARGER(UCHAR_MAX, USHRT_MAX) +typedef unsigned short psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, UINT_MAX) +typedef unsigned int psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffU) +typedef psnip_uint16_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UCHAR_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uchar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UCHAR +#endif + +#if CHAR_MIN == 0 && defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_uchar_larger psnip_safe_char_larger; +#elif CHAR_MIN < 0 && defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_schar_larger psnip_safe_char_larger; +#endif + +#define PSNIP_SAFE_HAVE_LARGER_SHRT +#if PSNIP_SAFE_IS_LARGER(SHRT_MAX, INT_MAX) +typedef int psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LONG_MAX) +typedef long psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LLONG_MAX) +typedef long long psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SHRT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_short_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_USHRT +#if PSNIP_SAFE_IS_LARGER(USHRT_MAX, UINT_MAX) +typedef unsigned int psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (USHRT_MAX <= 0xffffffffffffffffULL) +typedef 
psnip_safe_uint128_t psnip_safe_ushort_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_USHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_INT +#if PSNIP_SAFE_IS_LARGER(INT_MAX, LONG_MAX) +typedef long psnip_safe_int_larger; +#elif PSNIP_SAFE_IS_LARGER(INT_MAX, LLONG_MAX) +typedef long long psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (INT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_int_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_INT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UINT +#if PSNIP_SAFE_IS_LARGER(UINT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uint_larger; +#elif PSNIP_SAFE_IS_LARGER(UINT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UINT_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uint_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UINT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LONG +#if PSNIP_SAFE_IS_LARGER(LONG_MAX, LLONG_MAX) +typedef long long psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LONG_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_long_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_LONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_ULONG +#if PSNIP_SAFE_IS_LARGER(ULONG_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULONG_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_ulong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_ULONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LLONG +#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t 
psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LLONG_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_llong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_LLONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_ULLONG +#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULLONG_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_ullong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_ULLONG +#endif + +#if defined(PSNIP_SAFE_SIZE_MAX) +#define PSNIP_SAFE_HAVE_LARGER_SIZE +#if PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, USHRT_MAX) +typedef unsigned short psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, UINT_MAX) +typedef unsigned int psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (PSNIP_SAFE_SIZE_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_size_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SIZE +#endif +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(signed char, schar) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned char, uchar) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +#if CHAR_MIN == 0 +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(char, char) +#else +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(char, char) +#endif +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SHORT) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(short, short) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_USHORT) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned short, ushort) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_INT) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(int, int) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_UINT) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned int, uint) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_LONG) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long, long) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_ULONG) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long, ulong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_LLONG) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long long, llong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_ULLONG) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long long, ullong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SIZE) 
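+/* Like the invocations above, this expands to
+   psnip_safe_larger_size_{add,sub,mul,div,mod} helpers that evaluate in the
+   wider type selected earlier. */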
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(size_t, size) +#endif + +#if !defined(PSNIP_SAFE_NO_FIXED) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int8_t, int8) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint8_t, uint8) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int16_t, int16) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint16_t, uint16) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int32_t, int32) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint32_t, uint32) +#if defined(PSNIP_SAFE_HAVE_128) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int64_t, int64) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint64_t, uint64) +#endif +#endif + +#endif /* !defined(PSNIP_SAFE_NO_PROMOTIONS) */ + +#define PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(T, name, op_name) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + return !__builtin_##op_name##_overflow(a, b, res); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(T, name, op_name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r >= min) && (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(T, name, op_name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_ADD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + psnip_safe_bool r = !( ((b > 0) && (a > (max - b))) || \ + ((b < 0) && (a < (min - b))) ); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a + b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_ADD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + *res = (T) (a + b); \ + return !PSNIP_SAFE_UNLIKELY((b > 0) && (a > (max - b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_SUB(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + psnip_safe_bool r = !((b > 0 && a < (min + b)) || \ + (b < 0 && a > (max + b))); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a - b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_SUB(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + *res = a - b; \ + return !PSNIP_SAFE_UNLIKELY(b > a); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MUL(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + psnip_safe_bool r = 1; \ + if (a > 0) { \ + if (b > 0) { \ + if (a > (max / b)) { \ + r = 0; \ + } \ + } else { \ + if (b < (min / a)) { \ + r = 0; \ + } \ + } \ + } else { \ + if (b > 0) { \ + if (a < (min / b)) { \ + r = 0; \ + } \ + } else { \ + if ( (a != 0) && (b < (max / a))) { \ + r = 0; \ + } \ + } \ + } \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a * b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MUL(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + *res = (T) (a * b); \ + return !PSNIP_SAFE_UNLIKELY((a > 0) && (b > 0) && (a > (max / b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_DIV(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b 
== 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a / b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_DIV(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a / b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MOD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a % b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MOD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a % b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_NEG(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_neg (T* res, T value) { \ + psnip_safe_bool r = value != min; \ + *res = PSNIP_SAFE_LIKELY(r) ? -value : max; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_INTSAFE(T, name, op, isf) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op (T* res, T a, T b) { \ + return isf(a, b, res) == S_OK; \ + } + +#if CHAR_MIN == 0 +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, add, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, sub, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, mul, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(char, char, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(char, char, CHAR_MAX) +#else /* CHAR_MIN != 0 */ +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, add, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, sub, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, mul, CHAR_MIN, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(char, char, CHAR_MIN, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(char, char, CHAR_MIN, CHAR_MAX) +#endif + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) 
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, add, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, sub, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, mul, SCHAR_MIN, SCHAR_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(signed char, schar, SCHAR_MIN, SCHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(signed char, schar, SCHAR_MIN, SCHAR_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, add, UCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, sub, UCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, mul, UCHAR_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned char, uchar, UCHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned char, uchar, UCHAR_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SHORT) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, add, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, sub, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, mul, SHRT_MIN, SHRT_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(short, short, SHRT_MIN, SHRT_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(short, short, SHRT_MIN, SHRT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, add, UShortAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, sub, UShortSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, mul, UShortMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_USHORT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, add, USHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, sub, USHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, mul, USHRT_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned short, ushort, USHRT_MAX) +#endif 
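+
+/* Unsigned division and modulus cannot overflow, so unlike add/sub/mul they
+   need no builtin or intsafe variants; the portable definitions below only
+   have to reject division by zero. */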
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned short, ushort, USHRT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, add, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, sub, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, mul, INT_MIN, INT_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(int, int, INT_MIN, INT_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(int, int, INT_MIN, INT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, add, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, sub, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, mul, UINT_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned int, uint, UINT_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned int, uint, UINT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, add, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, sub, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, mul, LONG_MIN, LONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long, long, LONG_MIN, LONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long, long, LONG_MIN, LONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, add, ULongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, sub, ULongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, mul, ULongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, add, ULONG_MAX) 
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, sub, ULONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, mul, ULONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long, ulong, ULONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long, ulong, ULONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LLONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, add, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, sub, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, mul, LLONG_MIN, LLONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long long, llong, LLONG_MIN, LLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long long, llong, LLONG_MIN, LLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, add, ULongLongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, sub, ULongLongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, mul, ULongLongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULLONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, add, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, sub, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, mul, ULLONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long long, ullong, ULLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long long, ullong, ULLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, add, SizeTAdd) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, sub, SizeTSub) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, mul, SizeTMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SIZE) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, add, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, sub, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, mul, PSNIP_SAFE__SIZE_MAX_RT) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) 
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) + +#if !defined(PSNIP_SAFE_NO_FIXED) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT8) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, add, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, sub, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, mul, (-0x7fLL-1), 0x7f) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT8) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, add, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, sub, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, mul, 0xff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint8_t, uint8, 0xff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint8_t, uint8, 0xff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT16) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, add, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, sub, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, mul, (-32767-1), 0x7fff) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int16_t, int16, (-32767-1), 0x7fff) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int16_t, int16, (-32767-1), 0x7fff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, add, UShortAdd) 
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, sub, UShortSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, mul, UShortMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT16) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, add, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, sub, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, mul, 0xffff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint16_t, uint16, 0xffff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint16_t, uint16, 0xffff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT32) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, add, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, sub, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, mul, (-0x7fffffffLL-1), 0x7fffffffLL) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT32) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, add, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, sub, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, mul, 0xffffffffUL) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint32_t, uint32, 0xffffffffUL) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint32_t, uint32, 0xffffffffUL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT64) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, add, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, 
sub, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, mul, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#else
+PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#endif
+PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, mul)
+#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, add, ULongLongAdd)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, sub, ULongLongSub)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, mul, ULongLongMult)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT64)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, add, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, sub, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, mul, 0xffffffffffffffffULL)
+#else
+PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+#endif
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+
+#endif /* !defined(PSNIP_SAFE_NO_FIXED) */
+
+#define PSNIP_SAFE_C11_GENERIC_SELECTION(res, op) \
+  _Generic((*res), \
+           char: psnip_safe_char_##op, \
+           unsigned char: psnip_safe_uchar_##op, \
+           short: psnip_safe_short_##op, \
+           unsigned short: psnip_safe_ushort_##op, \
+           int: psnip_safe_int_##op, \
+           unsigned int: psnip_safe_uint_##op, \
+           long: psnip_safe_long_##op, \
+           unsigned long: psnip_safe_ulong_##op, \
+           long long: psnip_safe_llong_##op, \
+           unsigned long long: psnip_safe_ullong_##op)
+
+#define PSNIP_SAFE_C11_GENERIC_BINARY_OP(op, res, a, b) \
+  PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, a, b)
+#define PSNIP_SAFE_C11_GENERIC_UNARY_OP(op, res, v) \
+  PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, v)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+#define psnip_safe_add(res, a, b) !__builtin_add_overflow(a, b, res)
+#define psnip_safe_sub(res, a, b) !__builtin_sub_overflow(a, b, res)
+#define psnip_safe_mul(res, a, b) !__builtin_mul_overflow(a, b, res)
+/* GCC/Clang provide no __builtin_div_overflow or __builtin_mod_overflow,
+ * so div and mod fall back to the C11 generic selections above. */
+#define psnip_safe_div(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(div, res, a, b)
+#define psnip_safe_mod(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mod, res, a, b)
+#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v)
+
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* There are no fixed-length or size selections because they cause an
+ * error about _Generic specifying two compatible types. Hopefully
+ * this doesn't cause problems on exotic platforms, but if it does
+ * please let me know and I'll try to figure something out. */
+
+#define psnip_safe_add(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(add, res, a, b)
+#define psnip_safe_sub(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(sub, res, a, b)
+#define psnip_safe_mul(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mul, res, a, b)
+#define psnip_safe_div(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(div, res, a, b)
+#define psnip_safe_mod(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mod, res, a, b)
+#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v)
+#endif
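+
+/* Usage sketch (illustrative, not part of the upstream psnip API): each
+ * type-generic psnip_safe_* macro evaluates to non-zero on success and
+ * writes the overflow-checked result through its first argument, so
+ * callers can branch instead of risking undefined behavior:
+ *
+ *     int sum;
+ *     if (psnip_safe_add(&sum, x, y)) {
+ *         // sum now holds x + y, computed without overflow
+ *     } else {
+ *         // x + y would overflow int; handle the error
+ *     }
+ */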
+#if !defined(PSNIP_SAFE_HAVE_BUILTINS) && (defined(PSNIP_SAFE_EMULATE_NATIVE) || defined(PSNIP_BUILTIN_EMULATE_NATIVE))
+# define __builtin_sadd_overflow(a, b, res) (!psnip_safe_int_add(res, a, b))
+# define __builtin_saddl_overflow(a, b, res) (!psnip_safe_long_add(res, a, b))
+# define __builtin_saddll_overflow(a, b, res) (!psnip_safe_llong_add(res, a, b))
+# define __builtin_uadd_overflow(a, b, res) (!psnip_safe_uint_add(res, a, b))
+# define __builtin_uaddl_overflow(a, b, res) (!psnip_safe_ulong_add(res, a, b))
+# define __builtin_uaddll_overflow(a, b, res) (!psnip_safe_ullong_add(res, a, b))
+
+# define __builtin_ssub_overflow(a, b, res) (!psnip_safe_int_sub(res, a, b))
+# define __builtin_ssubl_overflow(a, b, res) (!psnip_safe_long_sub(res, a, b))
+# define __builtin_ssubll_overflow(a, b, res) (!psnip_safe_llong_sub(res, a, b))
+# define __builtin_usub_overflow(a, b, res) (!psnip_safe_uint_sub(res, a, b))
+# define __builtin_usubl_overflow(a, b, res) (!psnip_safe_ulong_sub(res, a, b))
+# define __builtin_usubll_overflow(a, b, res) (!psnip_safe_ullong_sub(res, a, b))
+
+# define __builtin_smul_overflow(a, b, res) (!psnip_safe_int_mul(res, a, b))
+# define __builtin_smull_overflow(a, b, res) (!psnip_safe_long_mul(res, a, b))
+# define __builtin_smulll_overflow(a, b, res) (!psnip_safe_llong_mul(res, a, b))
+# define __builtin_umul_overflow(a, b, res) (!psnip_safe_uint_mul(res, a, b))
+# define __builtin_umull_overflow(a, b, res) (!psnip_safe_ulong_mul(res, a, b))
+# define __builtin_umulll_overflow(a, b, res) (!psnip_safe_ullong_mul(res, a, b))
+#endif
+
+#endif /* !defined(PSNIP_SAFE_H) */
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h
new file mode 100644
index 0000000000000000000000000000000000000000..764a4440ee4973dd6506c3c5d467e5c5a260e428
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <time.h>
+
+#include "arrow/util/visibility.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A less featureful implementation of strptime() for platforms lacking
+// a standard implementation (e.g. Windows).
+ARROW_EXPORT char* arrow_strptime(const char* __restrict, const char* __restrict,
+                                  struct tm* __restrict);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
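+
+// Usage sketch (illustrative): arrow_strptime follows the standard
+// strptime() calling convention, returning a pointer past the last
+// consumed character on success and NULL on failure:
+//
+//   struct tm parsed = {0};
+//   if (arrow_strptime("2023-01-15", "%Y-%m-%d", &parsed) != NULL) {
+//     // parsed.tm_year == 123 (years since 1900), parsed.tm_mon == 0,
+//     // parsed.tm_mday == 15
+//   }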
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h
new file mode 100644
index 0000000000000000000000000000000000000000..a33cdf8610dd6aa3b22bc24eef8ea3f871b8be9f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h
@@ -0,0 +1,18 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/vendored/xxhash/xxhash.h"
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h
new file mode 100644
index 0000000000000000000000000000000000000000..a18e8c762daaaaa0db8974232a876315ed6ca6f2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h
@@ -0,0 +1,6773 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Header File
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other materials provided with the
+ *      distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ *   - xxHash homepage: https://www.xxhash.com
+ *   - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ *     32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ *     64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ *   - Modern 64-bit and 128-bit hash function family which features improved
+ *     strength and performance across the board, especially on smaller data.
+ *     It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
+ * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
+ * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
+ * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
+ * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
+ * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
+ * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
+ * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
+ * | City64               |         |    64 |        22.0 GB/s |                76.6 |
+ * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
+ * | City128              |         |   128 |        21.7 GB/s |                57.7 |
+ * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
+ * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
+ * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
+ * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
+ * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
+ * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
+ * | City32               |         |    32 |         9.1 GB/s |                66.0 |
+ * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
+ * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
+ * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
+ * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
+ * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
+ * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
+ * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
+ * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
+ * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
+ * @note
+ *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ *     even though it is mandatory on x64.
+ *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ *     by modern standards.
+ *   - Small data velocity is a rough average of algorithm's efficiency for small
+ *     data. For more accurate information, see the wiki.
+ *   - More benchmarks and strength tests are found on the wiki:
+ *     https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ *    For functions which take an input and length parameter, the following
+ *    requirements are assumed:
+ *    - The range from [`input`, `input + length`) is valid, readable memory.
+ *      - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ *    - For C++, the objects must have the *TriviallyCopyable* property, as the
+ *      functions access bytes directly as if it was an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ *   #include <string.h>
+ *   #include "xxhash.h"
+ *
+ *   // Example for a function which hashes a null terminated string with XXH32().
+ *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ *   {
+ *       // NULL pointers are only valid if the length is zero
+ *       size_t length = (string == NULL) ? 0 : strlen(string);
+ *       return XXH32(string, length, seed);
+ *   }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ *   #include <stdio.h>
+ *   #include <assert.h>
+ *   #include "xxhash.h"
+ *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ *   XXH64_hash_t hashFile(FILE* f)
+ *   {
+ *       // Allocate a state struct. Do not just use malloc() or new.
+ *       XXH3_state_t* state = XXH3_createState();
+ *       assert(state != NULL && "Out of memory!");
+ *       // Reset the state to start a new hashing session.
+ *       XXH3_64bits_reset(state);
+ *       char buffer[4096];
+ *       size_t count;
+ *       // Read the file in chunks
+ *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ *           // Run update() as many times as necessary to process the data
+ *           XXH3_64bits_update(state, buffer, count);
+ *       }
+ *       // Retrieve the finalized hash. This will not change the state.
+ *       XXH64_hash_t result = XXH3_64bits_digest(state);
+ *       // Free the state. Do not use free().
+ *       XXH3_freeState(state);
+ *       return result;
+ *   }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ *  INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #define XXH_IMPLEMENTATION
+ *   #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`. + * + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ +# define XXH_INLINE_ALL +# undef XXH_INLINE_ALL +/*! + * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif + + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/*! @brief Marks a global symbol. 
*/ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) +/* XXH3_128bits */ +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) +# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define 
XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+*  Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 8
+#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Common basic types
+******************************/
+#include <stddef.h>   /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+    XXH_OK = 0, /*!< OK */
+    XXH_ERROR   /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+*  32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef uint32_t XXH32_hash_t;
+
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+    typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+    typedef unsigned long XXH32_hash_t;
+# else
+#   error "unsupported platform: need a 32-bit type"
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ *   and 64-bit systems, and offers true 64/128 bit hash results.
+ * + * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families + * @see @ref XXH32_impl for implementation details + * @{ + */ + +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * See @ref single_shot_example "Single Shot Example" for an example. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +#ifndef XXH_NO_STREAM +/*! + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * @see streaming_example at the top of @ref xxhash.h for an example. + */ + +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; + +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. 
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *  @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/*******   Canonical representation   *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+    unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
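+/* Usage sketch (illustrative; `buffer` and `size` are placeholders):
+ * serializing a hash in canonical big-endian form so the stored bytes are
+ * identical regardless of host endianness.
+ *
+ *     XXH32_hash_t hash = XXH32(buffer, size, 0);
+ *     XXH32_canonical_t canonical;
+ *     XXH32_canonicalFromHash(&canonical, hash);
+ *     // canonical.digest now holds 4 portable bytes;
+ *     // XXH32_hashFromCanonical(&canonical) recovers the native value.
+ */
+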
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it's been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+*  64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+    /* LP64 ABI says uint64_t is unsigned long */
+    typedef unsigned long XXH64_hash_t;
+# else
+    /* the following type must have a width of 64-bit */
+    typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ *   and offers true 64/128 bit hash results.
+ *   It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit hash. + * + * @see + * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/*! + * @brief The opaque state struct for the XXH64 streaming API. + * + * @see XXH64_state_s for details. + */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! + * @brief Allocates an @ref XXH64_state_t. + * + * Must be freed with XXH64_freeState(). + * @return An allocated XXH64_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t. + * + * Must be allocated with XXH64_createState(). + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH64_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash64 value from that state. 
+ */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ +/******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); + +#ifndef XXH_NO_XXH3 + +/*! + * @} + * ************************************************************************ + * @defgroup XXH3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. + * + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generate exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/*! + * @brief 64-bit unseeded variant of XXH3. 
+ *
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0; however,
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ *    XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
+ * @see
+ *    XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see
+ *    XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief 64-bit seeded variant of XXH3
+ *
+ * This variant generates a custom secret on the fly based on the default secret,
+ * altered using the `seed` value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @note
+ *    seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * @param input The data to hash.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ *    XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ *    XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief 64-bit variant of XXH3 with a custom "secret".
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing "XXH3_generateSecret()" instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ */
+
+/*!
+ * @brief The opaque state struct for the XXH3 streaming API.
+ *
+ * @see XXH3_state_s for details.
+ */
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
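+ *
+ * Example: one sketch of a use case, re-using a partially-fed state for two
+ * inputs that share a common prefix (`prefix_state`, `suffix`, and
+ * `suffix_len` are illustrative assumptions):
+ * @code{.c}
+ *    // prefix_state has already consumed the shared prefix
+ *    XXH3_state_t* const branch = XXH3_createState();
+ *    if (branch != NULL) {
+ *        XXH3_copyState(branch, prefix_state);
+ *        XXH3_64bits_update(branch, suffix, suffix_len);
+ *        XXH64_hash_t const h = XXH3_64bits_digest(branch);
+ *        (void)h;
+ *        XXH3_freeState(branch);
+ *    }
+ * @endcode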
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * The digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * The digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
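+ *
+ * Example: printing a 128-bit hash as 32 hex digits, high half first
+ * (a small sketch; the input bytes are illustrative):
+ * @code{.c}
+ *    #include <stdio.h>
+ *    XXH128_hash_t const h = XXH3_128bits("data", 4);
+ *    printf("%016llx%016llx\n",
+ *           (unsigned long long)h.high64,
+ *           (unsigned long long)h.low64);
+ * @endcode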
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+    XXH64_hash_t high64;  /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Unseeded 128-bit variant of XXH3
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0; however,
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ *    XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
+ * @see
+ *    XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see
+ *    XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have the same meaning as their 64-bit counterparts.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
+ * The digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
+ * The digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return: >0 if *h128_1  > *h128_2
+ *          =0 if *h128_1 == *h128_2
+ *          <0 if *h128_1  < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
+
+
+/******* Canonical representation *******/
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
+ * Never **ever** access their members directly.
+ */
+
+/*!
+ * @internal
+ * @brief Structure for XXH32 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH32_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH64_state_s, XXH3_state_s
+ */
+struct XXH32_state_s {
+   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+   XXH32_hash_t v[4];         /*!< Accumulator lanes */
+   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
+   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it. */
+};   /* typedef'd to XXH32_state_t */
+
+
+#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
+
+/*!
+ * @internal
+ * @brief Structure for XXH64 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH64_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH32_state_s, XXH3_state_s
+ */
+struct XXH64_state_s {
+   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
+   XXH64_hash_t v[4];         /*!< Accumulator lanes */
+   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
+   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
+   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
+};   /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+#  include <stdalign.h>
+#  define XXH_ALIGN(n)      alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+#  define XXH_ALIGN(n)      alignas(n)
+#elif defined(__GNUC__)
+#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+#  define XXH_ALIGN(n)      __declspec(align(n))
+#else
+#  define XXH_ALIGN(n)   /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
+    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+    && defined(__GNUC__)
+#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64bits_update(), XXH3_128bits_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with a dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+       /*!< Used to store a custom secret generated from a seed. */
+   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+       /*!< The internal buffer. @see XXH32_state_s::mem32 */
+   XXH32_hash_t bufferedSize;
+       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+   XXH32_hash_t useSeed;
+       /*!< Reserved field. Needed for padding on 64-bit. */
+   size_t nbStripesSoFar;
+       /*!< Number of stripes processed. */
+   XXH64_hash_t totalLen;
+       /*!< Total length hashed. 64-bit even on 32-bit targets. */
+   size_t nbStripesPerBlock;
+       /*!< Number of stripes per block. */
+   size_t secretLimit;
+       /*!< Size of @ref customSecret or @ref extSecret */
+   XXH64_hash_t seed;
+       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+   XXH64_hash_t reserved64;
+       /*!< Reserved field. */
+   const unsigned char* extSecret;
+       /*!< Reference to an external secret for the _withSecret variants, NULL
+        *   for other variants. */
+   /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr)                       \
+    do {                                                     \
+        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+        tmp_xxh3_state_ptr->seed = 0;                        \
+        tmp_xxh3_state_ptr->extSecret = NULL;                \
+    } while(0)
+
+
+/*!
+ * A simple alias to the seeded variant, @ref XXH3_128bits_withSeed().
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than a 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even a small one,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ *    #include <stdio.h>
+ *    #include <stdlib.h>
+ *    #include <string.h>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Hashes argv[2] using the entropy from argv[1].
+ *    int main(int argc, char* argv[])
+ *    {
+ *        char secret[XXH3_SECRET_SIZE_MIN];
+ *        if (argc != 3) { return 1; }
+ *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ *        XXH64_hash_t h = XXH3_64bits_withSecret(
+ *             argv[2], strlen(argv[2]),
+ *             secret, sizeof(secret)
+ *        );
+ *        printf("%016llx\n", (unsigned long long) h);
+ *    }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ *    #include <string>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Slow, seeds each time
+ *    class HashSlow {
+ *        XXH64_hash_t seed;
+ *    public:
+ *        HashSlow(XXH64_hash_t s) : seed{s} {}
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ *        }
+ *    };
+ *    // Fast, caches the seeded secret for future uses.
+ *    class HashFast {
+ *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    public:
+ *        HashFast(XXH64_hash_t s) {
+ *            XXH3_generateSecret_fromSeed(secret, s);
+ *        }
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{
+ *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ *            };
+ *        }
+ *    };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
+ * @param seed The 64-bit seed to derive the secret from.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as the `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" inputs,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to flip each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+                              XXH_NOESCAPE const void* secret, size_t secretSize,
+                              XXH64_hash_t seed);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+                               XXH_NOESCAPE const void* secret, size_t secretSize,
+                               XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                    XXH_NOESCAPE const void* secret, size_t secretSize,
+                                    XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                     XXH_NOESCAPE const void* secret, size_t secretSize,
+                                     XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#  define XXH_IMPLEMENTATION
+#endif
+
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires the implementation to be visible to the compiler,
+ * hence it must be included alongside the header.
+ * Previously, the implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in the /include directory.
+ *
+ * xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+#  define XXH_IMPLEM_13a8737387
+
+/* *************************************
+* Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+#  define XXH_NO_LONG_LONG
+#  undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The switch below allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ *   @par
+ *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ *     eliminate the function call and treat it as an unaligned access.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ *   @par
+ *     Depends on compiler extensions and is therefore not portable.
+ *     This method is safe _if_ your compiler supports it,
+ *     and *generally* as fast or faster than `memcpy`.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ *   @par
+ *     Casts directly and dereferences. This method doesn't depend on the
+ *     compiler, but it violates the C standard as it directly dereferences an
+ *     unaligned pointer. It can generate buggy code on targets which do not
+ *     support unaligned memory accesses, but in some circumstances, it's the
+ *     only known way to get the most performance.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ *   @par
+ *     Also portable. This can generate the best code on old compilers which don't
+ *     inline small `memcpy()` calls, and it might also be faster on big-endian
+ *     systems which lack a native byteswap instruction. However, some compilers
+ *     will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ *   care, as what works on one compiler/platform/optimization level may cause
+ *   another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+#  define XXH_FORCE_MEMORY_ACCESS 0
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage of forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. 
When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed + * comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies the + * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, + * and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single shot functions just use the + * streaming API. + */ +# define XXH_SIZE_OPT 0 + +/*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips + * which are platforms known to offer good unaligned memory accesses performance. + * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ +# define XXH_FORCE_ALIGN_CHECK 0 + +/*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. + */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. This + * happens when xxHash is compiled into a global symbol. Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers + * that are *sometimes* force inline on -Og, and it is impossible to automatically + * detect this optimization level. 
+ *
+ */
+#  define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable for performance,
+ * but depending on the exact architecture, a jmp may be preferable.
+ *
+ * This setting only makes a potential difference for very small inputs.
+ */
+#  define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+#  define XXH_OLD_NAMES
+#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+#  define XXH_NO_STREAM
+#  undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+   /* prefer __packed__ structures (method 1) for GCC;
+    * however, pre-ARMv7 with unaligned access (e.g. Raspbian armhf) still emits byte shifting
+    * under method 1, so we use memcpy, which for some reason does unaligned loads. */
+#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+   /* default to 1 for -Os or -Oz */
+#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+#    define XXH_SIZE_OPT 1
+#  else
+#    define XXH_SIZE_OPT 0
+#  endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
+   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+#  if XXH_SIZE_OPT >= 1 || \
+      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
+#    define XXH_NO_INLINE_HINTS 1
+#  else
+#    define XXH_NO_INLINE_HINTS 0
+#  endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+     || !defined(XXH_INLINE_ALL)
+#    define XXH3_INLINE_SECRET 0
+#  else
+#    define XXH3_INLINE_SECRET 1
+#  endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+#  define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc() / free().
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
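+ *
+ * Under this flag, the heap-backed entry points degrade as sketched below
+ * (the fallback strategy is an illustrative suggestion):
+ *
+ *     XXH3_state_t* s = XXH3_createState();  // always NULL under XXH_NO_STDLIB
+ *     if (s == NULL) {
+ *         // fall back to one-shot hashing, or to a statically
+ *         // allocated state (see XXH_STATIC_LINKING_ONLY)
+ *     }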
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+    return memcpy(dest,src,size);
+}
+
+#include <limits.h>   /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
+#  if defined(__GNUC__) || defined(__clang__)
+#    define XXH_FORCE_INLINE static __attribute__((unused))
+#  else
+#    define XXH_FORCE_INLINE static
+#  endif
+#  define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+#  define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER)  /* Visual Studio */
+#  define XXH_FORCE_INLINE static __forceinline
+#  define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
+#  define XXH_FORCE_INLINE static inline
+#  define XXH_NO_INLINE static
+#else
+#  define XXH_FORCE_INLINE static
+#  define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+* Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+#  ifdef DEBUGLEVEL /* backwards compat */
+#    define XXH_DEBUGLEVEL DEBUGLEVEL
+#  else
+#    define XXH_DEBUGLEVEL 0
+#  endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+#  include <assert.h>   /* note: can still be disabled with NDEBUG */
+#  define XXH_ASSERT(c)   assert(c)
+#else
+#  if defined(__INTEL_COMPILER)
+#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
+#  else
+#    define XXH_ASSERT(c)   XXH_ASSUME(c)
+#  endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+#  else
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+#  endif
+#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (eg eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. + * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar(). + */ +#if defined(__GNUC__) || defined(__clang__) +# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var)) +#else +# define XXH_COMPILER_GUARD(var) ((void)0) +#endif + +/* Specifically for NEON vectors which use the "w" constraint, on + * Clang. */ +#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) +# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var)) +#else +# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) +#endif + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; +#endif +typedef XXH32_hash_t xxh_u32; + +#ifdef XXH_OLD_NAMES +# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif + +/* *** Memory access *** */ + +/*! + * @internal + * @fn xxh_u32 XXH_read32(const void* ptr) + * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit native endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32(const void* ptr) + * @brief Reads an unaligned 32-bit little endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readBE32(const void* ptr) + * @brief Reads an unaligned 32-bit big endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit big endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) + * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is + * always @ref XXH_alignment::XXH_unaligned. + * + * @param ptr The pointer to read from. + * @param align Whether @p ptr is aligned. + * @pre + * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte + * aligned. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. 
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/*
+ * Force direct memory access. Only works on CPUs which support unaligned memory
+ * access in hardware.
+ */
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
+    return *((const xxh_unalign32*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+    xxh_u32 val;
+    XXH_memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif   /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* ***   Endianness   *** */
+
+/*!
+ * @ingroup tuning
+ * @def XXH_CPU_LITTLE_ENDIAN
+ * @brief Whether the target is little endian.
+ *
+ * Defined to 1 if the target is little endian, or 0 if it is big endian.
+ * It can be defined externally, for example on the compiler command line.
+ *
+ * If it is not defined,
+ * a runtime check (which is usually constant folded) is used instead.
+ *
+ * @note
+ *   This is not necessarily defined to an integer constant.
+ *
+ * @see XXH_isLittleEndian() for the runtime check.
+ */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+/*
+ * Try to detect endianness automatically, to avoid the nonstandard behavior
+ * in `XXH_isLittleEndian()`
+ */
+#  if defined(_WIN32) /* Windows is always little endian */ \
+     || defined(__LITTLE_ENDIAN__) \
+     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#    define XXH_CPU_LITTLE_ENDIAN 1
+#  elif defined(__BIG_ENDIAN__) \
+     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#    define XXH_CPU_LITTLE_ENDIAN 0
+#  else
+/*!
+ * @internal
+ * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ *
+ * Most compilers will constant fold this.
+ */
+static int XXH_isLittleEndian(void)
+{
+    /*
+     * Portable and well-defined behavior.
+     * Don't use static: it is detrimental to performance.
+     */
+    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
+    return one.c[0];
+}
+#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
+#  endif
+#endif
+
+
+
+
+/* ****************************************
+*  Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef __has_builtin
+#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#  define XXH_HAS_BUILTIN(x) 0
+#endif
+
+
+
+/*
+ * C23 and future versions have standard "unreachable()".
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * #  include <stddef.h>
+ * #  ifdef unreachable
+ * #    define XXH_UNREACHABLE() unreachable()
+ * #  endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * #  include <utility>
+ * #  define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<version>` in `extern "C"` blocks
+ * doesn't work on GCC 12.
+ */
+
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+#  define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+#  define XXH_UNREACHABLE() __assume(0)
+
+#else
+#  define XXH_UNREACHABLE()
+#endif
+
+#if XXH_HAS_BUILTIN(__builtin_assume)
+#  define XXH_ASSUME(c) __builtin_assume(c)
+#else
+#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ *   @p r > 0 && @p r < 32
+ * @note
+ *   @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+#  define XXH_rotl32 __builtin_rotateleft32
+#  define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+typedef enum {
+    XXH_aligned,  /*!< Aligned */
+    XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[0]
+         | ((xxh_u32)bytePtr[1] << 8)
+         | ((xxh_u32)bytePtr[2] << 16)
+         | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[3]
+         | ((xxh_u32)bytePtr[2] << 8)
+         | ((xxh_u32)bytePtr[1] << 16)
+         | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? 
XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +/*! @ingroup public */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +/*! + * @} + * @defgroup XXH32_impl XXH32 implementation + * @ingroup impl + * + * Details on the XXH32 implementation. + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +/*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. 
+ * + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 hash) +{ + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(hash); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(hash); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); + } + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. 
+ * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + xxh_u32 h32; + + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=16) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; +} + + +/*! 
@ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + + do { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); + } else { + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! + * @ingroup XXH32_family + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ +/*! + * @} + * @ingroup impl + * @{ + */ +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. 
Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64(xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ +/*! + * @} + * @defgroup XXH64_impl XXH64 implementation + * @ingroup impl + * + * Details on the XXH64 implementation. 
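+ *
+ * For orientation, the one-shot form of this family is simply (an
+ * illustrative sketch: buf and bufSize are hypothetical caller-side names,
+ * and only the public API declared earlier in this header is assumed):
+ *
+ *   XXH64_hash_t h = XXH64(buf, bufSize, (XXH64_hash_t)0);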
+ * @{
+ */
+/* #define rather than static const, to be used as initializers */
+#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+#  define PRIME64_1 XXH_PRIME64_1
+#  define PRIME64_2 XXH_PRIME64_2
+#  define PRIME64_3 XXH_PRIME64_3
+#  define PRIME64_4 XXH_PRIME64_4
+#  define PRIME64_5 XXH_PRIME64_5
+#endif
+
+/*! @copydoc XXH32_round */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+    acc += input * XXH_PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= XXH_PRIME64_1;
+    return acc;
+}
+
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+    return acc;
+}
+
+/*! @copydoc XXH32_avalanche */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+    hash ^= hash >> 33;
+    hash *= XXH_PRIME64_2;
+    hash ^= hash >> 29;
+    hash *= XXH_PRIME64_3;
+    hash ^= hash >> 32;
+    return hash;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash.
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+    if (ptr==NULL) XXH_ASSERT(len == 0);
+    len &= 31;
+    while (len >= 8) {
+        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+        ptr += 8;
+        hash ^= k1;
+        hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+        len -= 8;
+    }
+    if (len >= 4) {
+        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+        ptr += 4;
+        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+        len -= 4;
+    }
+    while (len > 0) {
+        hash ^= (*ptr++) * XXH_PRIME64_5;
+        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+        --len;
+    }
+    return XXH64_avalanche(hash);
+}
+
+#ifdef XXH_OLD_NAMES
+#  define PROCESS1_64 XXH_PROCESS1_64
+#  define PROCESS4_64 XXH_PROCESS4_64
+#  define PROCESS8_64 XXH_PROCESS8_64
+#else
+#  undef XXH_PROCESS1_64
+#  undef XXH_PROCESS4_64
+#  undef XXH_PROCESS8_64
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+    xxh_u64 h64;
+    if (input==NULL) XXH_ASSERT(len == 0);
+
+    if (len>=32) {
+        const xxh_u8* const bEnd = input + len;
+        const xxh_u8* const limit = bEnd - 31;
+        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+        xxh_u64 v2 = seed + XXH_PRIME64_2;
+        xxh_u64 v3 = seed + 0;
+        xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+        } while (input<limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
+            + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + XXH_PRIME64_5;
+    }
+
+    h64 += (xxh_u64) len;
+
+    return XXH64_finalize(h64, input, len, align);
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_state_t state;
+    XXH64_reset(&state, seed);
+    XXH64_update(&state, (const xxh_u8*)input, len);
+    return XXH64_digest(&state);
+#else
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+    }   }
+
+    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/******* Hash Streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+    XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+    XXH_ASSERT(statePtr != NULL);
+    memset(statePtr, 0, sizeof(*statePtr));
+    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+    statePtr->v[1] = seed + XXH_PRIME64_2;
+    statePtr->v[2] = seed + 0;
+    statePtr->v[3] = seed - XXH_PRIME64_1;
+    return XXH_OK;
+}
+
+/*!
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */
+#  define XXH_RESTRICT   /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
+#  define XXH_RESTRICT   restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+   || (defined (__clang__)) \
+   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+#  define XXH_RESTRICT   __restrict
+#else
+#  define XXH_RESTRICT   /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
+  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+  || defined(__clang__)
+#    define XXH_likely(x) __builtin_expect(x, 1)
+#    define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+#    define XXH_likely(x) (x)
+#    define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+#  ifdef __has_include
+#    define XXH_HAS_INCLUDE(x) __has_include(x)
+#  else
+#    define XXH_HAS_INCLUDE(x) 0
+#  endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  if defined(__ARM_FEATURE_SVE)
+#    include <arm_sve.h>
+#  endif
+#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+   || (defined(_M_ARM) && _M_ARM >= 7) \
+   || defined(_M_ARM64) || defined(_M_ARM64EC) \
+   || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+#    define inline __inline__  /* circumvent a clang bug */
+#    include <arm_neon.h>
+#    undef inline
+#  elif defined(__AVX2__)
+#    include <immintrin.h>
+#  elif defined(__SSE2__)
+#    include <emmintrin.h>
+#  endif
+#endif
+
+#if defined(_MSC_VER)
+#  include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ *   xxh_u64 x;
+ *   x ^= (x >> 47); // good
+ *   x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ *   x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ *   // note: funnel shifts are not usually cheap.
+ *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ *   x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ * - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ *   32 bits in the shift.
+ * - The shift result will always fit in the lower 32 bits, and therefore,
+ *   we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ *  - Usable unaligned access
+ *  - A 32-bit or 64-bit ALU
+ *      - If 32-bit, a decent ADC instruction
+ *  - A 32 or 64-bit multiply with a 64-bit result
+ *  - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * internal macro XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
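+ *
+ * e.g. a user targeting AVX-512 could pre-define both knobs before
+ * including this header (a sketch only; both this and @ref XXH_VECTOR are
+ * normally left to auto-detection):
+ *
+ *   #define XXH_VECTOR    XXH_AVX512
+ *   #define XXH_ACC_ALIGN 64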
+ */
+#  define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+#  define XXH_SCALAR 0
+#  define XXH_SSE2   1
+#  define XXH_AVX2   2
+#  define XXH_AVX512 3
+#  define XXH_NEON   4
+#  define XXH_VSX    5
+#  define XXH_SVE    6
+#endif
+
+#ifndef XXH_VECTOR    /* can be defined on command line */
+#  if defined(__ARM_FEATURE_SVE)
+#    define XXH_VECTOR XXH_SVE
+#  elif ( \
+        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+   ) && ( \
+        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+   )
+#    define XXH_VECTOR XXH_NEON
+#  elif defined(__AVX512F__)
+#    define XXH_VECTOR XXH_AVX512
+#  elif defined(__AVX2__)
+#    define XXH_VECTOR XXH_AVX2
+#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+#    define XXH_VECTOR XXH_SSE2
+#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+     || (defined(__s390x__) && defined(__VEC__)) \
+     && defined(__GNUC__) /* TODO: IBM XL */
+#    define XXH_VECTOR XXH_VSX
+#  else
+#    define XXH_VECTOR XXH_SCALAR
+#  endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+#  ifdef _MSC_VER
+#    pragma warning(once : 4606)
+#  else
+#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+#  endif
+#  undef XXH_VECTOR
+#  define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+#  if defined(XXH_X86DISPATCH)
+#    define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
+#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
+#    define XXH_ACC_ALIGN 8
+#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
+#    define XXH_ACC_ALIGN 32
+#  elif XXH_VECTOR == XXH_NEON  /* neon */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_VSX   /* vsx */
+#    define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
+#    define XXH_ACC_ALIGN 64
+#  elif XXH_VECTOR == XXH_SVE   /* sve */
+#    define XXH_ACC_ALIGN 64
+#  endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+#  define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_ALIASING __attribute__((may_alias))
+#else
+#  define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ *   -O2 -mavx2 -march=haswell
+ * or
+ *   -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + +#if XXH_VECTOR == XXH_NEON + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 + * optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif + +/*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. 
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
+ *  |:----------------------|:--------------------|----------:|-----------:|------:|
+ *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
+ *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
+ *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
+ *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
+ * meaning it effectively becomes a worse version of 4.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+#   define XXH3_NEON_LANES 6
+#  else
+#   define XXH3_NEON_LANES XXH_ACC_NB
+#  endif
+# endif
+#endif  /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+#  pragma push_macro("bool")
+#  pragma push_macro("vector")
+#  pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+#  undef bool
+#  undef vector
+#  undef pixel
+
+#  if defined(__s390x__)
+#    include <s390intrin.h>
+#  else
+#    include <altivec.h>
+#  endif
+
+/* Restore the original macro values, if applicable. */
+#  pragma pop_macro("pixel")
+#  pragma pop_macro("vector")
+#  pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+#  if defined(__BIG_ENDIAN__) \
+  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#    define XXH_VSX_BE 1
+#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+#    warning "-maltivec=be is not recommended. Please use native endianness."
+#    define XXH_VSX_BE 1
+#  else
+#    define XXH_VSX_BE 0
+#  endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+#    define XXH_vec_revb vec_revb
+#  else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+    return vec_perm(val, val, vByteSwap);
+}
+#  endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+    xxh_u64x2 ret;
+    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+    ret = XXH_vec_revb(ret);
+# endif
+    return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on version.
+ */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+#  define XXH_vec_mulo vec_mulo
+#  define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+#  define XXH_vec_mulo __builtin_altivec_vmulouw
+#  define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
+    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
+    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
+    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
+    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
+    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
+    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+    acc = svadd_u64_x(mask, acc, mul);                               \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
+#else
+#  if XXH_SIZE_OPT >= 1
+#    define XXH_PREFETCH(ptr) (void)(ptr)
+#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+#  else
+#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
+#  endif
+#endif  /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE <
XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. 
+ * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. 
*/ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len ; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
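+ *
+ * As a concrete instance of the two-sample XOR (visible in
+ * XXH3_len_1to3_64b just below), the 1-3 byte path folds two 32-bit
+ * secret words into one 64-bit constant:
+ *
+ *   bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ *
+ * which the compiler can constant-fold whenever the secret is known at
+ * compile time.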
+ */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. 
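+ *
+ * For a sense of scale (our arithmetic, assuming the 1-in-2^63 figure
+ * above): at most 16 such mixes per hash gives at most 16 * 2^-63 = 2^-59
+ * odds per hash, i.e. roughly one in 5.8 * 10^17 random inputs.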
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
+  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
+    /*
+     * UGLY HACK:
+     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+     * slower code.
+     *
+     * By forcing seed64 into a register, we disrupt the cost model and
+     * cause it to scalarize. See `XXH32_round()`
+     *
+     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+     * GCC 9.2, despite both emitting scalar code.
+     *
+     * GCC generates much better scalar code than Clang for the rest of XXH3,
+     * which is why finding a more optimal codepath is of interest.
+     */
+    XXH_COMPILER_GUARD(seed64);
+#endif
+    {   xxh_u64 const input_lo = XXH_readLE64(input);
+        xxh_u64 const input_hi = XXH_readLE64(input+8);
+        return XXH3_mul128_fold64(
+            input_lo ^ (XXH_readLE64(secret)   + seed64),
+            input_hi ^ (XXH_readLE64(secret+8) - seed64)
+        );
+    }
+}
+
+/* For mid-range keys, XXH3 uses a Mum-hash variant. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                     XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(16 < len && len <= 128);
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+        /* Smaller and cleaner, but slightly slower. */
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+        } while (i-- != 0);
+#else
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc += XXH3_mix16B(input+48, secret+96, seed);
+                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
+                }
+                acc += XXH3_mix16B(input+32, secret+64, seed);
+                acc += XXH3_mix16B(input+len-48, secret+80, seed);
+            }
+            acc += XXH3_mix16B(input+16, secret+32, seed);
+            acc += XXH3_mix16B(input+len-32, secret+48, seed);
+        }
+        acc += XXH3_mix16B(input+0, secret+0, seed);
+        acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+        return XXH3_avalanche(acc);
+    }
+}
+
+#define XXH3_MIDSIZE_MAX 240
+
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                      XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    #define XXH3_MIDSIZE_STARTOFFSET 3
+    #define XXH3_MIDSIZE_LASTOFFSET  17
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+        xxh_u64 acc_end;
+        unsigned int const nbRounds = (unsigned int)len / 16;
+        unsigned int i;
+        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+        for (i=0; i<8; i++) {
+            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+        }
+        /* last bytes */
+        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+        XXH_ASSERT(nbRounds >= 8);
+        acc = XXH3_avalanche(acc);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
+         * Everywhere else, it uses scalar code.
+         *
+         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+         * would still be slower than UMAAL (see XXH_mult64to128).
+         *
+         * Unfortunately, Clang doesn't handle the long multiplies properly and
+         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+         * scalarized into an ugly mess of VMOV.32 instructions.
+         *
+         * This mess is difficult to avoid without turning autovectorization
+         * off completely, but they are usually relatively minor and/or not
+         * worth it to fix.
+         *
+         * This loop is the easiest to fix, as unlike XXH32, this pragma
+         * _actually works_ because it is a loop vectorization instead of an
+         * SLP vectorization.
+         */
+        #pragma clang loop vectorize(disable)
+#endif
+        for (i=8 ; i < nbRounds; i++) {
+            /*
+             * Prevents clang from unrolling the acc loop and interleaving with this one.
+             */
+            XXH_COMPILER_GUARD(acc);
+            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+        }
+        return XXH3_avalanche(acc + acc_end);
+    }
+}
+
+
+/* =======     Long Keys     ======= */
+
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+
+#ifdef XXH_OLD_NAMES
+#  define STRIPE_LEN XXH_STRIPE_LEN
+#  define ACC_NB XXH_ACC_NB
+#endif
+
+#ifndef XXH_PREFETCH_DIST
+#  ifdef __clang__
+#    define XXH_PREFETCH_DIST 320
+#  else
+#    if (XXH_VECTOR == XXH_AVX512)
+#      define XXH_PREFETCH_DIST 512
+#    else
+#      define XXH_PREFETCH_DIST 384
+#    endif
+#  endif  /* __clang__ */
+#endif  /* XXH_PREFETCH_DIST */
+
+/*
+ * These macros are to generate an XXH3_accumulate() function.
+ *
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
+void                                                        \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
+                       const xxh_u8* XXH_RESTRICT input,    \
+                       const xxh_u8* XXH_RESTRICT secret,   \
+                       size_t nbStripes)                    \
+{                                                           \
+    size_t n;                                               \
+    for (n = 0; n < nbStripes; n++ ) {                      \
+        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
+        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
+        XXH3_accumulate_512_##name(                         \
+                 acc,                                       \
+                 in,                                        \
+                 secret + n*XXH_SECRET_CONSUME_RATE);       \
+    }                                                       \
+}
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+    XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+    typedef int64_t xxh_i64;
+#else
+    /* the following type must have a width of 64-bit */
+    typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based on FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input into the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
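+ *
+ * Per 64-bit lane, one stripe performs, in effect (XXH3_scalarRound() below
+ * is the exact scalar form):
+ *
+ *    data_key    = input[i] ^ secret[i];
+ *    acc[i ^ 1] += input[i];                              (input preserved)
+ *    acc[i]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);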
+ */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
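+ *
+ * Since XXH_PRIME32_1 fits in 32 bits, each lane's 64-bit multiply below is
+ * decomposed as
+ *
+ *    x * PRIME  ==  lo32(x) * PRIME + (hi32(x) * PRIME << 32)   (mod 2^64)
+ *
+ * which is what the paired mul_epu32 calls in the SIMD implementations compute.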
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
+ */
+        const __m256i* const xsecret = (const __m256i *) secret;
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* data_vec    = xinput[i]; */
+            __m256i const data_vec    = _mm256_loadu_si256  (xinput+i);
+            /* key_vec     = xsecret[i]; */
+            __m256i const key_vec     = _mm256_loadu_si256  (xsecret+i);
+            /* data_key    = data_vec ^ key_vec; */
+            __m256i const data_key    = _mm256_xor_si256    (data_vec, key_vec);
+            /* data_key_lo = data_key >> 32; */
+            __m256i const data_key_lo = _mm256_srli_epi64   (data_key, 32);
+            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+            __m256i const product     = _mm256_mul_epu32    (data_key, data_key_lo);
+            /* xacc[i] += swap(data_vec); */
+            __m256i const data_swap   = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+            __m256i const sum         = _mm256_add_epi64(xacc[i], data_swap);
+            /* xacc[i] += product; */
+            xacc[i] = _mm256_add_epi64(product, sum);
+    }   }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 31) == 0);
+    {   __m256i* const xacc = (__m256i*) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+        const __m256i* const xsecret = (const __m256i *) secret;
+        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m256i const acc_vec     = xacc[i];
+            __m256i const shifted     = _mm256_srli_epi64   (acc_vec, 47);
+            __m256i const data_vec    = _mm256_xor_si256    (acc_vec, shifted);
+            /* xacc[i] ^= xsecret; */
+            __m256i const key_vec     = _mm256_loadu_si256  (xsecret+i);
+            __m256i const data_key    = _mm256_xor_si256    (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m256i const data_key_hi = _mm256_srli_epi64   (data_key, 32);
+            __m256i const prod_lo     = _mm256_mul_epu32    (data_key, prime32);
+            __m256i const prod_hi     = _mm256_mul_epu32    (data_key_hi, prime32);
+            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+    (void)(&XXH_writeLE64);
+    XXH_PREFETCH(customSecret);
+    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+
+        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
+              __m256i*       dest = (      __m256i*) customSecret;
+
+# if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+         *   - not extract the secret from SSE registers in the internal loop
+         *   - use fewer registers, and avoid pushing these registers onto the stack
+         */
+        XXH_COMPILER_GUARD(dest);
+# endif
+        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dest & 31) == 0);
+
+        /* GCC -O2 needs the loop unrolled manually */
+        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+        dest[5]
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
+ */
+        const __m128i* const xsecret = (const __m128i *) secret;
+        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m128i const acc_vec     = xacc[i];
+            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
+            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
+            /* xacc[i] ^= xsecret[i]; */
+            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
+            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
+            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
+            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+    (void)(&XXH_writeLE64);
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+# else
+        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+# endif
+        int i;
+
+        const void* const src16 = XXH3_kSecret;
+        __m128i* dst16 = (__m128i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
+         *   - not extract the secret from SSE registers in the internal loop
+         *   - use fewer registers, and avoid pushing these registers onto the stack
+         */
+        XXH_COMPILER_GUARD(dst16);
+# endif
+        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dst16 & 15) == 0);
+
+        for (i=0; i < nbRounds; ++i) {
+            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+    }   }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_NEON)
+
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+                 void const* XXH_RESTRICT secret, size_t lane);
+
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret, size_t lane);
+
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can have up to 15% speedup depending on the
+ * CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers instead of the other platforms which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the optimal number of lanes for Cortexes is 6, there
+ * need to be *three* versions of the accumulate operation: one handling
+ * 4 NEON lanes at a time, one handling the remaining 2 NEON lanes, and
+ * the scalar path for the rest.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
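+ *
+ * Concretely, with XXH3_NEON_LANES == 6 (the Cortex-tuned value mentioned
+ * above): lanes 0-3 take the 4-lane NEON loop below, lanes 4-5 the 2-lane
+ * NEON loop, and lanes 6-7 the scalar XXH3_scalarRound() loop.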
+ */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; + + size_t i; +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. */ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. 
+ * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); + } + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); + } + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. 
*/ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif + } + } +} +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + xacc[i] = acc_vec; + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = 
svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + +/* scalar variants - universal */ + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
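+ *
+ * One lane, in effect:
+ *
+ *    acc[lane] = (acc[lane] ^ (acc[lane] >> 47) ^ secret[lane]) * XXH_PRIME32_1;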
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret,
+                         size_t lane)
+{
+    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
+    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
+    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+    XXH_ASSERT(lane < XXH_ACC_NB);
+    {
+        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+        xxh_u64 acc64 = xacc[lane];
+        acc64 = XXH_xorshift64(acc64, 47);
+        acc64 ^= key64;
+        acc64 *= XXH_PRIME32_1;
+        xacc[lane] = acc64;
+    }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    size_t i;
+    for (i=0; i < XXH_ACC_NB; i++) {
+        XXH3_scalarScrambleRound(acc, secret, i);
+    }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    /*
+     * We need a separate pointer for the hack below,
+     * which requires a non-const pointer.
+     * Any decent compiler will optimize this out otherwise.
+     */
+    const xxh_u8* kSecretPtr = XXH3_kSecret;
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+    /*
+     * UGLY HACK:
+     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+     * placed sequentially, in order, at the top of the unrolled loop.
+     *
+     * While MOVK is great for generating constants (2 cycles for a 64-bit
+     * constant compared to 4 cycles for LDR), it fights for bandwidth with
+     * the arithmetic instructions.
+     *
+     *   I   L   S
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  ADD
+     *  SUB      STR
+     *           STR
+     *
+     * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that kSecretPtr has been changed), the pipelines are used more
+     * efficiently:
+     *
+     *   I   L   S
+     *      LDR
+     *  ADD LDR
+     *  SUB      STR
+     *           STR
+     *
+     * See XXH3_NEON_LANES for details on the pipeline.
+     *
+     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+     *   without hack: 2654.4 MB/s
+     *   with hack:    3202.9 MB/s
+     */
+    XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+        int i;
+        for (i=0; i < nbRounds; i++) {
+            /*
+             * The asm hack causes the compiler to assume that kSecretPtr aliases with
+             * customSecret, and on aarch64, this prevented LDP from merging two
+             * loads together for free. Putting the loads together before the stores
+             * properly generates LDP.
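+             *
+             * Each round below computes, in effect (little-endian reads and writes):
+             *   customSecret[16*i .. 16*i+7]    = kSecret[16*i .. 16*i+7]    + seed64
+             *   customSecret[16*i+8 .. 16*i+15] = kSecret[16*i+8 .. 16*i+15] - seed64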
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + XXH3_accumulate_512(acc, p, 
secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+    }   }
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+    return XXH3_mul128_fold64(
+               acc[0] ^ XXH_readLE64(secret),
+               acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+    xxh_u64 result64 = start;
+    size_t i = 0;
+
+    for (i = 0; i < 4; i++) {
+        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+         * XXH3_64bits, len == 256, Snapdragon 835:
+         *   without hack: 2063.7 MB/s
+         *   with hack:    2560.7 MB/s
+         */
+        XXH_COMPILER_GUARD(result64);
+#endif
+    }
+
+    return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+                           const void* XXH_RESTRICT secret, size_t secretSize,
+                           XXH3_f_accumulate f_acc,
+                           XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64;
+    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the vector loop.
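+ *
+ * With the default 192-byte kSecret, for instance, the internal loop above
+ * resolves to nbStripesPerBlock = (192 - 64) / 8 = 16 stripes per block,
+ * i.e. the accumulators are scrambled once every 16 * 64 = 1024 input bytes.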
+ */ +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ +#if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); +#endif + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) +{ + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! 
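+ * Minimal usage sketch (buf/bufSize stand for the caller's data; the seed
+ * value is arbitrary):
+ *
+ *    XXH64_hash_t h = XXH3_64bits_withSeed(buf, bufSize, 1234);
+ *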
+ * @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+    if (length <= XXH3_MIDSIZE_MAX)
+        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
+
+
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * Allocates a pointer that is always aligned to `align`.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
+ * nor, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: Functions like posix_memalign or _mm_malloc
+ * are avoided: To maintain portability, we would have to write a fallback
+ * like this anyways, and besides, testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: Overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
+    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
+    {   /* Overallocate to make room for manual realignment and an offset byte */
+        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+        if (base != NULL) {
+            /*
+             * Get the offset needed to align this pointer.
+             *
+             * Even if the returned pointer is aligned, there will always be
+             * at least one byte to store the offset to the original pointer.
+             */
+            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+            /* Add the offset for the now-aligned pointer */
+            xxh_u8* ptr = base + offset;
+
+            XXH_ASSERT((size_t)ptr % align == 0);
+
+            /* Store the offset immediately before the returned pointer. */
+            ptr[-1] = (xxh_u8)offset;
+            return ptr;
+        }
+        return NULL;
+    }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+    if (p != NULL) {
+        xxh_u8* ptr = (xxh_u8*)p;
+        /* Get the offset byte we added in XXH_alignedMalloc. */
+        xxh_u8 offset = ptr[-1];
+        /* Free the original malloc'd pointer */
+        xxh_u8* base = ptr - offset;
+        XXH_free(base);
+    }
+}
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * Must be freed with XXH3_freeState().
+ * @return An allocated XXH3_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+    if (state==NULL) return NULL;
+    XXH3_INITSTATE(state);
+    return state;
+}
+
+/*!
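+ * Typical streaming lifecycle (error handling omitted; data/size stand for
+ * the caller's input):
+ *
+ *    XXH3_state_t* s = XXH3_createState();
+ *    XXH3_64bits_reset(s);
+ *    XXH3_64bits_update(s, data, size);
+ *    XXH64_hash_t h = XXH3_64bits_digest(s);
+ *    XXH3_freeState(s);
+ *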
+ * @ingroup XXH3_family */
+/*!
+ * @brief Frees an @ref XXH3_state_t.
+ *
+ * Must be allocated with XXH3_createState().
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
+{
+    XXH_alignedFree(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
+{
+    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
+}
+
+static void
+XXH3_reset_internal(XXH3_state_t* statePtr,
+                    XXH64_hash_t seed,
+                    const void* secret, size_t secretSize)
+{
+    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+    XXH_ASSERT(statePtr != NULL);
+    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+    memset((char*)statePtr + initStart, 0, initLength);
+    statePtr->acc[0] = XXH_PRIME32_3;
+    statePtr->acc[1] = XXH_PRIME64_1;
+    statePtr->acc[2] = XXH_PRIME64_2;
+    statePtr->acc[3] = XXH_PRIME64_3;
+    statePtr->acc[4] = XXH_PRIME64_4;
+    statePtr->acc[5] = XXH_PRIME32_2;
+    statePtr->acc[6] = XXH_PRIME64_5;
+    statePtr->acc[7] = XXH_PRIME32_1;
+    statePtr->seed = seed;
+    statePtr->useSeed = (seed != 0);
+    statePtr->extSecret = (const unsigned char*)secret;
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+    if (statePtr == NULL) return XXH_ERROR;
+    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+    if (statePtr == NULL) return XXH_ERROR;
+    /* validate the secret before initializing the state */
+    if (secret == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+    XXH3_reset_internal(statePtr, 0, secret, secretSize);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+    if (statePtr == NULL) return XXH_ERROR;
+    if (seed==0) return XXH3_64bits_reset(statePtr);
+    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
+        XXH3_initCustomSecret(statePtr->customSecret, seed);
+    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
+{
+    if (statePtr == NULL) return XXH_ERROR;
+    if (secret == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
+    statePtr->useSeed = 1; /* always, even if seed64==0 */
+    return XXH_OK;
+}
+
+/*!
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
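+ * That is, a single call may finish the current partial block (triggering a
+ * scramble) and keep accumulating into the next block, with the position
+ * inside the block carried across calls via @p nbStripesSoFarPtr.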
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+                    XXH3_f_accumulate f_acc,
+                    XXH3_f_scrambleAcc f_scramble)
+{
+    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+    /* Process full blocks */
+    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+        /* Process the initial partial block... */
+        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+        do {
+            /* Accumulate and scramble */
+            f_acc(acc, input, initialSecret, nbStripesThisIter);
+            f_scramble(acc, secret + secretLimit);
+            input += nbStripesThisIter * XXH_STRIPE_LEN;
+            nbStripes -= nbStripesThisIter;
+            /* Then continue the loop with the full block size */
+            nbStripesThisIter = nbStripesPerBlock;
+            initialSecret = secret;
+        } while (nbStripes >= nbStripesPerBlock);
+        *nbStripesSoFarPtr = 0;
+    }
+    /* Process a partial block */
+    if (nbStripes > 0) {
+        f_acc(acc, input, initialSecret, nbStripes);
+        input += nbStripes * XXH_STRIPE_LEN;
+        *nbStripesSoFarPtr += nbStripes;
+    }
+    /* Return end pointer */
+    return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+#   define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+            const xxh_u8* XXH_RESTRICT input, size_t len,
+            XXH3_f_accumulate f_acc,
+            XXH3_f_scrambleAcc f_scramble)
+{
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
+        return XXH_OK;
+    }
+
+    XXH_ASSERT(state != NULL);
+    {   const xxh_u8* const bEnd = input + len;
+        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+        /* For some reason, gcc and MSVC seem to suffer greatly
+         * when operating on the accumulators directly in state.
+         * Operating on stack space instead seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + + } + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
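+     * For example, calling a digest after every update is valid: each digest
+     * returns the hash of all bytes ingested so far, and updates can continue
+     * afterwards.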
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + } else { /* bufferedSize < XXH_STRIPE_LEN */ + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= PRIME_MX2; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. 
+ * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + +#if XXH_SIZE_OPT >= 1 + { + /* Smaller, but slightly slower. 
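The mask-removal identity walked through above (inside XXH3_len_9to16_128b) is easy to spot-check numerically. A small standalone sketch, using XXH_PRIME32_2's value 0x85EBCA77; all arithmetic wraps modulo 2^64:

```cpp
#include <cassert>
#include <cstdint>

// Check: b + a*c == (a + b) + a*(c - 1), where a/b are the low/high
// halves of input_hi, exactly as in the comment above.
int main()
{
    const uint64_t input_hi = 0x0123456789ABCDEFULL;
    const uint64_t c = 0x85EBCA77ULL;  // XXH_PRIME32_2

    const uint64_t a = static_cast<uint32_t>(input_hi);   // low 32 bits
    const uint64_t b = input_hi & 0xFFFFFFFF00000000ULL;  // high 32 bits

    const uint64_t masked   = b + a * c;               // 32-bit oriented form
    const uint64_t unmasked = input_hi + a * (c - 1);  // 64-bit oriented form
    assert(masked == unmasked);
    return 0;
}
```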
*/ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed); + } while (i-- != 0); + } +#else + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); + } + acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); + } + acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); + } + acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); +#endif + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_NO_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + { XXH128_hash_t acc; + unsigned i; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + /* + * We set as `i` as offset + 32. We do this so that unchanged + * `len` can be used as upper bound. This reaches a sweet spot + * where both x86 and aarch64 get simple agen and good codegen + * for the loop. + */ + for (i = 32; i < 160; i += 32) { + acc = XXH128_mix32B(acc, + input + i - 32, + input + i - 16, + secret + i - 32, + seed); + } + acc.low64 = XXH3_avalanche(acc.low64); + acc.high64 = XXH3_avalanche(acc.high64); + /* + * NB: `i <= len` will duplicate the last 32-bytes if + * len % 32 was zero. This is an unfortunate necessity to keep + * the hash result stable. + */ + for (i=160; i <= len; i += 32) { + acc = XXH128_mix32B(acc, + input + i - 32, + input + i - 16, + secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, + seed); + } + /* last bytes */ + acc = XXH128_mix32B(acc, + input + len - 16, + input + len - 32, + secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, + (XXH64_hash_t)0 - seed); + + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)len * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs(acc, + secret + secretSize + - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + ~((xxh_u64)len * XXH_PRIME64_2)); + return h128; + } +} + +/* + * It's important for performance that XXH3_hashLong() is not inlined. 
+ */ +XXH_NO_INLINE XXH_PUREF XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * It's important for performance to pass @p secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. + */ +XXH3_WITH_SECRET_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ +#ifndef XXH_NO_STREAM +/* + * All initialization and update functions are identical to 64-bit streaming variant. + * The only difference is the finalization routine. + */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + return XXH3_64bits_reset(statePtr); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSeed(statePtr, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_64bits_update(state, input, len); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs(acc, + secret + state->secretLimit + XXH_STRIPE_LEN + - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); + return h128; + } + } + /* len <= XXH3_MIDSIZE_MAX : short code */ + if (state->seed) + return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ +/* 128-bit utility functions */ + +#include /* memcmp, memcpy */ + +/* return : 1 is equal, 0 if different */ +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) +{ + /* note : XXH128_hash_t is compact, it has no padding byte */ + return !(memcmp(&h1, &h2, sizeof(h1))); +} + +/* This prototype is compatible with stdlib's qsort(). + * @return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2) +{ + XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; + XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; + int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); + /* note : bets that, in most cases, hash values are different */ + if (hcmp) return hcmp; + return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); +} + + +/*====== Canonical representation ======*/ +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void +XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) { + hash.high64 = XXH_swap64(hash.high64); + hash.low64 = XXH_swap64(hash.low64); + } + XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); + XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src) +{ + XXH128_hash_t h; + h.high64 = XXH_readBE64(src); + h.low64 = XXH_readBE64(src->digest + 8); + return h; +} + + + +/* ========================================== + * Secret generators + * ========================================== + */ +#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) + +XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128) +{ + XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 ); + XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 ); +} + +/*! 
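The utilities above give a portable byte representation plus a total order for 128-bit hashes. A brief usage sketch (assuming the standard single-header build):

```cpp
#include <stdio.h>
#include <stdlib.h>
#include "xxhash.h"  /* assumes the single-header distribution */

int main(void)
{
    XXH128_hash_t h1 = XXH3_128bits("alpha", 5);
    XXH128_hash_t h2 = XXH3_128bits("omega", 5);

    /* Canonical form is big-endian and safe to write to disk or the wire. */
    XXH128_canonical_t c1;
    XXH128_canonicalFromHash(&c1, h1);

    /* Round-trip: canonical bytes decode back to the exact same hash. */
    XXH128_hash_t back = XXH128_hashFromCanonical(&c1);
    printf("round-trip equal: %d\n", XXH128_isEqual(h1, back)); /* prints 1 */

    /* XXH128_cmp is qsort-compatible; high64 is the primary sort key. */
    XXH128_hash_t arr[2] = { h1, h2 };
    qsort(arr, 2, sizeof(arr[0]), XXH128_cmp);
    return 0;
}
```

+/*!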
@ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(secretBuffer != NULL);
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+    /* production mode, assert() is disabled */
+    if (secretBuffer == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+    if (customSeedSize == 0) {
+        customSeed = XXH3_kSecret;
+        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+    }
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(customSeed != NULL);
+#else
+    if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+    { size_t pos = 0;
+      while (pos < secretSize) {
+          size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+          memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+          pos += toCopy;
+    } }
+
+    { size_t const nbSeg16 = secretSize / 16;
+      size_t n;
+      XXH128_canonical_t scrambler;
+      XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+      for (n = 0; n < nbSeg16; n++) {
+          XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+          XXH3_combine16((char*)secretBuffer + n*16, h128);
+      }
+      /* last segment */
+      XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+    }
+    return XXH_OK;
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h
new file mode 100644
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+
+#include "parquet/types.h"
+
+namespace parquet::benchmark {
+
+template <typename T>
+void GenerateBenchmarkData(uint32_t size, uint32_t seed, T* data,
+                           std::vector<uint8_t>* heap, uint32_t data_string_length);
+
+#define _GENERATE_BENCHMARK_DATA_DECL(KLASS)                              \
+  template <>                                                             \
+  void GenerateBenchmarkData(uint32_t size, uint32_t seed, KLASS* data,   \
+                             std::vector<uint8_t>* heap, uint32_t data_string_length);
+
+_GENERATE_BENCHMARK_DATA_DECL(int32_t)
+_GENERATE_BENCHMARK_DATA_DECL(int64_t)
+_GENERATE_BENCHMARK_DATA_DECL(float)
+_GENERATE_BENCHMARK_DATA_DECL(double)
+_GENERATE_BENCHMARK_DATA_DECL(ByteArray)
+_GENERATE_BENCHMARK_DATA_DECL(FLBA)
+_GENERATE_BENCHMARK_DATA_DECL(Int96)
+
+#undef _GENERATE_BENCHMARK_DATA_DECL
+
+} // namespace parquet::benchmark
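The header above only declares the generators. A hypothetical sketch of a call site; the helper name `PrepareByteArrayInput` and the sizes are illustrative, not part of the library:

```cpp
#include <cstdint>
#include <vector>

#include "parquet/benchmark_util.h"  // assumed header path
#include "parquet/types.h"

void PrepareByteArrayInput() {
  constexpr uint32_t kNumValues = 1024;
  constexpr uint32_t kStringLength = 16;

  std::vector<parquet::ByteArray> values(kNumValues);
  std::vector<uint8_t> heap;  // backing storage for the generated strings

  // Fills `values` with deterministic pseudo-random data; variable-length
  // types place their bytes in `heap` and point into it.
  parquet::benchmark::GenerateBenchmarkData(kNumValues, /*seed=*/42,
                                            values.data(), &heap,
                                            kStringLength);
}
```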
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbd267dd1972dcde98382dda3c84a6a544ddc3e3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h
@@ -0,0 +1,68 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/io/interfaces.h"
+#include "parquet/properties.h"
+#include "parquet/type_fwd.h"
+
+namespace parquet {
+
+class InternalFileDecryptor;
+class BloomFilter;
+
+class PARQUET_EXPORT RowGroupBloomFilterReader {
+ public:
+  virtual ~RowGroupBloomFilterReader() = default;
+
+  /// \brief Read the bloom filter of a column chunk.
+  ///
+  /// \param[in] i column ordinal of the column chunk.
+  /// \returns bloom filter of the column, or nullptr if it does not exist.
+  /// \throws ParquetException if the index is out of bounds, or if reading
+  ///   the bloom filter failed.
+  virtual std::unique_ptr<BloomFilter> GetColumnBloomFilter(int i) = 0;
+};
+
+/// \brief Interface for reading the bloom filter for a Parquet file.
+class PARQUET_EXPORT BloomFilterReader {
+ public:
+  virtual ~BloomFilterReader() = default;
+
+  /// \brief Create a BloomFilterReader instance.
+  /// \returns a BloomFilterReader instance.
+  /// WARNING: The returned BloomFilterReader references all of the input
+  /// parameters, so it must not outlive them. Usually these input parameters
+  /// come from the same ParquetFileReader object, so the BloomFilterReader
+  /// must not outlive the reader that created it.
+  static std::unique_ptr<BloomFilterReader> Make(
+      std::shared_ptr<::arrow::io::RandomAccessFile> input,
+      std::shared_ptr<FileMetaData> file_metadata, const ReaderProperties& properties,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  /// \brief Get the bloom filter reader of a specific row group.
+  /// \param[in] i row group ordinal to get bloom filter reader.
+  /// \returns RowGroupBloomFilterReader of the specified row group. A nullptr
+  ///   may be returned when the bloom filters for the row group are
+  ///   unavailable; it is the caller's responsibility to check the return
+  ///   values of follow-up calls to the RowGroupBloomFilterReader.
+  /// \throws ParquetException if the index is out of bounds.
+  virtual std::shared_ptr<RowGroupBloomFilterReader> RowGroup(int i) = 0;
+};
+
+} // namespace parquet
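A sketch of how these two interfaces compose: open a file, make a `BloomFilterReader`, and probe one column's filter. The `Hash`/`FindHash` calls assume the `BloomFilter` interface from `parquet/bloom_filter.h`; treat them as illustrative:

```cpp
#include <memory>
#include <string>

#include "arrow/io/file.h"
#include "parquet/bloom_filter.h"         // assumed location of BloomFilter
#include "parquet/bloom_filter_reader.h"
#include "parquet/file_reader.h"

bool MightContain(const std::string& path, int64_t value) {
  auto file = ::arrow::io::ReadableFile::Open(path).ValueOrDie();
  auto parquet_reader = parquet::ParquetFileReader::Open(file);

  auto bloom_reader = parquet::BloomFilterReader::Make(
      file, parquet_reader->metadata(), parquet::default_reader_properties());

  auto rg = bloom_reader->RowGroup(0);  // throws if out of bounds
  std::unique_ptr<parquet::BloomFilter> filter = rg->GetColumnBloomFilter(0);
  if (filter == nullptr) return true;   // no filter: cannot rule anything out

  // false means the value is definitely absent from this column chunk.
  return filter->FindHash(filter->Hash(value));
}
```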
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h
new file mode 100644
index 0000000000000000000000000000000000000000..905f805b8c9cc7d0ee71ed448647cb23e0e3c0e7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h
@@ -0,0 +1,171 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "parquet/statistics.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+// TODO: Parallel processing is not yet safe because of memory-ownership
+// semantics (the PageReader may or may not own the memory referenced by a
+// page)
+//
+// TODO(wesm): In the future Parquet implementations may store the crc code
+// in format::PageHeader. parquet-mr currently does not, so we also skip it
+// here, both on the read and write path
+class Page {
+ public:
+  Page(const std::shared_ptr<Buffer>& buffer, PageType::type type)
+      : buffer_(buffer), type_(type) {}
+
+  PageType::type type() const { return type_; }
+
+  std::shared_ptr<Buffer> buffer() const { return buffer_; }
+
+  // @returns: a pointer to the page's data
+  const uint8_t* data() const { return buffer_->data(); }
+
+  // @returns: the total size in bytes of the page's data buffer
+  int32_t size() const { return static_cast<int32_t>(buffer_->size()); }
+
+ private:
+  std::shared_ptr<Buffer> buffer_;
+  PageType::type type_;
+};
+
+/// \brief Base type for DataPageV1 and DataPageV2 including common attributes
+class DataPage : public Page {
+ public:
+  int32_t num_values() const { return num_values_; }
+  Encoding::type encoding() const { return encoding_; }
+  int64_t uncompressed_size() const { return uncompressed_size_; }
+  const EncodedStatistics& statistics() const { return statistics_; }
+  /// Return the row ordinal within the row group of the first row in the data page.
+  /// Currently it is only present in data pages created by ColumnWriter, in order
+  /// to collect the page index.
+  std::optional<int64_t> first_row_index() const { return first_row_index_; }
+
+  virtual ~DataPage() = default;
+
+ protected:
+  DataPage(PageType::type type, const std::shared_ptr<Buffer>& buffer, int32_t num_values,
+           Encoding::type encoding, int64_t uncompressed_size,
+           const EncodedStatistics& statistics = EncodedStatistics(),
+           std::optional<int64_t> first_row_index = std::nullopt)
+      : Page(buffer, type),
+        num_values_(num_values),
+        encoding_(encoding),
+        uncompressed_size_(uncompressed_size),
+        statistics_(statistics),
+        first_row_index_(std::move(first_row_index)) {}
+
+  int32_t num_values_;
+  Encoding::type encoding_;
+  int64_t uncompressed_size_;
+  EncodedStatistics statistics_;
+  /// Row ordinal within the row group of the first row in the data page.
+  std::optional<int64_t> first_row_index_;
+};
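A sketch of classifying pages pulled from a `PageReader` (declared in `column_reader.h`), dispatching on `Page::type()` to the subclasses defined just below:

```cpp
#include <memory>

#include "parquet/column_page.h"
#include "parquet/column_reader.h"

void InspectPages(parquet::PageReader* pager) {
  while (std::shared_ptr<parquet::Page> page = pager->NextPage()) {
    switch (page->type()) {
      case parquet::PageType::DICTIONARY_PAGE: {
        auto* dict = static_cast<parquet::DictionaryPage*>(page.get());
        // e.g. dict->num_values(), dict->encoding(), dict->is_sorted()
        break;
      }
      case parquet::PageType::DATA_PAGE:      // V1
      case parquet::PageType::DATA_PAGE_V2: {
        auto* data = static_cast<parquet::DataPage*>(page.get());
        // e.g. data->num_values(), data->encoding(), data->first_row_index()
        break;
      }
      default:
        break;  // other page kinds: skip
    }
  }
}
```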
+class DataPageV1 : public DataPage {
+ public:
+  DataPageV1(const std::shared_ptr<Buffer>& buffer, int32_t num_values,
+             Encoding::type encoding, Encoding::type definition_level_encoding,
+             Encoding::type repetition_level_encoding, int64_t uncompressed_size,
+             const EncodedStatistics& statistics = EncodedStatistics(),
+             std::optional<int64_t> first_row_index = std::nullopt)
+      : DataPage(PageType::DATA_PAGE, buffer, num_values, encoding, uncompressed_size,
+                 statistics, std::move(first_row_index)),
+        definition_level_encoding_(definition_level_encoding),
+        repetition_level_encoding_(repetition_level_encoding) {}
+
+  Encoding::type repetition_level_encoding() const { return repetition_level_encoding_; }
+
+  Encoding::type definition_level_encoding() const { return definition_level_encoding_; }
+
+ private:
+  Encoding::type definition_level_encoding_;
+  Encoding::type repetition_level_encoding_;
+};
+
+class DataPageV2 : public DataPage {
+ public:
+  DataPageV2(const std::shared_ptr<Buffer>& buffer, int32_t num_values, int32_t num_nulls,
+             int32_t num_rows, Encoding::type encoding,
+             int32_t definition_levels_byte_length, int32_t repetition_levels_byte_length,
+             int64_t uncompressed_size, bool is_compressed = false,
+             const EncodedStatistics& statistics = EncodedStatistics(),
+             std::optional<int64_t> first_row_index = std::nullopt)
+      : DataPage(PageType::DATA_PAGE_V2, buffer, num_values, encoding, uncompressed_size,
+                 statistics, std::move(first_row_index)),
+        num_nulls_(num_nulls),
+        num_rows_(num_rows),
+        definition_levels_byte_length_(definition_levels_byte_length),
+        repetition_levels_byte_length_(repetition_levels_byte_length),
+        is_compressed_(is_compressed) {}
+
+  int32_t num_nulls() const { return num_nulls_; }
+
+  int32_t num_rows() const { return num_rows_; }
+
+  int32_t definition_levels_byte_length() const { return definition_levels_byte_length_; }
+
+  int32_t repetition_levels_byte_length() const { return repetition_levels_byte_length_; }
+
+  bool is_compressed() const { return is_compressed_; }
+
+ private:
+  int32_t num_nulls_;
+  int32_t num_rows_;
+  int32_t definition_levels_byte_length_;
+  int32_t repetition_levels_byte_length_;
+  bool is_compressed_;
+};
+
+class DictionaryPage : public Page {
+ public:
+  DictionaryPage(const std::shared_ptr<Buffer>& buffer, int32_t num_values,
+                 Encoding::type encoding, bool is_sorted = false)
+      : Page(buffer, PageType::DICTIONARY_PAGE),
+        num_values_(num_values),
+        encoding_(encoding),
+        is_sorted_(is_sorted) {}
+
+  int32_t num_values() const { return num_values_; }
+
+  Encoding::type encoding() const { return encoding_; }
+
+  bool is_sorted() const { return is_sorted_; }
+
+ private:
+  int32_t num_values_;
+  Encoding::type encoding_;
+  bool is_sorted_;
+};
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..086f6c0e55806ee85c5cbeb4f3df16f796d45608
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h
@@ -0,0 +1,501 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "parquet/exception.h"
+#include "parquet/level_conversion.h"
+#include "parquet/metadata.h"
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+#include "parquet/schema.h"
+#include "parquet/types.h"
+
+namespace arrow {
+
+class Array;
+class ChunkedArray;
+
+namespace bit_util {
+class BitReader;
+} // namespace bit_util
+
+namespace util {
+class RleDecoder;
+} // namespace util
+
+} // namespace arrow
+
+namespace parquet {
+
+class Decryptor;
+class Page;
+
+// 16 MB is the default maximum page header size
+static constexpr uint32_t kDefaultMaxPageHeaderSize = 16 * 1024 * 1024;
+
+// 16 KB is the default expected page header size
+static constexpr uint32_t kDefaultPageHeaderSize = 16 * 1024;
+
+// \brief DataPageStats stores encoded statistics and the number of values/rows
+// for a page.
+struct PARQUET_EXPORT DataPageStats {
+  DataPageStats(const EncodedStatistics* encoded_statistics, int32_t num_values,
+                std::optional<int64_t> num_rows)
+      : encoded_statistics(encoded_statistics),
+        num_values(num_values),
+        num_rows(num_rows) {}
+
+  // Encoded statistics extracted from the page header.
+  // Nullptr if there are no statistics in the page header.
+  const EncodedStatistics* encoded_statistics;
+  // Number of values stored in the page. Filled for both V1 and V2 data pages.
+  // For repeated fields, this can be greater than the number of rows. For
+  // non-repeated fields, this will be the same as the number of rows.
+  int32_t num_values;
+  // Number of rows stored in the page. std::nullopt if not available.
+  std::optional<int64_t> num_rows;
+};
+
+class PARQUET_EXPORT LevelDecoder {
+ public:
+  LevelDecoder();
+  ~LevelDecoder();
+
+  // Initialize the LevelDecoder state with new data
+  // and return the number of bytes consumed
+  int SetData(Encoding::type encoding, int16_t max_level, int num_buffered_values,
+              const uint8_t* data, int32_t data_size);
+
+  void SetDataV2(int32_t num_bytes, int16_t max_level, int num_buffered_values,
+                 const uint8_t* data);
+
+  // Decodes a batch of levels into an array and returns the number of levels decoded
+  int Decode(int batch_size, int16_t* levels);
+
+ private:
+  int bit_width_;
+  int num_values_remaining_;
+  Encoding::type encoding_;
+  std::unique_ptr<::arrow::util::RleDecoder> rle_decoder_;
+  std::unique_ptr<::arrow::bit_util::BitReader> bit_packed_decoder_;
+  int16_t max_level_;
+};
+
+struct CryptoContext {
+  CryptoContext(bool start_with_dictionary_page, int16_t rg_ordinal, int16_t col_ordinal,
+                std::shared_ptr<Decryptor> meta, std::shared_ptr<Decryptor> data)
+      : start_decrypt_with_dictionary_page(start_with_dictionary_page),
+        row_group_ordinal(rg_ordinal),
+        column_ordinal(col_ordinal),
+        meta_decryptor(std::move(meta)),
+        data_decryptor(std::move(data)) {}
+  CryptoContext() {}
+
+  bool start_decrypt_with_dictionary_page = false;
+  int16_t row_group_ordinal = -1;
+  int16_t column_ordinal = -1;
+  std::shared_ptr<Decryptor> meta_decryptor;
+  std::shared_ptr<Decryptor> data_decryptor;
+};
+
+// Abstract page iterator interface. This way, we can feed column pages to the
+// ColumnReader through whatever mechanism we choose
+class PARQUET_EXPORT PageReader {
+  using DataPageFilter = std::function<bool(const DataPageStats&)>;
+
+ public:
+  virtual ~PageReader() = default;
+
+  static std::unique_ptr<PageReader> Open(
+      std::shared_ptr<ArrowInputStream> stream, int64_t total_num_values,
+      Compression::type codec, bool always_compressed = false,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+      const CryptoContext* ctx = NULLPTR);
+  static std::unique_ptr<PageReader> Open(std::shared_ptr<ArrowInputStream> stream,
+                                          int64_t total_num_values,
+                                          Compression::type codec,
+                                          const ReaderProperties& properties,
+                                          bool always_compressed = false,
+                                          const CryptoContext* ctx = NULLPTR);
+
+  // If data_page_filter is present (not null), NextPage() will call the
+  // callback function exactly once per page in the order the pages appear in
+  // the column. If the callback function returns true, the page will be
+  // skipped. The callback will be called only if the page type is DATA_PAGE or
+  // DATA_PAGE_V2. Dictionary pages will not be skipped.
+  // The caller is responsible for checking that statistics are correct using
+  // ApplicationVersion::HasCorrectStatistics().
+  // \note API EXPERIMENTAL
+  void set_data_page_filter(DataPageFilter data_page_filter) {
+    data_page_filter_ = std::move(data_page_filter);
+  }
+
+  // @returns: shared_ptr<Page>(nullptr) on EOS, std::shared_ptr<Page>
+  // containing a new Page otherwise
+  //
+  // The returned Page may contain references that aren't guaranteed to live
+  // beyond the next call to NextPage().
+  virtual std::shared_ptr<Page> NextPage() = 0;
+
+  virtual void set_max_page_header_size(uint32_t size) = 0;
+
+ protected:
+  // Callback that decides if we should skip a page or not.
+  DataPageFilter data_page_filter_;
+};
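A sketch of the experimental page-filter hook above. The predicate shown is illustrative; it keeps every page, and the commented line only suggests where real statistics-based pruning would go:

```cpp
#include "parquet/column_reader.h"

void SkipPagesBelowThreshold(parquet::PageReader* pager) {
  pager->set_data_page_filter(
      [](const parquet::DataPageStats& stats) -> bool {
        // Returning true skips the page. Without statistics we must keep it.
        if (stats.encoded_statistics == nullptr) return false;
        // e.g. decode stats.encoded_statistics->max() against a threshold...
        return false;
      });
}
```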
+class PARQUET_EXPORT ColumnReader {
+ public:
+  virtual ~ColumnReader() = default;
+
+  static std::shared_ptr<ColumnReader> Make(
+      const ColumnDescriptor* descr, std::unique_ptr<PageReader> pager,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  // Returns true if there are still values in this column.
+  virtual bool HasNext() = 0;
+
+  virtual Type::type type() const = 0;
+
+  virtual const ColumnDescriptor* descr() const = 0;
+
+  // Get the encoding that can be exposed by this reader. If it returns
+  // dictionary encoding, then ReadBatchWithDictionary can be used to read data.
+  //
+  // \note API EXPERIMENTAL
+  virtual ExposedEncoding GetExposedEncoding() = 0;
+
+ protected:
+  friend class RowGroupReader;
+  // Set the encoding that can be exposed by this reader.
+  //
+  // \note API EXPERIMENTAL
+  virtual void SetExposedEncoding(ExposedEncoding encoding) = 0;
+};
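A sketch of the canonical `ReadBatch` drain loop against the typed interface defined just below, using the `Int64Reader` alias from later in this header:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

#include "parquet/column_reader.h"

// Drain an INT64 column. Exhausting the column means looping until HasNext()
// is false; values_read can be smaller than levels read when nulls occur.
void ReadAllInt64(std::shared_ptr<parquet::ColumnReader> col) {
  auto* reader = static_cast<parquet::Int64Reader*>(col.get());

  constexpr int64_t kBatch = 1024;
  std::vector<int16_t> def_levels(kBatch);
  std::vector<int16_t> rep_levels(kBatch);
  std::vector<int64_t> values(kBatch);

  while (reader->HasNext()) {
    int64_t values_read = 0;
    int64_t levels_read = reader->ReadBatch(kBatch, def_levels.data(),
                                            rep_levels.data(), values.data(),
                                            &values_read);
    // levels_read levels decoded; values[0..values_read) hold non-null data.
    (void)levels_read;
  }
}
```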
+// API to read values from a single column. This is the main client facing API.
+template <typename DType>
+class TypedColumnReader : public ColumnReader {
+ public:
+  typedef typename DType::c_type T;
+
+  // Read a batch of repetition levels, definition levels, and values from the
+  // column.
+  //
+  // Since null values are not stored in the values, the number of values read
+  // may be less than the number of repetition and definition levels. With
+  // nested data this is almost certainly true.
+  //
+  // Set def_levels or rep_levels to nullptr if you want to skip reading them.
+  // This is only safe if you know through some other source that there are no
+  // undefined values.
+  //
+  // To fully exhaust a row group, you must read batches until the number of
+  // values read reaches the number of stored values according to the metadata.
+  //
+  // This API is the same for both V1 and V2 of the DataPage.
+  //
+  // @returns: actual number of levels read (see values_read for number of values read)
+  virtual int64_t ReadBatch(int64_t batch_size, int16_t* def_levels, int16_t* rep_levels,
+                            T* values, int64_t* values_read) = 0;
+
+  /// Read a batch of repetition levels, definition levels, and values from the
+  /// column and leave spaces for null entries on the lowest level in the values
+  /// buffer.
+  ///
+  /// In comparison to ReadBatch, the length of the repetition and definition
+  /// levels is the same as the number of values read for max_definition_level == 1.
+  /// In the case of max_definition_level > 1, the repetition and definition
+  /// levels are larger than the values, but the values include the null entries
+  /// with definition_level == (max_definition_level - 1).
+  ///
+  /// To fully exhaust a row group, you must read batches until the number of
+  /// values read reaches the number of stored values according to the metadata.
+  ///
+  /// @param batch_size the number of levels to read
+  /// @param[out] def_levels The Parquet definition levels; output has
+  ///   the length levels_read.
+  /// @param[out] rep_levels The Parquet repetition levels; output has
+  ///   the length levels_read.
+  /// @param[out] values The values in the lowest nested level including
+  ///   spacing for nulls on the lowest levels; output has the length
+  ///   values_read.
+  /// @param[out] valid_bits Memory allocated for a bitmap that indicates if
+  ///   the row is null or on the maximum definition level. For performance
+  ///   reasons the underlying buffer should be able to store 1 bit more than
+  ///   required. If this requires an additional byte, this byte is only read
+  ///   but never written to.
+  /// @param valid_bits_offset The offset in bits of the valid_bits where the
+  ///   first relevant bit resides.
+  /// @param[out] levels_read The number of repetition/definition levels that were read.
+  /// @param[out] values_read The number of values read; this includes all
+  ///   non-null entries as well as all null entries on the lowest level
+  ///   (i.e. definition_level == max_definition_level - 1)
+  /// @param[out] null_count The number of nulls on the lowest levels.
+  ///   (i.e. (values_read - null_count) is the total number of non-null entries)
+  ///
+  /// \deprecated Since 4.0.0
+  ARROW_DEPRECATED("Doesn't handle nesting correctly and unused outside of unit tests.")
+  virtual int64_t ReadBatchSpaced(int64_t batch_size, int16_t* def_levels,
+                                  int16_t* rep_levels, T* values, uint8_t* valid_bits,
+                                  int64_t valid_bits_offset, int64_t* levels_read,
+                                  int64_t* values_read, int64_t* null_count) = 0;
+
+  // Skip reading values. This method works for both repeated and
+  // non-repeated fields. Note that this method skips values, not
+  // records. This distinction is important for repeated fields: it means that
+  // we are not skipping over the values to the next record. For example,
+  // consider the following two consecutive records containing one repeated field:
+  // {[1, 2, 3]}, {[4, 5]}. If we Skip(2), our next read value will be 3, which
+  // is inside the first record.
+  // Returns the number of values skipped.
+  virtual int64_t Skip(int64_t num_values_to_skip) = 0;
+
+  // Read a batch of repetition levels, definition levels, and indices from the
+  // column, and read the dictionary if a dictionary page is encountered while
+  // reading pages. This API is similar to ReadBatch(), with the added ability to
+  // read the dictionary and indices. It is only valid to call this method when the
+  // reader can expose dictionary encoding (i.e., the reader's GetExposedEncoding()
+  // returns DICTIONARY).
+  //
+  // The dictionary is read along with the data page. When there's no data page,
+  // the dictionary won't be returned.
+  //
+  // @param batch_size The batch size to read
+  // @param[out] def_levels The Parquet definition levels.
+  // @param[out] rep_levels The Parquet repetition levels.
+  // @param[out] indices The dictionary indices.
+  // @param[out] indices_read The number of indices read.
+  // @param[out] dict The pointer to dictionary values. It will return nullptr if
+  //   there's no data page. Each column chunk only has one dictionary page. The
+  //   dictionary is owned by the reader, so the caller is responsible for copying
+  //   the dictionary values before the reader gets destroyed.
+  // @param[out] dict_len The dictionary length. It will return 0 if there's no data
+  //   page.
+  // @returns: actual number of levels read (see indices_read for the number of
+  //   indices read)
+  //
+  // \note API EXPERIMENTAL
+  virtual int64_t ReadBatchWithDictionary(int64_t batch_size, int16_t* def_levels,
+                                          int16_t* rep_levels, int32_t* indices,
+                                          int64_t* indices_read, const T** dict,
+                                          int32_t* dict_len) = 0;
+};
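A sketch of the `Skip` contract described above, using the header's own two-record example `{[1, 2, 3]}, {[4, 5]}` on a repeated INT32 column:

```cpp
#include <cstdint>

#include "parquet/column_reader.h"

void SkipIntoFirstRecord(parquet::Int32Reader* reader) {
  int64_t skipped = reader->Skip(2);  // skips the *values* 1 and 2
  (void)skipped;

  int16_t def_level = 0, rep_level = 0;
  int32_t value = 0;
  int64_t values_read = 0;
  reader->ReadBatch(1, &def_level, &rep_level, &value, &values_read);
  // value == 3: still inside the first record (rep_level != 0 here),
  // because Skip counts values rather than records.
}
```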
+namespace internal {
+
+/// \brief Stateful column reader that delimits semantic records for both flat
+/// and nested columns
+///
+/// \note API EXPERIMENTAL
+/// \since 1.3.0
+class PARQUET_EXPORT RecordReader {
+ public:
+  /// \brief Creates a record reader.
+  /// @param descr Column descriptor
+  /// @param leaf_info Level info, used to determine if a column is nullable or not
+  /// @param pool Memory pool to use for buffering values and rep/def levels
+  /// @param read_dictionary True if reading directly as Arrow dictionary-encoded
+  /// @param read_dense_for_nullable True if reading dense and not leaving space
+  ///   for null values
+  static std::shared_ptr<RecordReader> Make(
+      const ColumnDescriptor* descr, LevelInfo leaf_info,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+      bool read_dictionary = false, bool read_dense_for_nullable = false);
+
+  virtual ~RecordReader() = default;
+
+  /// \brief Attempt to read the indicated number of records from the column chunk.
+  /// Note that for repeated fields, a record may have more than one value,
+  /// and all of them are read. If read_dense_for_nullable() is true, it will
+  /// not leave any space for null values. Otherwise, it will read spaced.
+  /// \return number of records read
+  virtual int64_t ReadRecords(int64_t num_records) = 0;
+
+  /// \brief Attempt to skip the indicated number of records in the column chunk.
+  /// Note that for repeated fields, a record may have more than one value,
+  /// and all of them are skipped.
+  /// \return number of records skipped
+  virtual int64_t SkipRecords(int64_t num_records) = 0;
+
+  /// \brief Pre-allocate space for data. Results in better flat read performance
+  virtual void Reserve(int64_t num_values) = 0;
+
+  /// \brief Clear consumed values and repetition/definition levels as the
+  /// result of calling ReadRecords.
+  /// For FLBA and ByteArray types, call GetBuilderChunks() to reset them.
+  virtual void Reset() = 0;
+
+  /// \brief Transfer filled values buffer to caller. A new one will be
+  /// allocated in subsequent ReadRecords calls
+  virtual std::shared_ptr<Buffer> ReleaseValues() = 0;
+
+  /// \brief Transfer filled validity bitmap buffer to caller. A new one will
+  /// be allocated in subsequent ReadRecords calls
+  virtual std::shared_ptr<Buffer> ReleaseIsValid() = 0;
+
+  /// \brief Return true if the record reader has more internal data yet to
+  /// process
+  virtual bool HasMoreData() const = 0;
+
+  /// \brief Advance the record reader to the next row group. Must be set before
+  /// any records can be read/skipped.
+  /// \param[in] reader obtained from RowGroupReader::GetColumnPageReader
+  virtual void SetPageReader(std::unique_ptr<PageReader> reader) = 0;
+
+  /// \brief Returns the underlying column reader's descriptor.
+  virtual const ColumnDescriptor* descr() const = 0;
+
+  virtual void DebugPrintState() = 0;
+
+  /// \brief Returns the dictionary owned by the current decoder. Throws an
+  /// exception if the current decoder is not for dictionary encoding. The caller
+  /// is responsible for casting the returned pointer to the proper type depending
+  /// on the column's physical type. An example:
+  ///   const ByteArray* dict = reinterpret_cast<const ByteArray*>(ReadDictionary(&len));
+  /// or:
+  ///   const float* dict = reinterpret_cast<const float*>(ReadDictionary(&len));
+  /// \param[out] dictionary_length The number of dictionary entries.
+  virtual const void* ReadDictionary(int32_t* dictionary_length) = 0;
+
+  /// \brief Decoded definition levels
+  int16_t* def_levels() const {
+    return reinterpret_cast<int16_t*>(def_levels_->mutable_data());
+  }
+
+  /// \brief Decoded repetition levels
+  int16_t* rep_levels() const {
+    return reinterpret_cast<int16_t*>(rep_levels_->mutable_data());
+  }
+
+  /// \brief Decoded values, including nulls, if any.
+  /// FLBA and ByteArray types do not use this array and read into their own
+  /// builders.
+ uint8_t* values() const { return values_->mutable_data(); } + + /// \brief Number of values written, including space left for nulls if any. + /// If this Reader was constructed with read_dense_for_nullable(), there is no space for + /// nulls and null_count() will be 0. There is no read-ahead/buffering for values. For + /// FLBA and ByteArray types this value reflects the values written with the last + /// ReadRecords call since those readers will reset the values after each call. + int64_t values_written() const { return values_written_; } + + /// \brief Number of definition / repetition levels (from those that have + /// been decoded) that have been consumed inside the reader. + int64_t levels_position() const { return levels_position_; } + + /// \brief Number of definition / repetition levels that have been written + /// internally in the reader. This may be larger than values_written() because + /// for repeated fields we need to look at the levels in advance to figure out + /// the record boundaries. + int64_t levels_written() const { return levels_written_; } + + /// \brief Number of nulls in the leaf that we have read so far into the + /// values vector. This is only valid when !read_dense_for_nullable(). When + /// read_dense_for_nullable() it will always be 0. + int64_t null_count() const { return null_count_; } + + /// \brief True if the leaf values are nullable + bool nullable_values() const { return nullable_values_; } + + /// \brief True if reading directly as Arrow dictionary-encoded + bool read_dictionary() const { return read_dictionary_; } + + /// \brief True if reading dense for nullable columns. + bool read_dense_for_nullable() const { return read_dense_for_nullable_; } + + protected: + /// \brief Indicates if we can have nullable values. Note that repeated fields + /// may or may not be nullable. + bool nullable_values_; + + bool at_record_start_; + int64_t records_read_; + + /// \brief Stores values. These values are populated based on each ReadRecords + /// call. No extra values are buffered for the next call. SkipRecords will not + /// add any value to this buffer. + std::shared_ptr<::arrow::ResizableBuffer> values_; + /// \brief False for BYTE_ARRAY, in which case we don't allocate the values + /// buffer and we directly read into builder classes. + bool uses_values_; + + /// \brief Values that we have read into 'values_' + 'null_count_'. + int64_t values_written_; + int64_t values_capacity_; + int64_t null_count_; + + /// \brief Each bit corresponds to one element in 'values_' and specifies if it + /// is null or not null. Not set if read_dense_for_nullable_ is true. + std::shared_ptr<::arrow::ResizableBuffer> valid_bits_; + + /// \brief Buffer for definition levels. May contain more levels than + /// is actually read. This is because we read levels ahead to + /// figure out record boundaries for repeated fields. + /// For flat required fields, 'def_levels_' and 'rep_levels_' are not + /// populated. For non-repeated fields 'rep_levels_' is not populated. + /// 'def_levels_' and 'rep_levels_' must be of the same size if present. + std::shared_ptr<::arrow::ResizableBuffer> def_levels_; + /// \brief Buffer for repetition levels. Only populated for repeated + /// fields. + std::shared_ptr<::arrow::ResizableBuffer> rep_levels_; + + /// \brief Number of definition / repetition levels that have been written + /// internally in the reader. 
This may be larger than values_written() since
+  /// for repeated fields we need to look at the levels in advance to figure out
+  /// the record boundaries.
+  int64_t levels_written_;
+  /// \brief Position of the next level that should be consumed.
+  int64_t levels_position_;
+  int64_t levels_capacity_;
+
+  bool read_dictionary_ = false;
+  // If true, we will not leave any space for the null values in the values_
+  // vector.
+  bool read_dense_for_nullable_ = false;
+};
+
+class BinaryRecordReader : virtual public RecordReader {
+ public:
+  virtual std::vector<std::shared_ptr<::arrow::Array>> GetBuilderChunks() = 0;
+};
+
+/// \brief Read records directly to dictionary-encoded Arrow form (int32
+/// indices). Only valid for BYTE_ARRAY columns.
+class DictionaryRecordReader : virtual public RecordReader {
+ public:
+  virtual std::shared_ptr<::arrow::ChunkedArray> GetResult() = 0;
+};
+
+} // namespace internal
+
+using BoolReader = TypedColumnReader<BooleanType>;
+using Int32Reader = TypedColumnReader<Int32Type>;
+using Int64Reader = TypedColumnReader<Int64Type>;
+using Int96Reader = TypedColumnReader<Int96Type>;
+using FloatReader = TypedColumnReader<FloatType>;
+using DoubleReader = TypedColumnReader<DoubleType>;
+using ByteArrayReader = TypedColumnReader<ByteArrayType>;
+using FixedLenByteArrayReader = TypedColumnReader<FLBAType>;
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9953866fab22ee6db13a92578f85556ea6f99ba
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h
@@ -0,0 +1,264 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "parquet/column_reader.h"
+#include "parquet/exception.h"
+#include "parquet/platform.h"
+#include "parquet/schema.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+static constexpr int64_t DEFAULT_SCANNER_BATCH_SIZE = 128;
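A usage sketch for the `Scanner` interface defined just below: make a scanner over a column reader and print each value in a fixed-width column:

```cpp
#include <iostream>
#include <memory>

#include "parquet/column_scanner.h"

void DumpColumn(std::shared_ptr<parquet::ColumnReader> col, int width) {
  std::shared_ptr<parquet::Scanner> scanner = parquet::Scanner::Make(col);
  while (scanner->HasNext()) {
    scanner->PrintNext(std::cout, width);  // prints one value or "NULL"
    std::cout << "\n";
  }
}
```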
+class PARQUET_EXPORT Scanner {
+ public:
+  explicit Scanner(std::shared_ptr<ColumnReader> reader,
+                   int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
+                   ::arrow::MemoryPool* pool = ::arrow::default_memory_pool())
+      : batch_size_(batch_size),
+        level_offset_(0),
+        levels_buffered_(0),
+        value_buffer_(AllocateBuffer(pool)),
+        value_offset_(0),
+        values_buffered_(0),
+        reader_(std::move(reader)) {
+    def_levels_.resize(
+        descr()->max_definition_level() > 0 ? static_cast<size_t>(batch_size_) : 0);
+    rep_levels_.resize(
+        descr()->max_repetition_level() > 0 ? static_cast<size_t>(batch_size_) : 0);
+  }
+
+  virtual ~Scanner() {}
+
+  static std::shared_ptr<Scanner> Make(
+      std::shared_ptr<ColumnReader> col_reader,
+      int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) = 0;
+
+  bool HasNext() { return level_offset_ < levels_buffered_ || reader_->HasNext(); }
+
+  const ColumnDescriptor* descr() const { return reader_->descr(); }
+
+  int64_t batch_size() const { return batch_size_; }
+
+  void SetBatchSize(int64_t batch_size) { batch_size_ = batch_size; }
+
+ protected:
+  int64_t batch_size_;
+
+  std::vector<int16_t> def_levels_;
+  std::vector<int16_t> rep_levels_;
+  int level_offset_;
+  int levels_buffered_;
+
+  std::shared_ptr<ResizableBuffer> value_buffer_;
+  int value_offset_;
+  int64_t values_buffered_;
+  std::shared_ptr<ColumnReader> reader_;
+};
+
+template <typename DType>
+class PARQUET_TEMPLATE_CLASS_EXPORT TypedScanner : public Scanner {
+ public:
+  typedef typename DType::c_type T;
+
+  explicit TypedScanner(std::shared_ptr<ColumnReader> reader,
+                        int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
+                        ::arrow::MemoryPool* pool = ::arrow::default_memory_pool())
+      : Scanner(std::move(reader), batch_size, pool) {
+    typed_reader_ = static_cast<TypedColumnReader<DType>*>(reader_.get());
+    int value_byte_size = type_traits<DType::type_num>::value_byte_size;
+    PARQUET_THROW_NOT_OK(value_buffer_->Resize(batch_size_ * value_byte_size));
+    values_ = reinterpret_cast<T*>(value_buffer_->mutable_data());
+  }
+
+  virtual ~TypedScanner() {}
+
+  bool NextLevels(int16_t* def_level, int16_t* rep_level) {
+    if (level_offset_ == levels_buffered_) {
+      levels_buffered_ = static_cast<int>(
+          typed_reader_->ReadBatch(static_cast<int>(batch_size_), def_levels_.data(),
+                                   rep_levels_.data(), values_, &values_buffered_));
+
+      value_offset_ = 0;
+      level_offset_ = 0;
+      if (!levels_buffered_) {
+        return false;
+      }
+    }
+    *def_level = descr()->max_definition_level() > 0 ? def_levels_[level_offset_] : 0;
+    *rep_level = descr()->max_repetition_level() > 0 ? rep_levels_[level_offset_] : 0;
+    level_offset_++;
+    return true;
+  }
+                     rep_levels_[level_offset_] : 0;
+    level_offset_++;
+    return true;
+  }
+
+  bool Next(T* val, int16_t* def_level, int16_t* rep_level, bool* is_null) {
+    if (level_offset_ == levels_buffered_) {
+      if (!HasNext()) {
+        // Out of data pages
+        return false;
+      }
+    }
+
+    NextLevels(def_level, rep_level);
+    *is_null = *def_level < descr()->max_definition_level();
+
+    if (*is_null) {
+      return true;
+    }
+
+    if (value_offset_ == values_buffered_) {
+      throw ParquetException("Value was non-null, but has not been buffered");
+    }
+    *val = values_[value_offset_++];
+    return true;
+  }
+
+  // Returns true if there is a next value
+  bool NextValue(T* val, bool* is_null) {
+    if (level_offset_ == levels_buffered_) {
+      if (!HasNext()) {
+        // Out of data pages
+        return false;
+      }
+    }
+
+    // Out of values
+    int16_t def_level = -1;
+    int16_t rep_level = -1;
+    NextLevels(&def_level, &rep_level);
+    *is_null = def_level < descr()->max_definition_level();
+
+    if (*is_null) {
+      return true;
+    }
+
+    if (value_offset_ == values_buffered_) {
+      throw ParquetException("Value was non-null, but has not been buffered");
+    }
+    *val = values_[value_offset_++];
+    return true;
+  }
+
+  virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) {
+    T val{};
+    int16_t def_level = -1;
+    int16_t rep_level = -1;
+    bool is_null = false;
+    char buffer[80];
+
+    if (!Next(&val, &def_level, &rep_level, &is_null)) {
+      throw ParquetException("No more values buffered");
+    }
+
+    if (with_levels) {
+      out << " D:" << def_level << " R:" << rep_level << " ";
+      if (!is_null) {
+        out << "V:";
+      }
+    }
+
+    if (is_null) {
+      std::string null_fmt = format_fwf<ByteArrayType>(width);
+      snprintf(buffer, sizeof(buffer), null_fmt.c_str(), "NULL");
+    } else {
+      FormatValue(&val, buffer, sizeof(buffer), width);
+    }
+    out << buffer;
+  }
+
+ private:
+  // The ownership of this object is expressed through the reader_ variable in the base
+  TypedColumnReader<DType>* typed_reader_;
+
+  inline void FormatValue(void* val, char* buffer, int bufsize, int width);
+
+  T* values_;
+};
+
+template <typename DType>
+inline void TypedScanner<DType>::FormatValue(void* val, char* buffer, int bufsize,
+                                             int width) {
+  std::string fmt = format_fwf<DType>(width);
+  snprintf(buffer, bufsize, fmt.c_str(), *reinterpret_cast<T*>(val));
+}
+
+template <>
+inline void TypedScanner<Int96Type>::FormatValue(void* val, char* buffer, int bufsize,
+                                                 int width) {
+  std::string fmt = format_fwf<Int96Type>(width);
+  std::string result = Int96ToString(*reinterpret_cast<Int96*>(val));
+  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
+}
+
+template <>
+inline void TypedScanner<ByteArrayType>::FormatValue(void* val, char* buffer,
+                                                     int bufsize, int width) {
+  std::string fmt = format_fwf<ByteArrayType>(width);
+  std::string result = ByteArrayToString(*reinterpret_cast<ByteArray*>(val));
+  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
+}
+
+template <>
+inline void TypedScanner<FLBAType>::FormatValue(void* val, char* buffer, int bufsize,
+                                                int width) {
+  std::string fmt = format_fwf<FLBAType>(width);
+  std::string result = FixedLenByteArrayToString(
+      *reinterpret_cast<FixedLenByteArray*>(val), descr()->type_length());
+  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
+}
+
+typedef TypedScanner<BooleanType> BoolScanner;
+typedef TypedScanner<Int32Type> Int32Scanner;
+typedef TypedScanner<Int64Type> Int64Scanner;
+typedef TypedScanner<Int96Type> Int96Scanner;
+typedef TypedScanner<FloatType> FloatScanner;
+typedef TypedScanner<DoubleType> DoubleScanner;
+typedef TypedScanner<ByteArrayType> ByteArrayScanner;
+typedef TypedScanner<FLBAType> FixedLenByteArrayScanner;
+
+template <typename RType>
+int64_t ScanAll(int32_t batch_size, int16_t* def_levels, int16_t* rep_levels,
+                uint8_t* values, int64_t* values_buffered,
+                parquet::ColumnReader* reader) {
+  typedef typename RType::T Type;
+  auto typed_reader = static_cast<RType*>(reader);
+  auto vals = reinterpret_cast<Type*>(&values[0]);
+  return typed_reader->ReadBatch(batch_size, def_levels, rep_levels, vals,
+                                 values_buffered);
+}
+
+int64_t PARQUET_EXPORT ScanAllValues(int32_t batch_size, int16_t* def_levels,
+                                     int16_t* rep_levels, uint8_t* values,
+                                     int64_t* values_buffered,
+                                     parquet::ColumnReader* reader);
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..a278670fa81c681a34958f27136654799895bb38
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h
@@ -0,0 +1,307 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "arrow/util/compression.h"
+#include "parquet/exception.h"
+#include "parquet/platform.h"
+#include "parquet/types.h"
+
+namespace arrow {
+
+class Array;
+
+namespace bit_util {
+class BitWriter;
+}  // namespace bit_util
+
+namespace util {
+class RleEncoder;
+class CodecOptions;
+}  // namespace util
+
+}  // namespace arrow
+
+namespace parquet {
+
+struct ArrowWriteContext;
+class ColumnChunkMetaDataBuilder;
+class ColumnDescriptor;
+class ColumnIndexBuilder;
+class DataPage;
+class DictionaryPage;
+class Encryptor;
+class OffsetIndexBuilder;
+class WriterProperties;
+
+class PARQUET_EXPORT LevelEncoder {
+ public:
+  LevelEncoder();
+  ~LevelEncoder();
+
+  static int MaxBufferSize(Encoding::type encoding, int16_t max_level,
+                           int num_buffered_values);
+
+  // Initialize the LevelEncoder.
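+  //
+  // Illustrative usage sketch (not part of the upstream header; `def_levels`,
+  // `buffer`, and `num_buffered_values` are assumed caller-provided):
+  //
+  //   int size = LevelEncoder::MaxBufferSize(Encoding::RLE, /*max_level=*/1,
+  //                                          num_buffered_values);
+  //   LevelEncoder encoder;
+  //   encoder.Init(Encoding::RLE, /*max_level=*/1, num_buffered_values,
+  //                buffer, size);
+  //   int encoded = encoder.Encode(num_buffered_values, def_levels);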
+  void Init(Encoding::type encoding, int16_t max_level, int num_buffered_values,
+            uint8_t* data, int data_size);
+
+  // Encodes a batch of levels from an array and returns the number of levels encoded
+  int Encode(int batch_size, const int16_t* levels);
+
+  int32_t len() {
+    if (encoding_ != Encoding::RLE) {
+      throw ParquetException("Only implemented for RLE encoding");
+    }
+    return rle_length_;
+  }
+
+ private:
+  int bit_width_;
+  int rle_length_;
+  Encoding::type encoding_;
+  std::unique_ptr<::arrow::util::RleEncoder> rle_encoder_;
+  std::unique_ptr<::arrow::bit_util::BitWriter> bit_packed_encoder_;
+};
+
+class PARQUET_EXPORT PageWriter {
+ public:
+  virtual ~PageWriter() {}
+
+  static std::unique_ptr<PageWriter> Open(
+      std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
+      ColumnChunkMetaDataBuilder* metadata, int16_t row_group_ordinal = -1,
+      int16_t column_chunk_ordinal = -1,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+      bool buffered_row_group = false,
+      std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
+      std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
+      bool page_write_checksum_enabled = false,
+      // column_index_builder MUST outlive the PageWriter
+      ColumnIndexBuilder* column_index_builder = NULLPTR,
+      // offset_index_builder MUST outlive the PageWriter
+      OffsetIndexBuilder* offset_index_builder = NULLPTR,
+      const CodecOptions& codec_options = CodecOptions{});
+
+  ARROW_DEPRECATED("Deprecated in 13.0.0. Use CodecOptions-taking overload instead.")
+  static std::unique_ptr<PageWriter> Open(
+      std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
+      int compression_level, ColumnChunkMetaDataBuilder* metadata,
+      int16_t row_group_ordinal = -1, int16_t column_chunk_ordinal = -1,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+      bool buffered_row_group = false,
+      std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
+      std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
+      bool page_write_checksum_enabled = false,
+      // column_index_builder MUST outlive the PageWriter
+      ColumnIndexBuilder* column_index_builder = NULLPTR,
+      // offset_index_builder MUST outlive the PageWriter
+      OffsetIndexBuilder* offset_index_builder = NULLPTR);
+
+  // Called when the column writer is closed: `has_dictionary` indicates whether
+  // dictionary encoding was used, and `fallback` whether the dictionary encoding
+  // fell back to the default encoding on reaching the dictionary page limit.
+  virtual void Close(bool has_dictionary, bool fallback) = 0;
+
+  // Return the number of uncompressed bytes written (including header size)
+  virtual int64_t WriteDataPage(const DataPage& page) = 0;
+
+  // Return the number of uncompressed bytes written (including header size)
+  virtual int64_t WriteDictionaryPage(const DictionaryPage& page) = 0;
+
+  /// \brief The total number of bytes written as serialized data and
+  /// dictionary pages to the sink so far.
+  virtual int64_t total_compressed_bytes_written() const = 0;
+
+  virtual bool has_compressor() = 0;
+
+  virtual void Compress(const Buffer& src_buffer, ResizableBuffer* dest_buffer) = 0;
+};
+
+class PARQUET_EXPORT ColumnWriter {
+ public:
+  virtual ~ColumnWriter() = default;
+
+  static std::shared_ptr<ColumnWriter> Make(ColumnChunkMetaDataBuilder*,
+                                            std::unique_ptr<PageWriter>,
+                                            const WriterProperties* properties);
+
+  /// \brief Closes the ColumnWriter, commits any buffered values to pages.
+  /// \return Total size of the column in bytes
+  virtual int64_t Close() = 0;
+
+  /// \brief The physical Parquet type of the column
+  virtual Type::type type() const = 0;
+
+  /// \brief The schema for the column
+  virtual const ColumnDescriptor* descr() const = 0;
+
+  /// \brief The number of rows written so far
+  virtual int64_t rows_written() const = 0;
+
+  /// \brief The total size of the compressed pages + page headers. Values
+  /// are still buffered and not written to a page yet.
+  ///
+  /// So in un-buffered mode, it always returns 0.
+  virtual int64_t total_compressed_bytes() const = 0;
+
+  /// \brief The total number of bytes written as serialized data and
+  /// dictionary pages to the ColumnChunk so far.
+  /// These bytes are uncompressed bytes.
+  virtual int64_t total_bytes_written() const = 0;
+
+  /// \brief The total number of compressed bytes written as serialized data and
+  /// dictionary pages to the ColumnChunk so far.
+  /// If the column is uncompressed, the value would be equal to
+  /// total_bytes_written().
+  virtual int64_t total_compressed_bytes_written() const = 0;
+
+  /// \brief Estimated size of the values that are not written to a page yet.
+  virtual int64_t estimated_buffered_value_bytes() const = 0;
+
+  /// \brief The file-level writer properties
+  virtual const WriterProperties* properties() = 0;
+
+  /// \brief Write Apache Arrow columnar data directly to ColumnWriter. Returns
+  /// error status if the array data type is not compatible with the concrete
+  /// writer type.
+  ///
+  /// leaf_array is always a primitive (possibly dictionary-encoded) type.
+  /// leaf_field_nullable indicates whether the leaf array is considered nullable
+  /// according to its schema in a Table or its parent array.
+  virtual ::arrow::Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
+                                     int64_t num_levels, const ::arrow::Array& leaf_array,
+                                     ArrowWriteContext* ctx,
+                                     bool leaf_field_nullable) = 0;
+};
+
+// API to write values to a single column. This is the main client-facing API.
+template <typename DType>
+class TypedColumnWriter : public ColumnWriter {
+ public:
+  using T = typename DType::c_type;
+
+  // Write a batch of repetition levels, definition levels, and values to the
+  // column.
+  // `num_values` is the number of logical leaf values.
+  // `def_levels` (resp. `rep_levels`) can be null if the column's max definition level
+  // (resp. max repetition level) is 0.
+  // If not null, each of `def_levels` and `rep_levels` must have at least
+  // `num_values` entries.
+  //
+  // The number of physical values written (taken from `values`) is returned.
+  // It can be smaller than `num_values` if there are some undefined values.
+  virtual int64_t WriteBatch(int64_t num_values, const int16_t* def_levels,
+                             const int16_t* rep_levels, const T* values) = 0;
+
+  /// Write a batch of repetition levels, definition levels, and values to the
+  /// column.
+  ///
+  /// In comparison to WriteBatch the length of repetition and definition levels
+  /// is the same as the number of values read for max_definition_level == 1.
+  /// In the case of max_definition_level > 1, the repetition and definition
+  /// levels are larger than the values but the values include the null entries
+  /// with definition_level == (max_definition_level - 1). Thus we have to differentiate
+  /// in the parameters of this function if the input has the length of num_values or the
+  /// _number of rows in the lowest nesting level_.
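+  ///
+  /// Illustrative example (not from the upstream docs): for an optional list of
+  /// optional int32 items (max_definition_level == 3, max_repetition_level == 1),
+  /// the rows [[1, null], [], null] are described by
+  ///   def_levels = [3, 2, 1, 0] and rep_levels = [0, 1, 0, 0],
+  /// and plain WriteBatch() would receive values = [1].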
+  ///
+  /// If the innermost schema node is required, the _number of rows
+  /// in the lowest nesting level_ is equal to the number of non-null values. If the
+  /// innermost schema node is optional, the _number of rows in the lowest nesting level_
+  /// also includes all values with definition_level == (max_definition_level - 1).
+  ///
+  /// @param num_values number of levels to write.
+  /// @param def_levels The Parquet definition levels; length is num_values.
+  /// @param rep_levels The Parquet repetition levels; length is num_values.
+  /// @param valid_bits Bitmap that indicates if the row is null on the lowest nesting
+  ///   level. The length is the number of rows in the lowest nesting level.
+  /// @param valid_bits_offset The offset in bits of the valid_bits where the
+  ///   first relevant bit resides.
+  /// @param values The values in the lowest nested level including
+  ///   spacing for nulls on the lowest levels; input has the length
+  ///   of the number of rows on the lowest nesting level.
+  virtual void WriteBatchSpaced(int64_t num_values, const int16_t* def_levels,
+                                const int16_t* rep_levels, const uint8_t* valid_bits,
+                                int64_t valid_bits_offset, const T* values) = 0;
+};
+
+using BoolWriter = TypedColumnWriter<BooleanType>;
+using Int32Writer = TypedColumnWriter<Int32Type>;
+using Int64Writer = TypedColumnWriter<Int64Type>;
+using Int96Writer = TypedColumnWriter<Int96Type>;
+using FloatWriter = TypedColumnWriter<FloatType>;
+using DoubleWriter = TypedColumnWriter<DoubleType>;
+using ByteArrayWriter = TypedColumnWriter<ByteArrayType>;
+using FixedLenByteArrayWriter = TypedColumnWriter<FLBAType>;
+
+namespace internal {
+
+/**
+ * Timestamp conversion constants
+ */
+constexpr int64_t kJulianEpochOffsetDays = INT64_C(2440588);
+
+template <int64_t UnitPerDay, int64_t NanosecondsPerUnit>
+inline void ArrowTimestampToImpalaTimestamp(const int64_t time, Int96* impala_timestamp) {
+  int64_t julian_days = (time / UnitPerDay) + kJulianEpochOffsetDays;
+  (*impala_timestamp).value[2] = (uint32_t)julian_days;
+
+  int64_t last_day_units = time % UnitPerDay;
+  auto last_day_nanos = last_day_units * NanosecondsPerUnit;
+  // impala_timestamp will be unaligned every other entry so do memcpy instead
+  // of assign and reinterpret cast to avoid undefined behavior.
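+  // Illustrative note (not upstream documentation): the Impala Int96 timestamp
+  // layout is 8 bytes of nanoseconds-within-the-day (written by the memcpy
+  // below) followed by 4 bytes of Julian day number in value[2];
+  // kJulianEpochOffsetDays is the Julian day of 1970-01-01, so an input of
+  // exactly one day (time == UnitPerDay) yields julian_days == 2440589 and
+  // last_day_nanos == 0.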
+  std::memcpy(impala_timestamp, &last_day_nanos, sizeof(int64_t));
+}
+
+constexpr int64_t kSecondsInNanos = INT64_C(1000000000);
+
+inline void SecondsToImpalaTimestamp(const int64_t seconds, Int96* impala_timestamp) {
+  ArrowTimestampToImpalaTimestamp<kSecondsPerDay, kSecondsInNanos>(seconds,
+                                                                   impala_timestamp);
+}
+
+constexpr int64_t kMillisecondsInNanos = kSecondsInNanos / INT64_C(1000);
+
+inline void MillisecondsToImpalaTimestamp(const int64_t milliseconds,
+                                          Int96* impala_timestamp) {
+  ArrowTimestampToImpalaTimestamp<kMillisecondsPerDay, kMillisecondsInNanos>(
+      milliseconds, impala_timestamp);
+}
+
+constexpr int64_t kMicrosecondsInNanos = kMillisecondsInNanos / INT64_C(1000);
+
+inline void MicrosecondsToImpalaTimestamp(const int64_t microseconds,
+                                          Int96* impala_timestamp) {
+  ArrowTimestampToImpalaTimestamp<kMicrosecondsPerDay, kMicrosecondsInNanos>(
+      microseconds, impala_timestamp);
+}
+
+constexpr int64_t kNanosecondsInNanos = INT64_C(1);
+
+inline void NanosecondsToImpalaTimestamp(const int64_t nanoseconds,
+                                         Int96* impala_timestamp) {
+  ArrowTimestampToImpalaTimestamp<kNanosecondsPerDay, kNanosecondsInNanos>(
+      nanoseconds, impala_timestamp);
+}
+
+}  // namespace internal
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h
new file mode 100644
index 0000000000000000000000000000000000000000..602009189595e6b4e7f78fa0e1ca1104bc1edd71
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h
@@ -0,0 +1,471 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "arrow/util/spaced.h"
+
+#include "parquet/exception.h"
+#include "parquet/platform.h"
+#include "parquet/types.h"
+
+namespace arrow {
+
+class Array;
+class ArrayBuilder;
+class BinaryArray;
+class BinaryBuilder;
+class BooleanBuilder;
+class Int32Type;
+class Int64Type;
+class FloatType;
+class DoubleType;
+class FixedSizeBinaryType;
+template <typename T>
+class NumericBuilder;
+class FixedSizeBinaryBuilder;
+template <typename T>
+class Dictionary32Builder;
+
+}  // namespace arrow
+
+namespace parquet {
+
+template <typename DType>
+class TypedEncoder;
+
+using BooleanEncoder = TypedEncoder<BooleanType>;
+using Int32Encoder = TypedEncoder<Int32Type>;
+using Int64Encoder = TypedEncoder<Int64Type>;
+using Int96Encoder = TypedEncoder<Int96Type>;
+using FloatEncoder = TypedEncoder<FloatType>;
+using DoubleEncoder = TypedEncoder<DoubleType>;
+using ByteArrayEncoder = TypedEncoder<ByteArrayType>;
+using FLBAEncoder = TypedEncoder<FLBAType>;
+
+template <typename DType>
+class TypedDecoder;
+
+class BooleanDecoder;
+using Int32Decoder = TypedDecoder<Int32Type>;
+using Int64Decoder = TypedDecoder<Int64Type>;
+using Int96Decoder = TypedDecoder<Int96Type>;
+using FloatDecoder = TypedDecoder<FloatType>;
+using DoubleDecoder = TypedDecoder<DoubleType>;
+using ByteArrayDecoder = TypedDecoder<ByteArrayType>;
+class FLBADecoder;
+
+template <typename DType>
+struct EncodingTraits;
+
+template <>
+struct EncodingTraits<BooleanType> {
+  using Encoder = BooleanEncoder;
+  using Decoder = BooleanDecoder;
+
+  using ArrowType = ::arrow::BooleanType;
+  using Accumulator = ::arrow::BooleanBuilder;
+  struct DictAccumulator {};
+};
+
+template <>
+struct EncodingTraits<Int32Type> {
+  using Encoder = Int32Encoder;
+  using Decoder = Int32Decoder;
+
+  using ArrowType = ::arrow::Int32Type;
+  using Accumulator = ::arrow::NumericBuilder<::arrow::Int32Type>;
+  using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int32Type>;
+};
+
+template <>
+struct EncodingTraits<Int64Type> {
+  using Encoder = Int64Encoder;
+  using Decoder = Int64Decoder;
+
+  using ArrowType = ::arrow::Int64Type;
+  using Accumulator = ::arrow::NumericBuilder<::arrow::Int64Type>;
+  using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int64Type>;
+};
+
+template <>
+struct EncodingTraits<Int96Type> {
+  using Encoder = Int96Encoder;
+  using Decoder = Int96Decoder;
+
+  struct Accumulator {};
+  struct DictAccumulator {};
+};
+
+template <>
+struct EncodingTraits<FloatType> {
+  using Encoder = FloatEncoder;
+  using Decoder = FloatDecoder;
+
+  using ArrowType = ::arrow::FloatType;
+  using Accumulator = ::arrow::NumericBuilder<::arrow::FloatType>;
+  using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::FloatType>;
+};
+
+template <>
+struct EncodingTraits<DoubleType> {
+  using Encoder = DoubleEncoder;
+  using Decoder = DoubleDecoder;
+
+  using ArrowType = ::arrow::DoubleType;
+  using Accumulator = ::arrow::NumericBuilder<::arrow::DoubleType>;
+  using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::DoubleType>;
+};
+
+template <>
+struct EncodingTraits<ByteArrayType> {
+  using Encoder = ByteArrayEncoder;
+  using Decoder = ByteArrayDecoder;
+
+  using ArrowType = ::arrow::BinaryType;
+  /// \brief Internal helper class for decoding BYTE_ARRAY data where we can
+  /// overflow the capacity of a single arrow::BinaryArray
+  struct Accumulator {
+    std::unique_ptr<::arrow::BinaryBuilder> builder;
+    std::vector<std::shared_ptr<::arrow::Array>> chunks;
+  };
+  using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::BinaryType>;
+};
+
+template <>
+struct EncodingTraits<FLBAType> {
+  using Encoder = FLBAEncoder;
+  using Decoder = FLBADecoder;
+
+  using ArrowType = ::arrow::FixedSizeBinaryType;
+  using Accumulator = ::arrow::FixedSizeBinaryBuilder;
+  using DictAccumulator =
+      ::arrow::Dictionary32Builder<::arrow::FixedSizeBinaryType>;
+};
+
+class ColumnDescriptor;
+
+// Untyped base for all encoders
+class Encoder {
+ public:
+  virtual ~Encoder() = default;
+
+  virtual int64_t EstimatedDataEncodedSize() = 0;
+  virtual std::shared_ptr<Buffer> FlushValues() = 0;
+  virtual Encoding::type encoding() const = 0;
+
+  virtual void Put(const ::arrow::Array& values) = 0;
+
+  virtual MemoryPool* memory_pool() const = 0;
+};
+
+// Base class for value encoders. Since encoders may or may not have state (e.g.,
+// dictionary encoding) we use a class instance to maintain any state.
+//
+// Encode interfaces are internal, subject to change without deprecation.
+template <typename DType>
+class TypedEncoder : virtual public Encoder {
+ public:
+  typedef typename DType::c_type T;
+
+  using Encoder::Put;
+
+  virtual void Put(const T* src, int num_values) = 0;
+
+  virtual void Put(const std::vector<T>& src, int num_values = -1);
+
+  virtual void PutSpaced(const T* src, int num_values, const uint8_t* valid_bits,
+                         int64_t valid_bits_offset) = 0;
+};
+
+template <typename DType>
+void TypedEncoder<DType>::Put(const std::vector<T>& src, int num_values) {
+  if (num_values == -1) {
+    num_values = static_cast<int>(src.size());
+  }
+  Put(src.data(), num_values);
+}
+
+template <>
+inline void TypedEncoder<BooleanType>::Put(const std::vector<bool>& src,
+                                           int num_values) {
+  // NOTE(wesm): This stub is here only to satisfy the compiler; it is
+  // overridden later with the actual implementation
+}
+
+// Base class for dictionary encoders
+template <typename DType>
+class DictEncoder : virtual public TypedEncoder<DType> {
+ public:
+  /// Writes out any buffered indices to buffer preceded by the bit width of this data.
+  /// Returns the number of bytes written.
+  /// If the supplied buffer is not big enough, returns -1.
+  /// buffer must be preallocated with buffer_len bytes. Use EstimatedDataEncodedSize()
+  /// to size buffer.
+  virtual int WriteIndices(uint8_t* buffer, int buffer_len) = 0;
+
+  virtual int dict_encoded_size() const = 0;
+
+  virtual int bit_width() const = 0;
+
+  /// Writes out the encoded dictionary to buffer. buffer must be preallocated to
+  /// dict_encoded_size() bytes.
+  virtual void WriteDict(uint8_t* buffer) const = 0;
+
+  virtual int num_entries() const = 0;
+
+  /// \brief EXPERIMENTAL: Append dictionary indices into the encoder. It is
+  /// assumed (without any boundschecking) that the indices reference
+  /// preexisting dictionary values.
+  /// \param[in] indices the dictionary index values. Only Int32Array currently
+  /// supported
+  virtual void PutIndices(const ::arrow::Array& indices) = 0;
+
+  /// \brief EXPERIMENTAL: Append dictionary into encoder, inserting indices
+  /// separately. Currently throws exception if the current dictionary memo is
+  /// non-empty.
+  /// \param[in] values the dictionary values. Only valid for certain
+  /// Parquet/Arrow type combinations, like BYTE_ARRAY/BinaryArray
+  virtual void PutDictionary(const ::arrow::Array& values) = 0;
+};
+
+// ----------------------------------------------------------------------
+// Value decoding
+
+class Decoder {
+ public:
+  virtual ~Decoder() = default;
+
+  // Sets the data for a new page. This will be called multiple times on the same
+  // decoder and should reset all internal state.
+  virtual void SetData(int num_values, const uint8_t* data, int len) = 0;
+
+  // Returns the number of values left (for the last call to SetData()). This is
+  // the number of values left in this page.
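+  //
+  // Illustrative decode loop (a sketch, not upstream documentation; the
+  // `page_*`, `out`, and `batch_size` names are assumed):
+  //
+  //   decoder->SetData(page_num_values, page_data, page_byte_size);
+  //   while (decoder->values_left() > 0) {
+  //     total += typed_decoder->Decode(out + total, batch_size);
+  //   }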
+  virtual int values_left() const = 0;
+  virtual Encoding::type encoding() const = 0;
+};
+
+template <typename DType>
+class TypedDecoder : virtual public Decoder {
+ public:
+  using T = typename DType::c_type;
+
+  /// \brief Decode values into a buffer
+  ///
+  /// Subclasses may override the more specialized Decode methods below.
+  ///
+  /// \param[in] buffer destination for decoded values
+  /// \param[in] max_values maximum number of values to decode
+  /// \return The number of values decoded. Should be identical to max_values except
+  /// at the end of the current data page.
+  virtual int Decode(T* buffer, int max_values) = 0;
+
+  /// \brief Decode the values in this data page but leave spaces for null entries.
+  ///
+  /// \param[in] buffer destination for decoded values
+  /// \param[in] num_values size of the def_levels and buffer arrays including the number
+  /// of null slots
+  /// \param[in] null_count number of null slots
+  /// \param[in] valid_bits bitmap data indicating position of valid slots
+  /// \param[in] valid_bits_offset offset into valid_bits
+  /// \return The number of values decoded, including nulls.
+  virtual int DecodeSpaced(T* buffer, int num_values, int null_count,
+                           const uint8_t* valid_bits, int64_t valid_bits_offset) {
+    if (null_count > 0) {
+      int values_to_read = num_values - null_count;
+      int values_read = Decode(buffer, values_to_read);
+      if (values_read != values_to_read) {
+        throw ParquetException("Number of values / definition_levels read did not match");
+      }
+
+      return ::arrow::util::internal::SpacedExpand<T>(buffer, num_values, null_count,
+                                                      valid_bits, valid_bits_offset);
+    } else {
+      return Decode(buffer, num_values);
+    }
+  }
+
+  /// \brief Decode into an ArrayBuilder or other accumulator
+  ///
+  /// This function assumes the definition levels were already decoded
+  /// as a validity bitmap in the given `valid_bits`. `null_count`
+  /// is the number of 0s in `valid_bits`.
+  /// As a space optimization, it is allowed for `valid_bits` to be null
+  /// if `null_count` is zero.
+  ///
+  /// \return number of values decoded
+  virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits,
+                          int64_t valid_bits_offset,
+                          typename EncodingTraits<DType>::Accumulator* out) = 0;
+
+  /// \brief Decode into an ArrayBuilder or other accumulator ignoring nulls
+  ///
+  /// \return number of values decoded
+  int DecodeArrowNonNull(int num_values,
+                         typename EncodingTraits<DType>::Accumulator* out) {
+    return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, out);
+  }
+
+  /// \brief Decode into a DictionaryBuilder
+  ///
+  /// This function assumes the definition levels were already decoded
+  /// as a validity bitmap in the given `valid_bits`. `null_count`
+  /// is the number of 0s in `valid_bits`.
+  /// As a space optimization, it is allowed for `valid_bits` to be null
+  /// if `null_count` is zero.
+  ///
+  /// \return number of values decoded
+  virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits,
+                          int64_t valid_bits_offset,
+                          typename EncodingTraits<DType>::DictAccumulator* builder) = 0;
+
+  /// \brief Decode into a DictionaryBuilder ignoring nulls
+  ///
+  /// \return number of values decoded
+  int DecodeArrowNonNull(int num_values,
+                         typename EncodingTraits<DType>::DictAccumulator* builder) {
+    return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, builder);
+  }
+};
+
+template <typename DType>
+class DictDecoder : virtual public TypedDecoder<DType> {
+ public:
+  using T = typename DType::c_type;
+
+  virtual void SetDict(TypedDecoder<DType>* dictionary) = 0;
+
+  /// \brief Insert dictionary values into the Arrow dictionary builder's memo,
+  /// but do not append any indices
+  virtual void InsertDictionary(::arrow::ArrayBuilder* builder) = 0;
+
+  /// \brief Decode only dictionary indices and append to dictionary
+  /// builder. The builder must have had the dictionary from this decoder
+  /// inserted already.
+  ///
+  /// \warning Remember to reset the builder each time the dict decoder is initialized
+  /// with a new dictionary page
+  virtual int DecodeIndicesSpaced(int num_values, int null_count,
+                                  const uint8_t* valid_bits, int64_t valid_bits_offset,
+                                  ::arrow::ArrayBuilder* builder) = 0;
+
+  /// \brief Decode only dictionary indices (no nulls)
+  ///
+  /// \warning Remember to reset the builder each time the dict decoder is initialized
+  /// with a new dictionary page
+  virtual int DecodeIndices(int num_values, ::arrow::ArrayBuilder* builder) = 0;
+
+  /// \brief Decode only dictionary indices (no nulls). Same as above
+  /// DecodeIndices but target is an array instead of a builder.
+  ///
+  /// \note API EXPERIMENTAL
+  virtual int DecodeIndices(int num_values, int32_t* indices) = 0;
+
+  /// \brief Get dictionary. The reader will call this API when it encounters a
+  /// new dictionary.
+  ///
+  /// @param[out] dictionary The pointer to dictionary values. Dictionary is owned by
+  /// the decoder and is destroyed when the decoder is destroyed.
+  /// @param[out] dictionary_length The dictionary length.
+  ///
+  /// \note API EXPERIMENTAL
+  virtual void GetDictionary(const T** dictionary, int32_t* dictionary_length) = 0;
+};
+
+// ----------------------------------------------------------------------
+// TypedEncoder specializations, traits, and factory functions
+
+class BooleanDecoder : virtual public TypedDecoder<BooleanType> {
+ public:
+  using TypedDecoder<BooleanType>::Decode;
+
+  /// \brief Decode and bit-pack values into a buffer
+  ///
+  /// \param[in] buffer destination for decoded values
+  /// This buffer will contain bit-packed values. If
+  /// max_values is not a multiple of 8, the trailing bits
+  /// of the last byte will be undefined.
+  /// \param[in] max_values max values to decode.
+  /// \return The number of values decoded. Should be identical to max_values except
+  /// at the end of the current data page.
+  virtual int Decode(uint8_t* buffer, int max_values) = 0;
+};
+
+class FLBADecoder : virtual public TypedDecoder<FLBAType> {
+ public:
+  using TypedDecoder<FLBAType>::DecodeSpaced;
+
+  // TODO(wesm): As possible follow-up to PARQUET-1508, we should examine if
+  // there is value in adding specialized read methods for
+  // FIXED_LEN_BYTE_ARRAY.
+  // If only Decimal data can occur with this data type
+  // then perhaps not.
+};
+
+PARQUET_EXPORT
+std::unique_ptr<Encoder> MakeEncoder(
+    Type::type type_num, Encoding::type encoding, bool use_dictionary = false,
+    const ColumnDescriptor* descr = NULLPTR,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+template <typename DType>
+std::unique_ptr<typename EncodingTraits<DType>::Encoder> MakeTypedEncoder(
+    Encoding::type encoding, bool use_dictionary = false,
+    const ColumnDescriptor* descr = NULLPTR,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
+  using OutType = typename EncodingTraits<DType>::Encoder;
+  std::unique_ptr<Encoder> base =
+      MakeEncoder(DType::type_num, encoding, use_dictionary, descr, pool);
+  return std::unique_ptr<OutType>(dynamic_cast<OutType*>(base.release()));
+}
+
+PARQUET_EXPORT
+std::unique_ptr<Decoder> MakeDecoder(
+    Type::type type_num, Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+namespace detail {
+
+PARQUET_EXPORT
+std::unique_ptr<Decoder> MakeDictDecoder(Type::type type_num,
+                                         const ColumnDescriptor* descr,
+                                         ::arrow::MemoryPool* pool);
+
+}  // namespace detail
+
+template <typename DType>
+std::unique_ptr<DictDecoder<DType>> MakeDictDecoder(
+    const ColumnDescriptor* descr = NULLPTR,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
+  using OutType = DictDecoder<DType>;
+  auto decoder = detail::MakeDictDecoder(DType::type_num, descr, pool);
+  return std::unique_ptr<OutType>(dynamic_cast<OutType*>(decoder.release()));
+}
+
+template <typename DType>
+std::unique_ptr<typename EncodingTraits<DType>::Decoder> MakeTypedDecoder(
+    Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
+  using OutType = typename EncodingTraits<DType>::Decoder;
+  std::unique_ptr<Decoder> base = MakeDecoder(DType::type_num, encoding, descr, pool);
+  return std::unique_ptr<OutType>(dynamic_cast<OutType*>(base.release()));
+}
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..b59b59f95c2d8766f216a9cd923847d5483de4ee
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h
@@ -0,0 +1,231 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/io/caching.h"
+#include "arrow/util/type_fwd.h"
+#include "parquet/metadata.h"  // IWYU pragma: keep
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+
+namespace parquet {
+
+class ColumnReader;
+class FileMetaData;
+class PageIndexReader;
+class BloomFilterReader;
+class PageReader;
+class RowGroupMetaData;
+
+namespace internal {
+class RecordReader;
+}
+
+class PARQUET_EXPORT RowGroupReader {
+ public:
+  // Forward declare a virtual class 'Contents' to aid dependency injection and more
+  // easily create test fixtures
+  // An implementation of the Contents class is defined in the .cc file
+  struct Contents {
+    virtual ~Contents() {}
+    virtual std::unique_ptr<PageReader> GetColumnPageReader(int i) = 0;
+    virtual const RowGroupMetaData* metadata() const = 0;
+    virtual const ReaderProperties* properties() const = 0;
+  };
+
+  explicit RowGroupReader(std::unique_ptr<Contents> contents);
+
+  // Returns the row group metadata
+  const RowGroupMetaData* metadata() const;
+
+  // Construct a ColumnReader for the indicated row group-relative
+  // column. Ownership is shared with the RowGroupReader.
+  std::shared_ptr<ColumnReader> Column(int i);
+
+  // EXPERIMENTAL: Construct a RecordReader for the indicated column of the row group.
+  // Ownership is shared with the RowGroupReader.
+  std::shared_ptr<internal::RecordReader> RecordReader(int i,
+                                                       bool read_dictionary = false);
+
+  // Construct a ColumnReader, trying to enable exposed encoding.
+  //
+  // For dictionary encoding, currently we only support column chunks that are fully
+  // dictionary encoded, i.e., all data pages in the column chunk are dictionary encoded.
+  // If a column chunk uses dictionary encoding but then falls back to plain encoding, the
+  // encoding will not be exposed.
+  //
+  // The returned column reader provides an API GetExposedEncoding() for the
+  // users to check the exposed encoding and determine how to read the batches.
+  //
+  // \note API EXPERIMENTAL
+  std::shared_ptr<ColumnReader> ColumnWithExposeEncoding(
+      int i, ExposedEncoding encoding_to_expose);
+
+  // Construct a RecordReader, trying to enable exposed encoding.
+  //
+  // For dictionary encoding, currently we only support column chunks that are
+  // fully dictionary encoded byte arrays. The caller should verify if the reader can read
+  // and expose the dictionary by checking the reader's read_dictionary(). If a column
+  // chunk uses dictionary encoding but then falls back to plain encoding, the returned
+  // reader will read decoded data without exposing the dictionary.
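+  //
+  // Illustrative usage (a sketch, not upstream documentation):
+  //
+  //   auto reader = row_group->RecordReaderWithExposeEncoding(
+  //       /*i=*/0, ExposedEncoding::DICTIONARY);
+  //   if (reader->read_dictionary()) {
+  //     // batches are exposed as dictionary indices plus a dictionary
+  //   }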
+  //
+  // \note API EXPERIMENTAL
+  std::shared_ptr<internal::RecordReader> RecordReaderWithExposeEncoding(
+      int i, ExposedEncoding encoding_to_expose);
+
+  std::unique_ptr<PageReader> GetColumnPageReader(int i);
+
+ private:
+  // Holds a pointer to an instance of Contents implementation
+  std::unique_ptr<Contents> contents_;
+};
+
+class PARQUET_EXPORT ParquetFileReader {
+ public:
+  // Declare a virtual class 'Contents' to aid dependency injection and more
+  // easily create test fixtures
+  // An implementation of the Contents class is defined in the .cc file
+  struct PARQUET_EXPORT Contents {
+    static std::unique_ptr<Contents> Open(
+        std::shared_ptr<::arrow::io::RandomAccessFile> source,
+        const ReaderProperties& props = default_reader_properties(),
+        std::shared_ptr<FileMetaData> metadata = NULLPTR);
+
+    static ::arrow::Future<std::unique_ptr<Contents>> OpenAsync(
+        std::shared_ptr<::arrow::io::RandomAccessFile> source,
+        const ReaderProperties& props = default_reader_properties(),
+        std::shared_ptr<FileMetaData> metadata = NULLPTR);
+
+    virtual ~Contents() = default;
+    // Perform any cleanup associated with the file contents
+    virtual void Close() = 0;
+    virtual std::shared_ptr<RowGroupReader> GetRowGroup(int i) = 0;
+    virtual std::shared_ptr<FileMetaData> metadata() const = 0;
+    virtual std::shared_ptr<PageIndexReader> GetPageIndexReader() = 0;
+    virtual BloomFilterReader& GetBloomFilterReader() = 0;
+  };
+
+  ParquetFileReader();
+  ~ParquetFileReader();
+
+  // Create a file reader instance from an Arrow file object. Thread-safety is
+  // the responsibility of the file implementation
+  static std::unique_ptr<ParquetFileReader> Open(
+      std::shared_ptr<::arrow::io::RandomAccessFile> source,
+      const ReaderProperties& props = default_reader_properties(),
+      std::shared_ptr<FileMetaData> metadata = NULLPTR);
+
+  // API convenience to open a serialized Parquet file on disk, using Arrow IO
+  // interfaces.
+  static std::unique_ptr<ParquetFileReader> OpenFile(
+      const std::string& path, bool memory_map = false,
+      const ReaderProperties& props = default_reader_properties(),
+      std::shared_ptr<FileMetaData> metadata = NULLPTR);
+
+  // Asynchronously open a file reader from an Arrow file object.
+  // Does not throw - all errors are reported through the Future.
+  static ::arrow::Future<std::unique_ptr<ParquetFileReader>> OpenAsync(
+      std::shared_ptr<::arrow::io::RandomAccessFile> source,
+      const ReaderProperties& props = default_reader_properties(),
+      std::shared_ptr<FileMetaData> metadata = NULLPTR);
+
+  void Open(std::unique_ptr<Contents> contents);
+  void Close();
+
+  // The RowGroupReader is owned by the FileReader
+  std::shared_ptr<RowGroupReader> RowGroup(int i);
+
+  // Returns the file metadata. Only one instance is ever created
+  std::shared_ptr<FileMetaData> metadata() const;
+
+  /// Returns the PageIndexReader. Only one instance is ever created.
+  ///
+  /// If the file does not have the page index, nullptr may be returned.
+  /// Because checking for the existence of the page index is not free, a
+  /// non-null value may be returned even if the page index does not exist.
+  /// It is the caller's responsibility to check the return value and the
+  /// results of the follow-up calls to the PageIndexReader.
+  ///
+  /// WARNING: The returned PageIndexReader must not outlive the ParquetFileReader.
+  /// The first call to GetPageIndexReader(), which initializes it, is not thread-safe.
+  std::shared_ptr<PageIndexReader> GetPageIndexReader();
+
+  /// Returns the BloomFilterReader. Only one instance is ever created.
+  ///
+  /// WARNING: The returned BloomFilterReader must not outlive the ParquetFileReader.
+  /// The first call to GetBloomFilterReader(), which initializes it, is not thread-safe.
+  BloomFilterReader& GetBloomFilterReader();
+
+  /// Pre-buffer the specified column indices in all row groups.
+  ///
+  /// Readers can optionally call this to cache the necessary slices
+  /// of the file in-memory before deserialization. Arrow readers can
+  /// automatically do this via an option. This is intended to
+  /// increase performance when reading from high-latency filesystems
+  /// (e.g. Amazon S3).
+  ///
+  /// After calling this, creating readers for row groups/column
+  /// indices that were not buffered may fail. Creating multiple
+  /// readers for a subset of the buffered regions is
+  /// acceptable. This may be called again to buffer a different set
+  /// of row groups/columns.
+  ///
+  /// If memory usage is a concern, note that data will remain
+  /// buffered in memory until either \a PreBuffer() is called again,
+  /// or the reader itself is destructed. Reading - and buffering -
+  /// only one row group at a time may be useful.
+  ///
+  /// This method may throw.
+  void PreBuffer(const std::vector<int>& row_groups,
+                 const std::vector<int>& column_indices,
+                 const ::arrow::io::IOContext& ctx,
+                 const ::arrow::io::CacheOptions& options);
+
+  /// Wait for the specified row groups and column indices to be pre-buffered.
+  ///
+  /// After the returned Future completes, reading the specified row
+  /// groups/columns will not block.
+  ///
+  /// PreBuffer must be called first. This method does not throw.
+  ::arrow::Future<> WhenBuffered(const std::vector<int>& row_groups,
+                                 const std::vector<int>& column_indices) const;
+
+ private:
+  // Holds a pointer to an instance of Contents implementation
+  std::unique_ptr<Contents> contents_;
+};
+
+// Read only the Parquet file metadata
+std::shared_ptr<FileMetaData> PARQUET_EXPORT
+ReadMetaData(const std::shared_ptr<::arrow::io::RandomAccessFile>& source);
+
+/// \brief Scan all values in file. Useful for performance testing
+/// \param[in] columns the column numbers to scan. If empty scans all
+/// \param[in] column_batch_size number of values to read at a time when scanning column
+/// \param[in] reader a ParquetFileReader instance
+/// \return number of semantic rows in file
+PARQUET_EXPORT
+int64_t ScanFileContents(std::vector<int> columns, const int32_t column_batch_size,
+                         ParquetFileReader* reader);
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..31706af86dbde8c57aa47c9678b1f16cf8e69eb0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/file_writer.h
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "parquet/metadata.h"
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+#include "parquet/schema.h"
+
+namespace parquet {
+
+class ColumnWriter;
+
+// FIXME: copied from reader-internal.cc
+static constexpr uint8_t kParquetMagic[4] = {'P', 'A', 'R', '1'};
+static constexpr uint8_t kParquetEMagic[4] = {'P', 'A', 'R', 'E'};
+
+class PARQUET_EXPORT RowGroupWriter {
+ public:
+  // Forward declare a virtual class 'Contents' to aid dependency injection and more
+  // easily create test fixtures
+  // An implementation of the Contents class is defined in the .cc file
+  struct Contents {
+    virtual ~Contents() = default;
+    virtual int num_columns() const = 0;
+    virtual int64_t num_rows() const = 0;
+
+    // to be used only with ParquetFileWriter::AppendRowGroup
+    virtual ColumnWriter* NextColumn() = 0;
+    // to be used only with ParquetFileWriter::AppendBufferedRowGroup
+    virtual ColumnWriter* column(int i) = 0;
+
+    virtual int current_column() const = 0;
+    virtual void Close() = 0;
+
+    /// \brief total uncompressed bytes written by the page writer
+    virtual int64_t total_bytes_written() const = 0;
+    /// \brief total bytes still compressed but not written by the page writer
+    virtual int64_t total_compressed_bytes() const = 0;
+    /// \brief total compressed bytes written by the page writer
+    virtual int64_t total_compressed_bytes_written() const = 0;
+
+    virtual bool buffered() const = 0;
+  };
+
+  explicit RowGroupWriter(std::unique_ptr<Contents> contents);
+
+  /// Construct a ColumnWriter for the indicated row group-relative column.
+  ///
+  /// To be used only with ParquetFileWriter::AppendRowGroup.
+  /// Ownership is solely within the RowGroupWriter. The ColumnWriter is only
+  /// valid until the next call to NextColumn or Close. As the contents are
+  /// directly written to the sink, once a new column is started, the contents
+  /// of the previous one cannot be modified anymore.
+  ColumnWriter* NextColumn();
+  /// Index of the currently written column. Equal to -1 if NextColumn()
+  /// has not been called yet.
+  int current_column();
+  void Close();
+
+  int num_columns() const;
+
+  /// Construct a ColumnWriter for the indicated row group column.
+  ///
+  /// To be used only with ParquetFileWriter::AppendBufferedRowGroup.
+  /// Ownership is solely within the RowGroupWriter. The ColumnWriter is
+  /// valid until Close. The contents are buffered in memory and written to the
+  /// sink on Close.
+  ColumnWriter* column(int i);
+
+  /**
+   * Number of rows that shall be written as part of this RowGroup.
+   */
+  int64_t num_rows() const;
+
+  /// \brief total uncompressed bytes written by the page writer
+  int64_t total_bytes_written() const;
+  /// \brief total bytes still compressed but not written by the page writer.
+  /// It will always return 0 from the SerializedPageWriter.
+  int64_t total_compressed_bytes() const;
+  /// \brief total compressed bytes written by the page writer
+  int64_t total_compressed_bytes_written() const;
+
+  /// Returns whether the current RowGroupWriter is in the buffered mode and is created
+  /// by calling ParquetFileWriter::AppendBufferedRowGroup.
+  bool buffered() const;
+
+ private:
+  // Holds a pointer to an instance of Contents implementation
+  std::unique_ptr<Contents> contents_;
+};
+
+PARQUET_EXPORT
+void WriteFileMetaData(const FileMetaData& file_metadata,
+                       ::arrow::io::OutputStream* sink);
+
+PARQUET_EXPORT
+void WriteMetaDataFile(const FileMetaData& file_metadata,
+                       ::arrow::io::OutputStream* sink);
+
+PARQUET_EXPORT
+void WriteEncryptedFileMetadata(const FileMetaData& file_metadata,
+                                ArrowOutputStream* sink,
+                                const std::shared_ptr<Encryptor>& encryptor,
+                                bool encrypt_footer);
+
+PARQUET_EXPORT
+void WriteEncryptedFileMetadata(const FileMetaData& file_metadata,
+                                ::arrow::io::OutputStream* sink,
+                                const std::shared_ptr<Encryptor>& encryptor = NULLPTR,
+                                bool encrypt_footer = false);
+PARQUET_EXPORT
+void WriteFileCryptoMetaData(const FileCryptoMetaData& crypto_metadata,
+                             ::arrow::io::OutputStream* sink);
+
+class PARQUET_EXPORT ParquetFileWriter {
+ public:
+  // Forward declare a virtual class 'Contents' to aid dependency injection and more
+  // easily create test fixtures
+  // An implementation of the Contents class is defined in the .cc file
+  struct Contents {
+    Contents(std::shared_ptr<::parquet::schema::GroupNode> schema,
+             std::shared_ptr<const KeyValueMetadata> key_value_metadata)
+        : schema_(), key_value_metadata_(std::move(key_value_metadata)) {
+      schema_.Init(std::move(schema));
+    }
+    virtual ~Contents() {}
+    // Perform any cleanup associated with the file contents
+    virtual void Close() = 0;
+
+    virtual RowGroupWriter* AppendRowGroup() = 0;
+    virtual RowGroupWriter* AppendBufferedRowGroup() = 0;
+
+    virtual int64_t num_rows() const = 0;
+    virtual int num_columns() const = 0;
+    virtual int num_row_groups() const = 0;
+
+    virtual const std::shared_ptr<WriterProperties>& properties() const = 0;
+
+    const std::shared_ptr<const KeyValueMetadata>& key_value_metadata() const {
+      return key_value_metadata_;
+    }
+
+    virtual void AddKeyValueMetadata(
+        const std::shared_ptr<const KeyValueMetadata>& key_value_metadata) = 0;
+
+    // Return const-pointer to make it clear that this object is not to be copied
+    const SchemaDescriptor* schema() const { return &schema_; }
+
+    SchemaDescriptor schema_;
+
+    /// This should be the only place this is stored. Everything else is a const reference
+    std::shared_ptr<const KeyValueMetadata> key_value_metadata_;
+
+    const std::shared_ptr<FileMetaData>& metadata() const { return file_metadata_; }
+    std::shared_ptr<FileMetaData> file_metadata_;
+  };
+
+  ParquetFileWriter();
+  ~ParquetFileWriter();
+
+  static std::unique_ptr<ParquetFileWriter> Open(
+      std::shared_ptr<::arrow::io::OutputStream> sink,
+      std::shared_ptr<schema::GroupNode> schema,
+      std::shared_ptr<WriterProperties> properties = default_writer_properties(),
+      std::shared_ptr<const KeyValueMetadata> key_value_metadata = NULLPTR);
+
+  void Open(std::unique_ptr<Contents> contents);
+  void Close();
+
+  /// Construct a RowGroupWriter with an arbitrary number of rows.
+  ///
+  /// Ownership is solely within the ParquetFileWriter. The RowGroupWriter is only valid
+  /// until the next call to AppendRowGroup or AppendBufferedRowGroup or Close.
+  RowGroupWriter* AppendRowGroup();
+
+  /// Construct a RowGroupWriter that buffers all the values until the RowGroup is ready.
+  /// Use this if you want to write a RowGroup based on a certain size.
+  ///
+  /// Ownership is solely within the ParquetFileWriter. The RowGroupWriter is only valid
+  /// until the next call to AppendRowGroup or AppendBufferedRowGroup or Close.
+  RowGroupWriter* AppendBufferedRowGroup();
+
+  /// \brief Add key-value metadata to the file.
+  /// \param[in] key_value_metadata the metadata to add.
+  /// \note This will overwrite any existing metadata with the same key.
+  /// \throw ParquetException if Close() has been called.
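+  ///
+  /// Illustrative example (a sketch; the key/value strings are arbitrary):
+  ///
+  ///   writer->AddKeyValueMetadata(
+  ///       ::arrow::KeyValueMetadata::Make({"origin"}, {"example"}));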
+  void AddKeyValueMetadata(
+      const std::shared_ptr<const KeyValueMetadata>& key_value_metadata);
+
+  /// Number of columns.
+  ///
+  /// This number is fixed during the lifetime of the writer as it is determined via
+  /// the schema.
+  int num_columns() const;
+
+  /// Number of rows in the RowGroups started so far.
+  ///
+  /// Changes when a new RowGroup is added.
+  int64_t num_rows() const;
+
+  /// Number of started RowGroups.
+  int num_row_groups() const;
+
+  /// Configuration passed to the writer, e.g. the used Parquet format version.
+  const std::shared_ptr<WriterProperties>& properties() const;
+
+  /// Returns the file schema descriptor
+  const SchemaDescriptor* schema() const;
+
+  /// Returns a column descriptor in schema
+  const ColumnDescriptor* descr(int i) const;
+
+  /// Returns the file custom metadata
+  const std::shared_ptr<const KeyValueMetadata>& key_value_metadata() const;
+
+  /// Returns the file metadata, only available after calling Close().
+  const std::shared_ptr<FileMetaData> metadata() const;
+
+ private:
+  // Holds a pointer to an instance of Contents implementation
+  std::unique_ptr<Contents> contents_;
+  std::shared_ptr<FileMetaData> file_metadata_;
+};
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h
new file mode 100644
index 0000000000000000000000000000000000000000..519eb459b9ca832d382bc4f9c764b64b47dbd6b0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/hasher.h
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include "parquet/types.h"
+
+namespace parquet {
+// Abstract class for hash
+class Hasher {
+ public:
+  /// Compute hash for a 32-bit value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(int32_t value) const = 0;
+
+  /// Compute hash for a 64-bit value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(int64_t value) const = 0;
+
+  /// Compute hash for a float value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(float value) const = 0;
+
+  /// Compute hash for a double value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(double value) const = 0;
+
+  /// Compute hash for an Int96 value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(const Int96* value) const = 0;
+
+  /// Compute hash for a ByteArray value by using its plain encoding result.
+  ///
+  /// @param value the value to hash.
+  /// @return hash result.
+  virtual uint64_t Hash(const ByteArray* value) const = 0;
+
+  /// Compute hash for a fixed-length byte array value by using its plain
+  /// encoding result.
+  ///
+  /// @param value the value address.
+  /// @param len the value length.
+  /// @return hash result.
+  virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0;
+
+  /// Batch compute hashes for 32-bit values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for 64-bit values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for float values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for double values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for Int96 values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for ByteArray values by using their plain encoding results.
+  ///
+  /// @param values a pointer to the values to hash.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const ByteArray* values, int num_values,
+                      uint64_t* hashes) const = 0;
+
+  /// Batch compute hashes for fixed-length byte array values by using their plain
+  /// encoding results.
+  ///
+  /// @param values the value address.
+  /// @param type_len the value length.
+  /// @param num_values the number of values to hash.
+  /// @param hashes a pointer to the output hash values; its length should be equal to
+  /// num_values.
+  virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values,
+                      uint64_t* hashes) const = 0;
+
+  virtual ~Hasher() = default;
+};
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h
new file mode 100644
index 0000000000000000000000000000000000000000..31de95be41c473814c52cd9d2f5902d63f1b944b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion.h
@@ -0,0 +1,216 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "arrow/util/endian.h"
+#include "parquet/platform.h"
+#include "parquet/schema.h"
+
+namespace parquet::internal {
+
+struct PARQUET_EXPORT LevelInfo {
+  LevelInfo()
+      : null_slot_usage(1), def_level(0), rep_level(0), repeated_ancestor_def_level(0) {}
+  LevelInfo(int32_t null_slots, int32_t definition_level, int32_t repetition_level,
+            int32_t repeated_ancestor_definition_level)
+      : null_slot_usage(null_slots),
+        def_level(static_cast<int16_t>(definition_level)),
+        rep_level(static_cast<int16_t>(repetition_level)),
+        repeated_ancestor_def_level(
+            static_cast<int16_t>(repeated_ancestor_definition_level)) {}
+
+  bool operator==(const LevelInfo& b) const {
+    return null_slot_usage == b.null_slot_usage && def_level == b.def_level &&
+           rep_level == b.rep_level &&
+           repeated_ancestor_def_level == b.repeated_ancestor_def_level;
+  }
+
+  bool HasNullableValues() const { return repeated_ancestor_def_level < def_level; }
+
+  // How many slots an undefined but present (i.e. null) element in
+  // parquet consumes when decoding to Arrow.
+  // "Slot" is used in the same context as the Arrow specification
+  // (i.e. a value holder).
+  // This is only ever >1 for descendants of FixedSizeList.
+  int32_t null_slot_usage = 1;
+
+  // The definition level at which the value for the field
+  // is considered not null (definition levels greater than
+  // or equal to this value indicate a not-null
+  // value for the field). For list fields definition levels
+  // greater than or equal to this value indicate a present,
+  // possibly null, child value.
+  int16_t def_level = 0;
+
+  // The repetition level corresponding to this element
+  // or the closest repeated ancestor. Any repetition
+  // level less than this indicates either a new list OR
+  // an empty list (which is determined in conjunction
+  // with definition levels).
+  int16_t rep_level = 0;
+
+  // The definition level indicating the level at which the closest
+  // repeated ancestor is not empty. This is used to discriminate
+  // between a value less than |def_level| being null or excluded entirely.
+
+  // How many slots an undefined but present (i.e. null) element in
+  // parquet consumes when decoding to Arrow.
+  // "Slot" is used in the same context as the Arrow specification
+  // (i.e. a value holder).
+  // This is only ever >1 for descendants of FixedSizeList.
+  int32_t null_slot_usage = 1;
+
+  // The definition level at which the value for the field
+  // is considered not null (definition levels greater than
+  // or equal to this value indicate a not-null
+  // value for the field). For list fields definition levels
+  // greater than or equal to this field indicate a present,
+  // possibly null, child value.
+  int16_t def_level = 0;
+
+  // The repetition level corresponding to this element
+  // or the closest repeated ancestor. Any repetition
+  // level less than this indicates either a new list OR
+  // an empty list (which is determined in conjunction
+  // with definition levels).
+  int16_t rep_level = 0;
+
+  // The definition level indicating the level at which the closest
+  // repeated ancestor is not empty. This is used to discriminate
+  // between a value less than |def_level| being null or excluded entirely.
+  // For instance if we have an arrow schema like:
+  // list(struct(f0: int)), then there are the following
+  // definition levels:
+  //   0 = null list
+  //   1 = present but empty list.
+  //   2 = a null value in the list
+  //   3 = a non null struct but null integer.
+  //   4 = a present integer.
+  // When reconstructing, the struct and integer arrays'
+  // repeated_ancestor_def_level would be 2. Any
+  // def_level < 2 indicates that there isn't a corresponding
+  // child value in the list.
+  // i.e. [null, [], [null], [{f0: null}], [{f0: 1}]]
+  // has the def levels [0, 1, 2, 3, 4]. The actual
+  // struct array is only of length 3: [not-set, set, set] and
+  // the int array is also of length 3: [N/A, null, 1].
+  //
+  int16_t repeated_ancestor_def_level = 0;
+
+  /// Increments levels according to the cardinality of node.
+  void Increment(const schema::Node& node) {
+    if (node.is_repeated()) {
+      IncrementRepeated();
+      return;
+    }
+    if (node.is_optional()) {
+      IncrementOptional();
+      return;
+    }
+  }
+
+  /// Increments level for an optional node.
+  void IncrementOptional() { def_level++; }
+
+  /// Increments levels for the repeated node. Returns
+  /// the previous ancestor_list_def_level.
+  int16_t IncrementRepeated() {
+    int16_t last_repeated_ancestor = repeated_ancestor_def_level;
+
+    // Repeated fields add both a repetition and definition level. This is used
+    // to distinguish between an empty list and a list with an item in it.
+    ++rep_level;
+    ++def_level;
+    // Levels >= repeated_ancestor_def_level indicate the list was
+    // non-null and had at least one element. This is important
+    // for later decoding because we need to add a slot for these
+    // values. For levels < the current def_level, no slots are added
+    // to arrays.
+    repeated_ancestor_def_level = def_level;
+    return last_repeated_ancestor;
+  }
+
+  // Calculates and returns LevelInfo for a column descriptor.
+  static LevelInfo ComputeLevelInfo(const ColumnDescriptor* descr) {
+    LevelInfo level_info;
+    level_info.def_level = descr->max_definition_level();
+    level_info.rep_level = descr->max_repetition_level();
+
+    int16_t min_spaced_def_level = descr->max_definition_level();
+    const ::parquet::schema::Node* node = descr->schema_node().get();
+    while (node && !node->is_repeated()) {
+      if (node->is_optional()) {
+        min_spaced_def_level--;
+      }
+      node = node->parent();
+    }
+    level_info.repeated_ancestor_def_level = min_spaced_def_level;
+    return level_info;
+  }
+
+  friend std::ostream& operator<<(std::ostream& os, const LevelInfo& levels) {
+    // This print method is to silence valgrind issues. What's printed
+    // is not important because all asserts happen directly on
+    // members.
+    os << "{def=" << levels.def_level << ", rep=" << levels.rep_level
+       << ", repeated_ancestor_def=" << levels.repeated_ancestor_def_level;
+    if (levels.null_slot_usage > 1) {
+      os << ", null_slot_usage=" << levels.null_slot_usage;
+    }
+    os << "}";
+    return os;
+  }
+};
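+
+// Worked example: building the LevelInfo for a column whose schema path is
+// optional group -> repeated group -> optional int32 (i.e. an optional list
+// of optional ints), starting from a default-constructed LevelInfo:
+//
+//   LevelInfo info;            // {def=0, rep=0, repeated_ancestor_def=0}
+//   info.IncrementOptional();  // outer optional: def=1
+//   info.IncrementRepeated();  // repeated group: def=2, rep=1, ancestor=2
+//   info.IncrementOptional();  // leaf optional:  def=3
+//
+// This matches ComputeLevelInfo() on a descriptor whose max definition level
+// is 3 and max repetition level is 1.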
+
+// Input/Output structure for reconstructed validity bitmaps.
+struct PARQUET_EXPORT ValidityBitmapInputOutput {
+  // Input only.
+  // The maximum number of values_read expected (actual
+  // values read must be less than or equal to this value).
+  // If this number is exceeded methods will throw a
+  // ParquetException. Exceeding this limit indicates
+  // either a corrupt or incorrectly written file.
+  int64_t values_read_upper_bound = 0;
+  // Output only. The number of values encountered (this is
+  // logically the number of elements of the resulting
+  // Arrow array).
+  int64_t values_read = 0;
+  // Input/Output. The number of nulls encountered.
+  int64_t null_count = 0;
+  // Output only. The validity bitmap to populate. May be null only
+  // for DefRepLevelsToListInfo (if all that is needed is list offsets).
+  uint8_t* valid_bits = NULLPTR;
+  // Input only, offset into valid_bits to start at.
+  int64_t valid_bits_offset = 0;
+};
+
+// Converts def_levels to validity bitmaps for non-list arrays and structs that have
+// at least one member that is not a list and has no list descendants.
+// For lists use DefRepLevelsToList; for structs where all descendants contain
+// a list use DefRepLevelsToBitmap.
+void PARQUET_EXPORT DefLevelsToBitmap(const int16_t* def_levels, int64_t num_def_levels,
+                                      LevelInfo level_info,
+                                      ValidityBitmapInputOutput* output);
+
+// Reconstructs a validity bitmap and list offsets for a list array based on
+// def/rep levels. The first element of offsets will not be modified if rep_levels
+// starts with a new list. The first element of offsets will be used when calculating
+// the next offset. See the documentation of DefLevelsToBitmap for when to use this
+// method vs the other ones in this file for reconstruction.
+//
+// Offsets must be sized to 1 + values_read_upper_bound.
+void PARQUET_EXPORT DefRepLevelsToList(const int16_t* def_levels,
+                                       const int16_t* rep_levels, int64_t num_def_levels,
+                                       LevelInfo level_info,
+                                       ValidityBitmapInputOutput* output,
+                                       int32_t* offsets);
+void PARQUET_EXPORT DefRepLevelsToList(const int16_t* def_levels,
+                                       const int16_t* rep_levels, int64_t num_def_levels,
+                                       LevelInfo level_info,
+                                       ValidityBitmapInputOutput* output,
+                                       int64_t* offsets);
+
+// Reconstructs a validity bitmap for a struct where every member is a list or
+// has a list descendant. See the documentation of DefLevelsToBitmap for more
+// details on this method compared to the other ones defined above.
+void PARQUET_EXPORT DefRepLevelsToBitmap(const int16_t* def_levels,
+                                         const int16_t* rep_levels,
+                                         int64_t num_def_levels, LevelInfo level_info,
+                                         ValidityBitmapInputOutput* output);
+
+// This is exposed to ensure we can properly test a software simulated pext function
+// (i.e. it isn't hidden by runtime dispatch).
+uint64_t PARQUET_EXPORT TestOnlyExtractBitsSoftware(uint64_t bitmap, uint64_t selection);
+
+}  // namespace parquet::internal
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1ccedabfde5084ab5c02c2314098122c68ffa81
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_conversion_inc.h
@@ -0,0 +1,354 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include "parquet/level_conversion.h" + +#include +#include +#include + +#include "arrow/util/bit_run_reader.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/logging.h" +#include "arrow/util/simd.h" +#include "parquet/exception.h" +#include "parquet/level_comparison.h" + +#ifndef PARQUET_IMPL_NAMESPACE +#error "PARQUET_IMPL_NAMESPACE must be defined" +#endif + +namespace parquet::internal::PARQUET_IMPL_NAMESPACE { + +// clang-format off +/* Python code to generate lookup table: + +kLookupBits = 5 +count = 0 +print('constexpr int kLookupBits = {};'.format(kLookupBits)) +print('constexpr uint8_t kPextTable[1 << kLookupBits][1 << kLookupBits] = {') +print(' ', end = '') +for mask in range(1 << kLookupBits): + for data in range(1 << kLookupBits): + bit_value = 0 + bit_len = 0 + for i in range(kLookupBits): + if mask & (1 << i): + bit_value |= (((data >> i) & 1) << bit_len) + bit_len += 1 + out = '0x{:02X},'.format(bit_value) + count += 1 + if count % (1 << kLookupBits) == 1: + print(' {') + if count % 8 == 1: + print(' ', end = '') + if count % 8 == 0: + print(out, end = '\n') + else: + print(out, end = ' ') + if count % (1 << kLookupBits) == 0: + print(' },', end = '') +print('\n};') + +*/ +// clang-format on + +constexpr int kLookupBits = 5; +constexpr uint8_t kPextTable[1 << kLookupBits][1 << kLookupBits] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, + 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, + 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, + 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, + 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, + 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, + 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, + 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, + 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, + 0x03, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x02, 
0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, + 0x03, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, + 0x02, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, + 0x05, 0x06, 0x07, 0x06, 0x07, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, + 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, + 0x05, 0x06, 0x06, 0x07, 0x07, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, + 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, + 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, + 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, 0x02, 0x03, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, + 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, + 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, + 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, 0x04, 0x05, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, + 0x03, 0x03, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x00, 0x01, 0x00, + 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, + 0x06, 0x07, 0x04, 0x05, 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x00, 0x00, 0x01, + 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, + 0x07, 0x07, 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, + 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, + 0x03, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, 0x05, 0x04, 0x05, + 0x04, 0x05, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, + 0x03, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, + 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + }, + { + 0x00, 
0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x08, 0x09, + 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x0C, 0x0D, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, + 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, + 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, + }, + { + 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x02, 0x03, 0x04, 0x05, 0x04, + 0x05, 0x06, 0x07, 0x06, 0x07, 0x08, 0x09, 0x08, 0x09, 0x0A, 0x0B, + 0x0A, 0x0B, 0x0C, 0x0D, 0x0C, 0x0D, 0x0E, 0x0F, 0x0E, 0x0F, + }, + { + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x05, + 0x05, 0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09, 0x0A, 0x0A, + 0x0B, 0x0B, 0x0C, 0x0C, 0x0D, 0x0D, 0x0E, 0x0E, 0x0F, 0x0F, + }, + { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, + 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + }, +}; + +inline uint64_t ExtractBitsSoftware(uint64_t bitmap, uint64_t select_bitmap) { + // A software emulation of _pext_u64 + + // These checks should be inline and are likely to be common cases. + if (select_bitmap == ~uint64_t{0}) { + return bitmap; + } else if (select_bitmap == 0) { + return 0; + } + + // Fallback to lookup table method + uint64_t bit_value = 0; + int bit_len = 0; + constexpr uint8_t kLookupMask = (1U << kLookupBits) - 1; + while (select_bitmap != 0) { + const auto mask_len = ARROW_POPCOUNT32(select_bitmap & kLookupMask); + const uint64_t value = kPextTable[select_bitmap & kLookupMask][bitmap & kLookupMask]; + bit_value |= (value << bit_len); + bit_len += mask_len; + bitmap >>= kLookupBits; + select_bitmap >>= kLookupBits; + } + return bit_value; +} + +#ifdef ARROW_HAVE_BMI2 + +// Use _pext_u64 on 64-bit builds, _pext_u32 on 32-bit builds, +#if UINTPTR_MAX == 0xFFFFFFFF + +using extract_bitmap_t = uint32_t; +inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap, + extract_bitmap_t select_bitmap) { + return _pext_u32(bitmap, select_bitmap); +} + +#else + +using extract_bitmap_t = uint64_t; +inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap, + extract_bitmap_t select_bitmap) { + return _pext_u64(bitmap, select_bitmap); +} + +#endif + +#else // !defined(ARROW_HAVE_BMI2) + +// Use 64-bit pext emulation when BMI2 isn't available. 
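+//
+// Worked example of the pext semantics implemented above: the bits of `bitmap`
+// at the positions selected by `select_bitmap` are packed contiguously into
+// the low bits of the result, e.g.
+//
+//   ExtractBitsSoftware(/*bitmap=*/0b10110110, /*select_bitmap=*/0b11001100)
+//     == 0b1001  // bits 2, 3, 6, 7 of bitmap, packed low-to-high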
+using extract_bitmap_t = uint64_t;
+inline extract_bitmap_t ExtractBits(extract_bitmap_t bitmap,
+                                    extract_bitmap_t select_bitmap) {
+  return ExtractBitsSoftware(bitmap, select_bitmap);
+}
+
+#endif
+
+static constexpr int64_t kExtractBitsSize = 8 * sizeof(extract_bitmap_t);
+
+template <bool has_repeated_parent>
+int64_t DefLevelsBatchToBitmap(const int16_t* def_levels, const int64_t batch_size,
+                               int64_t upper_bound_remaining, LevelInfo level_info,
+                               ::arrow::internal::FirstTimeBitmapWriter* writer) {
+  DCHECK_LE(batch_size, kExtractBitsSize);
+
+  // Greater than level_info.def_level - 1 implies >= the def_level
+  auto defined_bitmap = static_cast<extract_bitmap_t>(
+      internal::GreaterThanBitmap(def_levels, batch_size, level_info.def_level - 1));
+
+  if (has_repeated_parent) {
+    // Greater than level_info.repeated_ancestor_def_level - 1 implies >= the
+    // repeated_ancestor_def_level
+    auto present_bitmap = static_cast<extract_bitmap_t>(internal::GreaterThanBitmap(
+        def_levels, batch_size, level_info.repeated_ancestor_def_level - 1));
+    auto selected_bits = ExtractBits(defined_bitmap, present_bitmap);
+    int64_t selected_count = ::arrow::bit_util::PopCount(present_bitmap);
+    if (ARROW_PREDICT_FALSE(selected_count > upper_bound_remaining)) {
+      throw ParquetException("Values read exceeded upper bound");
+    }
+    writer->AppendWord(selected_bits, selected_count);
+    return ::arrow::bit_util::PopCount(selected_bits);
+  } else {
+    if (ARROW_PREDICT_FALSE(batch_size > upper_bound_remaining)) {
+      std::stringstream ss;
+      ss << "Values read exceeded upper bound";
+      throw ParquetException(ss.str());
+    }
+
+    writer->AppendWord(defined_bitmap, batch_size);
+    return ::arrow::bit_util::PopCount(defined_bitmap);
+  }
+}
+
+template <bool has_repeated_parent>
+void DefLevelsToBitmapSimd(const int16_t* def_levels, int64_t num_def_levels,
+                           LevelInfo level_info, ValidityBitmapInputOutput* output) {
+  ::arrow::internal::FirstTimeBitmapWriter writer(
+      output->valid_bits,
+      /*start_offset=*/output->valid_bits_offset,
+      /*length=*/output->values_read_upper_bound);
+  int64_t set_count = 0;
+  output->values_read = 0;
+  int64_t values_read_remaining = output->values_read_upper_bound;
+  while (num_def_levels > kExtractBitsSize) {
+    set_count += DefLevelsBatchToBitmap<has_repeated_parent>(
+        def_levels, kExtractBitsSize, values_read_remaining, level_info, &writer);
+    def_levels += kExtractBitsSize;
+    num_def_levels -= kExtractBitsSize;
+    values_read_remaining = output->values_read_upper_bound - writer.position();
+  }
+  set_count += DefLevelsBatchToBitmap<has_repeated_parent>(
+      def_levels, num_def_levels, values_read_remaining, level_info, &writer);
+
+  output->values_read = writer.position();
+  output->null_count += output->values_read - set_count;
+  writer.Finish();
+}
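+
+// Worked example: for a top-level optional column (def_level = 1, no repeated
+// ancestor, hence has_repeated_parent = false), def_levels {1, 0, 1} produce
+// the validity bits {1, 0, 1}; afterwards output->values_read == 3 and
+// output->null_count has been incremented by 1 (3 values read, 2 bits set).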
+
+}  // namespace parquet::internal::PARQUET_IMPL_NAMESPACE
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h
new file mode 100644
index 0000000000000000000000000000000000000000..640b898024346473a341658fc5ccd9ad4c5091c2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/metadata.h
@@ -0,0 +1,566 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "parquet/encryption/type_fwd.h"
+#include "parquet/platform.h"
+#include "parquet/properties.h"
+#include "parquet/schema.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class ColumnDescriptor;
+class EncodedStatistics;
+class FileCryptoMetaData;
+class Statistics;
+class SchemaDescriptor;
+
+namespace schema {
+
+class ColumnPath;
+
+}  // namespace schema
+
+using KeyValueMetadata = ::arrow::KeyValueMetadata;
+
+class PARQUET_EXPORT ApplicationVersion {
+ public:
+  // Known Versions with Issues
+  static const ApplicationVersion& PARQUET_251_FIXED_VERSION();
+  static const ApplicationVersion& PARQUET_816_FIXED_VERSION();
+  static const ApplicationVersion& PARQUET_CPP_FIXED_STATS_VERSION();
+  static const ApplicationVersion& PARQUET_MR_FIXED_STATS_VERSION();
+  static const ApplicationVersion& PARQUET_CPP_10353_FIXED_VERSION();
+
+  // Application that wrote the file. e.g. "IMPALA"
+  std::string application_;
+  // Build name
+  std::string build_;
+
+  // Version of the application that wrote the file, expressed as
+  // (<major>.<minor>.<patch>). Unmatched parts default to 0.
+  // "1.2.3"    => {1, 2, 3}
+  // "1.2"      => {1, 2, 0}
+  // "1.2-cdh5" => {1, 2, 0}
+  struct {
+    int major;
+    int minor;
+    int patch;
+    std::string unknown;
+    std::string pre_release;
+    std::string build_info;
+  } version;
+
+  ApplicationVersion() = default;
+  explicit ApplicationVersion(const std::string& created_by);
+  ApplicationVersion(std::string application, int major, int minor, int patch);
+
+  // Returns true if version is strictly less than other_version
+  bool VersionLt(const ApplicationVersion& other_version) const;
+
+  // Returns true if version is strictly equal to other_version
+  bool VersionEq(const ApplicationVersion& other_version) const;
+
+  // Checks if the Version has the correct statistics for a given column
+  bool HasCorrectStatistics(Type::type primitive, EncodedStatistics& statistics,
+                            SortOrder::type sort_order = SortOrder::SIGNED) const;
+};
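+
+// Example: a created_by string such as "parquet-cpp-arrow version 16.0.0"
+// (see CREATED_BY_VERSION in parquet_version.h) parses to
+// application_ == "parquet-cpp-arrow" and version == {16, 0, 0}. Writers may
+// also append a trailing "(build ...)" component, which is captured in
+// version.build_info.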
+
+class PARQUET_EXPORT ColumnCryptoMetaData {
+ public:
+  static std::unique_ptr<ColumnCryptoMetaData> Make(const uint8_t* metadata);
+  ~ColumnCryptoMetaData();
+
+  bool Equals(const ColumnCryptoMetaData& other) const;
+
+  std::shared_ptr<schema::ColumnPath> path_in_schema() const;
+  bool encrypted_with_footer_key() const;
+  const std::string& key_metadata() const;
+
+ private:
+  explicit ColumnCryptoMetaData(const uint8_t* metadata);
+
+  class ColumnCryptoMetaDataImpl;
+  std::unique_ptr<ColumnCryptoMetaDataImpl> impl_;
+};
+
+/// \brief Public struct for Thrift PageEncodingStats in ColumnChunkMetaData
+struct PageEncodingStats {
+  PageType::type page_type;
+  Encoding::type encoding;
+  int32_t count;
+};
+
+/// \brief Public struct for location to page index in ColumnChunkMetaData.
+struct IndexLocation {
+  /// File offset of the given index, in bytes
+  int64_t offset;
+  /// Length of the given index, in bytes
+  int32_t length;
+};
+
+/// \brief ColumnChunkMetaData is a proxy around format::ColumnChunkMetaData.
+class PARQUET_EXPORT ColumnChunkMetaData {
+ public:
+  // API convenience to get a MetaData accessor
+
+  ARROW_DEPRECATED("Use the ReaderProperties-taking overload")
+  static std::unique_ptr<ColumnChunkMetaData> Make(
+      const void* metadata, const ColumnDescriptor* descr,
+      const ApplicationVersion* writer_version, int16_t row_group_ordinal = -1,
+      int16_t column_ordinal = -1,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  static std::unique_ptr<ColumnChunkMetaData> Make(
+      const void* metadata, const ColumnDescriptor* descr,
+      const ReaderProperties& properties = default_reader_properties(),
+      const ApplicationVersion* writer_version = NULLPTR, int16_t row_group_ordinal = -1,
+      int16_t column_ordinal = -1,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  ~ColumnChunkMetaData();
+
+  bool Equals(const ColumnChunkMetaData& other) const;
+
+  // column chunk
+  int64_t file_offset() const;
+
+  // parameter is only used when a dataset is spread across multiple files
+  const std::string& file_path() const;
+
+  // column metadata
+  bool is_metadata_set() const;
+  Type::type type() const;
+  int64_t num_values() const;
+  std::shared_ptr<schema::ColumnPath> path_in_schema() const;
+  bool is_stats_set() const;
+  std::shared_ptr<Statistics> statistics() const;
+
+  Compression::type compression() const;
+  // Indicate if the ColumnChunk compression is supported by the current
+  // compiled parquet library.
+  bool can_decompress() const;
+
+  const std::vector<Encoding::type>& encodings() const;
+  const std::vector<PageEncodingStats>& encoding_stats() const;
+  std::optional<int64_t> bloom_filter_offset() const;
+  std::optional<int32_t> bloom_filter_length() const;
+  bool has_dictionary_page() const;
+  int64_t dictionary_page_offset() const;
+  int64_t data_page_offset() const;
+  bool has_index_page() const;
+  int64_t index_page_offset() const;
+  int64_t total_compressed_size() const;
+  int64_t total_uncompressed_size() const;
+  std::unique_ptr<ColumnCryptoMetaData> crypto_metadata() const;
+  std::optional<IndexLocation> GetColumnIndexLocation() const;
+  std::optional<IndexLocation> GetOffsetIndexLocation() const;
+
+ private:
+  explicit ColumnChunkMetaData(
+      const void* metadata, const ColumnDescriptor* descr, int16_t row_group_ordinal,
+      int16_t column_ordinal, const ReaderProperties& properties,
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+  // PIMPL Idiom
+  class ColumnChunkMetaDataImpl;
+  std::unique_ptr<ColumnChunkMetaDataImpl> impl_;
+};
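+
+// Illustrative sketch: using encoding_stats() to check whether a column chunk
+// is fully dictionary-encoded (assumes `col` is a ColumnChunkMetaData*;
+// DATA_PAGE_V2 pages are ignored for brevity):
+//
+//   bool all_dict = col->has_dictionary_page();
+//   for (const PageEncodingStats& s : col->encoding_stats()) {
+//     if (s.page_type == PageType::DATA_PAGE &&
+//         s.encoding != Encoding::RLE_DICTIONARY &&
+//         s.encoding != Encoding::PLAIN_DICTIONARY) {
+//       all_dict = false;
+//     }
+//   }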
+
+/// \brief RowGroupMetaData is a proxy around format::RowGroupMetaData.
+class PARQUET_EXPORT RowGroupMetaData {
+ public:
+  ARROW_DEPRECATED("Use the ReaderProperties-taking overload")
+  static std::unique_ptr<RowGroupMetaData> Make(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ApplicationVersion* writer_version,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  /// \brief Create a RowGroupMetaData from a serialized thrift message.
+  static std::unique_ptr<RowGroupMetaData> Make(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ReaderProperties& properties = default_reader_properties(),
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  ~RowGroupMetaData();
+
+  bool Equals(const RowGroupMetaData& other) const;
+
+  /// \brief The number of columns in this row group. The order must match the
+  /// parent's column ordering.
+  int num_columns() const;
+
+  /// \brief Return the ColumnChunkMetaData of the corresponding column ordinal.
+  ///
+  /// WARNING: the returned object references a memory location in its parent
+  /// (RowGroupMetaData) object. Hence, the parent must outlive the returned
+  /// object.
+  ///
+  /// \param[in] index of the ColumnChunkMetaData to retrieve.
+  ///
+  /// \throws ParquetException if the index is out of bounds.
+  std::unique_ptr<ColumnChunkMetaData> ColumnChunk(int index) const;
+
+  /// \brief Number of rows in this row group.
+  int64_t num_rows() const;
+
+  /// \brief Total byte size of all the uncompressed column data in this row group.
+  int64_t total_byte_size() const;
+
+  /// \brief Total byte size of all the compressed (and potentially encrypted)
+  /// column data in this row group.
+  ///
+  /// This information is optional and may be 0 if omitted.
+  int64_t total_compressed_size() const;
+
+  /// \brief Byte offset from beginning of file to first page (data or
+  /// dictionary) in this row group
+  ///
+  /// The file_offset field that this method exposes is optional. This method
+  /// will return 0 if that field is not set to a meaningful value.
+  int64_t file_offset() const;
+  // Return const-pointer to make it clear that this object is not to be copied
+  const SchemaDescriptor* schema() const;
+  // Indicate if all of the RowGroup's ColumnChunks can be decompressed.
+  bool can_decompress() const;
+  // Sorting columns of the row group if any.
+  std::vector<SortingColumn> sorting_columns() const;
+
+ private:
+  explicit RowGroupMetaData(
+      const void* metadata, const SchemaDescriptor* schema,
+      const ReaderProperties& properties,
+      const ApplicationVersion* writer_version = NULLPTR,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+  // PIMPL Idiom
+  class RowGroupMetaDataImpl;
+  std::unique_ptr<RowGroupMetaDataImpl> impl_;
+};
+
+class FileMetaDataBuilder;
+
+/// \brief FileMetaData is a proxy around format::FileMetaData.
+class PARQUET_EXPORT FileMetaData {
+ public:
+  ARROW_DEPRECATED("Use the ReaderProperties-taking overload")
+  static std::shared_ptr<FileMetaData> Make(
+      const void* serialized_metadata, uint32_t* inout_metadata_len,
+      std::shared_ptr<InternalFileDecryptor> file_decryptor);
+
+  /// \brief Create a FileMetaData from a serialized thrift message.
+  static std::shared_ptr<FileMetaData> Make(
+      const void* serialized_metadata, uint32_t* inout_metadata_len,
+      const ReaderProperties& properties = default_reader_properties(),
+      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  ~FileMetaData();
+
+  bool Equals(const FileMetaData& other) const;
+
+  /// \brief The number of parquet "leaf" columns.
+  ///
+  /// Parquet thrift definition requires that nested schema elements are
+  /// flattened. This method returns the number of columns in the flattened
+  /// version.
+  /// For instance, if the schema looks like this:
+  ///   0 foo.bar
+  ///       foo.bar.baz       0
+  ///       foo.bar.baz2      1
+  ///     foo.qux             2
+  ///   1 foo2                3
+  ///   2 foo3                4
+  /// This method will return 5, because there are 5 "leaf" fields (so 5
+  /// flattened fields)
+  int num_columns() const;
+
+  /// \brief The number of flattened schema elements.
+  ///
+  /// Parquet thrift definition requires that nested schema elements are
+  /// flattened. This method returns the total number of elements in the
+  /// flattened list.
+  int num_schema_elements() const;
+
+  /// \brief The total number of rows.
+  ///
+  /// If the FileMetaData was obtained by calling `SubSet()`, this is the total
+  /// number of rows in the selected row groups.
+  int64_t num_rows() const;
+
+  /// \brief The number of row groups in the file.
+  ///
+  /// If the FileMetaData was obtained by calling `SubSet()`, this is the number
+  /// of selected row groups.
+  int num_row_groups() const;
+
+  /// \brief Return the RowGroupMetaData of the corresponding row group ordinal.
+  ///
+  /// WARNING: the returned object references a memory location in its parent
+  /// (FileMetaData) object. Hence, the parent must outlive the returned object.
+  ///
+  /// \param[in] index of the RowGroup to retrieve.
+  ///
+  /// \throws ParquetException if the index is out of bounds.
+  std::unique_ptr<RowGroupMetaData> RowGroup(int index) const;
+
+  /// \brief Return the "version" of the file
+  ///
+  /// WARNING: The value returned by this method is unreliable as 1) the Parquet
+  /// file metadata stores the version as a single integer and 2) some producers
+  /// are known to always write a hardcoded value. Therefore, you cannot use
+  /// this value to know which features are used in the file.
+  ParquetVersion::type version() const;
+
+  /// \brief Return the application's user-agent string of the writer.
+  const std::string& created_by() const;
+
+  /// \brief Return the application's version of the writer.
+  const ApplicationVersion& writer_version() const;
+
+  /// \brief Size of the original thrift encoded metadata footer.
+  uint32_t size() const;
+
+  /// \brief Indicate if all of the FileMetaData's RowGroups can be decompressed.
+  ///
+  /// This will return false if any of the RowGroups' pages is compressed with
+  /// a compression format which is not compiled into the current parquet
+  /// library.
+  bool can_decompress() const;
+
+  bool is_encryption_algorithm_set() const;
+  EncryptionAlgorithm encryption_algorithm() const;
+  const std::string& footer_signing_key_metadata() const;
+
+  /// \brief Verify signature of FileMetaData when file is encrypted but footer
+  /// is not encrypted (plaintext footer).
+  bool VerifySignature(const void* signature);
+
+  void WriteTo(::arrow::io::OutputStream* dst,
+               const std::shared_ptr<Encryptor>& encryptor = NULLPTR) const;
+
+  /// \brief Return Thrift-serialized representation of the metadata as a
+  /// string
+  std::string SerializeToString() const;
+
+  // Return const-pointer to make it clear that this object is not to be copied
+  const SchemaDescriptor* schema() const;
+
+  const std::shared_ptr<const KeyValueMetadata>& key_value_metadata() const;
+
+  /// \brief Set a file path for all ColumnChunks in all RowGroups.
+  ///
+  /// Commonly used by systems (Dask, Spark) that generate a metadata-only
+  /// parquet file. The path is usually relative to said index file.
+  ///
+  /// \param[in] path to set.
+  void set_file_path(const std::string& path);
+
+  /// \brief Merge row groups from another metadata file into this one.
+  ///
+  /// The schema of the input FileMetaData must be equal to the
+  /// schema of this object.
+  ///
+  /// This is used by systems that create an aggregate metadata-only file by
+  /// concatenating the row groups of multiple files. This newly created
+  /// metadata file acts as an index of all available row groups.
+  ///
+  /// \param[in] other FileMetaData to merge the row groups from.
+  ///
+  /// \throws ParquetException if schemas are not equal.
+  void AppendRowGroups(const FileMetaData& other);
+
+  /// \brief Return a FileMetaData containing a subset of the row groups in this
+  /// FileMetaData.
+  std::shared_ptr<FileMetaData> Subset(const std::vector<int>& row_groups) const;
+
+ private:
+  friend FileMetaDataBuilder;
+  friend class SerializedFile;
+
+  explicit FileMetaData(const void* serialized_metadata, uint32_t* metadata_len,
+                        const ReaderProperties& properties,
+                        std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);
+
+  void set_file_decryptor(std::shared_ptr<InternalFileDecryptor> file_decryptor);
+
+  // PIMPL Idiom
+  FileMetaData();
+  class FileMetaDataImpl;
+  std::unique_ptr<FileMetaDataImpl> impl_;
+};
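+
+// Illustrative read-side sketch (assumes `md` was obtained from an already
+// opened file, e.g. via ParquetFileReader::metadata(); error handling
+// omitted):
+//
+//   for (int rg = 0; rg < md->num_row_groups(); ++rg) {
+//     std::unique_ptr<RowGroupMetaData> rg_md = md->RowGroup(rg);
+//     for (int c = 0; c < rg_md->num_columns(); ++c) {
+//       std::unique_ptr<ColumnChunkMetaData> col = rg_md->ColumnChunk(c);
+//       if (col->is_stats_set()) {
+//         // inspect col->statistics()
+//       }
+//     }
+//   }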
+
+class PARQUET_EXPORT FileCryptoMetaData {
+ public:
+  // API convenience to get a MetaData accessor
+  static std::shared_ptr<FileCryptoMetaData> Make(
+      const uint8_t* serialized_metadata, uint32_t* metadata_len,
+      const ReaderProperties& properties = default_reader_properties());
+  ~FileCryptoMetaData();
+
+  EncryptionAlgorithm encryption_algorithm() const;
+  const std::string& key_metadata() const;
+
+  void WriteTo(::arrow::io::OutputStream* dst) const;
+
+ private:
+  friend FileMetaDataBuilder;
+  FileCryptoMetaData(const uint8_t* serialized_metadata, uint32_t* metadata_len,
+                     const ReaderProperties& properties);
+
+  // PIMPL Idiom
+  FileCryptoMetaData();
+  class FileCryptoMetaDataImpl;
+  std::unique_ptr<FileCryptoMetaDataImpl> impl_;
+};
+
+// Builder API
+class PARQUET_EXPORT ColumnChunkMetaDataBuilder {
+ public:
+  // API convenience to get a MetaData builder
+  static std::unique_ptr<ColumnChunkMetaDataBuilder> Make(
+      std::shared_ptr<WriterProperties> props, const ColumnDescriptor* column);
+
+  static std::unique_ptr<ColumnChunkMetaDataBuilder> Make(
+      std::shared_ptr<WriterProperties> props, const ColumnDescriptor* column,
+      void* contents);
+
+  ~ColumnChunkMetaDataBuilder();
+
+  // column chunk
+  // Used when a dataset is spread across multiple files
+  void set_file_path(const std::string& path);
+  // column metadata
+  void SetStatistics(const EncodedStatistics& stats);
+  // get the column descriptor
+  const ColumnDescriptor* descr() const;
+
+  int64_t total_compressed_size() const;
+  // commit the metadata
+
+  void Finish(int64_t num_values, int64_t dictionary_page_offset,
+              int64_t index_page_offset, int64_t data_page_offset,
+              int64_t compressed_size, int64_t uncompressed_size, bool has_dictionary,
+              bool dictionary_fallback,
+              const std::map<Encoding::type, int32_t>& dict_encoding_stats_,
+              const std::map<Encoding::type, int32_t>& data_encoding_stats_,
+              const std::shared_ptr<Encryptor>& encryptor = NULLPTR);
+
+  // The metadata contents, suitable for passing to ColumnChunkMetaData::Make
+  const void* contents() const;
+
+  // For writing metadata at end of column chunk
+  void WriteTo(::arrow::io::OutputStream* sink);
+
+ private:
+  explicit ColumnChunkMetaDataBuilder(std::shared_ptr<WriterProperties> props,
+                                      const ColumnDescriptor* column);
+  explicit ColumnChunkMetaDataBuilder(std::shared_ptr<WriterProperties> props,
+                                      const ColumnDescriptor* column, void* contents);
+  // PIMPL Idiom
+  class ColumnChunkMetaDataBuilderImpl;
+  std::unique_ptr<ColumnChunkMetaDataBuilderImpl> impl_;
+};
+
+class PARQUET_EXPORT RowGroupMetaDataBuilder {
+ public:
+  // API convenience to get a MetaData builder
+  static std::unique_ptr<RowGroupMetaDataBuilder> Make(
+      std::shared_ptr<WriterProperties> props, const SchemaDescriptor* schema_,
+      void* contents);
+
+  ~RowGroupMetaDataBuilder();
+
+  ColumnChunkMetaDataBuilder* NextColumnChunk();
+  int num_columns();
+  int64_t num_rows();
+  int current_column() const;
+
+  void set_num_rows(int64_t num_rows);
+
+  // commit the metadata
+  void Finish(int64_t total_bytes_written, int16_t row_group_ordinal = -1);
+
+ private:
+  explicit RowGroupMetaDataBuilder(std::shared_ptr<WriterProperties> props,
+                                   const SchemaDescriptor* schema_, void* contents);
+  // PIMPL Idiom
+  class RowGroupMetaDataBuilderImpl;
+  std::unique_ptr<RowGroupMetaDataBuilderImpl> impl_;
+};
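+
+// Illustrative writer-side flow (sketch; assumes `builder` is a
+// FileMetaDataBuilder, declared below, created via FileMetaDataBuilder::Make):
+//
+//   RowGroupMetaDataBuilder* rg_builder = builder->AppendRowGroup();
+//   ColumnChunkMetaDataBuilder* col_builder = rg_builder->NextColumnChunk();
+//   // ... write the column chunk, then commit its metadata:
+//   // col_builder->Finish(...);
+//   rg_builder->set_num_rows(num_rows);
+//   rg_builder->Finish(total_bytes_written);
+//   std::unique_ptr<FileMetaData> file_metadata = builder->Finish();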
+
+/// \brief Public struct for location to all page indexes in a parquet file.
+struct PageIndexLocation {
+  /// Alias type of page index location of a row group. The index location
+  /// is located by column ordinal. If the column does not have the page index,
+  /// its value is set to std::nullopt.
+  using RowGroupIndexLocation = std::vector<std::optional<IndexLocation>>;
+  /// Alias type of page index location of a parquet file. The index location
+  /// is located by the row group ordinal.
+  using FileIndexLocation = std::map<size_t, RowGroupIndexLocation>;
+  /// Row group column index locations which use the row group ordinal as the key.
+  FileIndexLocation column_index_location;
+  /// Row group offset index locations which use the row group ordinal as the key.
+  FileIndexLocation offset_index_location;
+};
+
+class PARQUET_EXPORT FileMetaDataBuilder {
+ public:
+  ARROW_DEPRECATED("Deprecated in 12.0.0. Use overload without KeyValueMetadata instead.")
+  static std::unique_ptr<FileMetaDataBuilder> Make(
+      const SchemaDescriptor* schema, std::shared_ptr<WriterProperties> props,
+      std::shared_ptr<const KeyValueMetadata> key_value_metadata);
+
+  // API convenience to get a MetaData builder
+  static std::unique_ptr<FileMetaDataBuilder> Make(
+      const SchemaDescriptor* schema, std::shared_ptr<WriterProperties> props);
+
+  ~FileMetaDataBuilder();
+
+  // The prior RowGroupMetaDataBuilder (if any) is destroyed
+  RowGroupMetaDataBuilder* AppendRowGroup();
+
+  // Update location to all page indexes in the parquet file
+  void SetPageIndexLocation(const PageIndexLocation& location);
+
+  // Complete the Thrift structure
+  std::unique_ptr<FileMetaData> Finish(
+      const std::shared_ptr<const KeyValueMetadata>& key_value_metadata = NULLPTR);
+
+  // crypto metadata
+  std::unique_ptr<FileCryptoMetaData> GetCryptoMetaData();
+
+ private:
+  explicit FileMetaDataBuilder(
+      const SchemaDescriptor* schema, std::shared_ptr<WriterProperties> props,
+      std::shared_ptr<const KeyValueMetadata> key_value_metadata = NULLPTR);
+  // PIMPL Idiom
+  class FileMetaDataBuilderImpl;
+  std::unique_ptr<FileMetaDataBuilderImpl> impl_;
+};
+
+PARQUET_EXPORT std::string ParquetVersionToString(ParquetVersion::type ver);
+
+}  // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/page_index.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/page_index.h
new file mode 100644
index 0000000000000000000000000000000000000000..d45c59cab223f13a217c37ed33c6ab3984881b62
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/page_index.h
@@ -0,0 +1,372 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/io/interfaces.h"
+#include "parquet/encryption/type_fwd.h"
+#include "parquet/types.h"
+
+#include <optional>
+#include <vector>
+
+namespace parquet {
+
+class EncodedStatistics;
+struct PageIndexLocation;
+
+/// \brief ColumnIndex is a proxy around format::ColumnIndex.
+class PARQUET_EXPORT ColumnIndex {
+ public:
+  /// \brief Create a ColumnIndex from a serialized thrift message.
+ static std::unique_ptr Make(const ColumnDescriptor& descr, + const void* serialized_index, + uint32_t index_len, + const ReaderProperties& properties, + Decryptor* decryptor = NULLPTR); + + virtual ~ColumnIndex() = default; + + /// \brief A bitmap with a bit set for each data page that has only null values. + /// + /// The length of this vector is equal to the number of data pages in the column. + virtual const std::vector& null_pages() const = 0; + + /// \brief A vector of encoded lower bounds for each data page in this column. + /// + /// `null_pages` should be inspected first, as only pages with non-null values + /// may have their lower bounds populated. + virtual const std::vector& encoded_min_values() const = 0; + + /// \brief A vector of encoded upper bounds for each data page in this column. + /// + /// `null_pages` should be inspected first, as only pages with non-null values + /// may have their upper bounds populated. + virtual const std::vector& encoded_max_values() const = 0; + + /// \brief The ordering of lower and upper bounds. + /// + /// The boundary order applies across all lower bounds, and all upper bounds, + /// respectively. However, the order between lower bounds and upper bounds + /// cannot be derived from this. + virtual BoundaryOrder::type boundary_order() const = 0; + + /// \brief Whether per-page null count information is available. + virtual bool has_null_counts() const = 0; + + /// \brief An optional vector with the number of null values in each data page. + /// + /// `has_null_counts` should be called first to determine if this information is + /// available. + virtual const std::vector& null_counts() const = 0; + + /// \brief A vector of page indices for non-null pages. + virtual const std::vector& non_null_page_indices() const = 0; +}; + +/// \brief Typed implementation of ColumnIndex. +template +class PARQUET_EXPORT TypedColumnIndex : public ColumnIndex { + public: + using T = typename DType::c_type; + + /// \brief A vector of lower bounds for each data page in this column. + /// + /// This is like `encoded_min_values`, but with the values decoded according to + /// the column's physical type. + /// `min_values` and `max_values` can be used together with `boundary_order` + /// in order to prune some data pages when searching for specific values. + virtual const std::vector& min_values() const = 0; + + /// \brief A vector of upper bounds for each data page in this column. + /// + /// Just like `min_values`, but for upper bounds instead of lower bounds. + virtual const std::vector& max_values() const = 0; +}; + +using BoolColumnIndex = TypedColumnIndex; +using Int32ColumnIndex = TypedColumnIndex; +using Int64ColumnIndex = TypedColumnIndex; +using FloatColumnIndex = TypedColumnIndex; +using DoubleColumnIndex = TypedColumnIndex; +using ByteArrayColumnIndex = TypedColumnIndex; +using FLBAColumnIndex = TypedColumnIndex; + +/// \brief PageLocation is a proxy around format::PageLocation. +struct PARQUET_EXPORT PageLocation { + /// File offset of the data page. + int64_t offset; + /// Total compressed size of the data page and header. + int32_t compressed_page_size; + /// Row id of the first row in the page within the row group. + int64_t first_row_index; +}; + +/// \brief OffsetIndex is a proxy around format::OffsetIndex. +class PARQUET_EXPORT OffsetIndex { + public: + /// \brief Create a OffsetIndex from a serialized thrift message. 
+ static std::unique_ptr Make(const void* serialized_index, + uint32_t index_len, + const ReaderProperties& properties, + Decryptor* decryptor = NULLPTR); + + virtual ~OffsetIndex() = default; + + /// \brief A vector of locations for each data page in this column. + virtual const std::vector& page_locations() const = 0; +}; + +/// \brief Interface for reading the page index for a Parquet row group. +class PARQUET_EXPORT RowGroupPageIndexReader { + public: + virtual ~RowGroupPageIndexReader() = default; + + /// \brief Read column index of a column chunk. + /// + /// \param[in] i column ordinal of the column chunk. + /// \returns column index of the column or nullptr if it does not exist. + /// \throws ParquetException if the index is out of bound. + virtual std::shared_ptr GetColumnIndex(int32_t i) = 0; + + /// \brief Read offset index of a column chunk. + /// + /// \param[in] i column ordinal of the column chunk. + /// \returns offset index of the column or nullptr if it does not exist. + /// \throws ParquetException if the index is out of bound. + virtual std::shared_ptr GetOffsetIndex(int32_t i) = 0; +}; + +struct PageIndexSelection { + /// Specifies whether to read the column index. + bool column_index = false; + /// Specifies whether to read the offset index. + bool offset_index = false; +}; + +PARQUET_EXPORT +std::ostream& operator<<(std::ostream& out, const PageIndexSelection& params); + +struct RowGroupIndexReadRange { + /// Base start and total size of column index of all column chunks in a row group. + /// If none of the column chunks have column index, it is set to std::nullopt. + std::optional<::arrow::io::ReadRange> column_index = std::nullopt; + /// Base start and total size of offset index of all column chunks in a row group. + /// If none of the column chunks have offset index, it is set to std::nullopt. + std::optional<::arrow::io::ReadRange> offset_index = std::nullopt; +}; + +/// \brief Interface for reading the page index for a Parquet file. +class PARQUET_EXPORT PageIndexReader { + public: + virtual ~PageIndexReader() = default; + + /// \brief Create a PageIndexReader instance. + /// \returns a PageIndexReader instance. + /// WARNING: The returned PageIndexReader references to all the input parameters, so + /// it must not outlive all of the input parameters. Usually these input parameters + /// come from the same ParquetFileReader object, so it must not outlive the reader + /// that creates this PageIndexReader. + static std::shared_ptr Make( + ::arrow::io::RandomAccessFile* input, std::shared_ptr file_metadata, + const ReaderProperties& properties, + InternalFileDecryptor* file_decryptor = NULLPTR); + + /// \brief Get the page index reader of a specific row group. + /// \param[in] i row group ordinal to get page index reader. + /// \returns RowGroupPageIndexReader of the specified row group. A nullptr may or may + /// not be returned if the page index for the row group is unavailable. It is + /// the caller's responsibility to check the return value of follow-up calls + /// to the RowGroupPageIndexReader. + /// \throws ParquetException if the index is out of bound. + virtual std::shared_ptr RowGroup(int i) = 0; + + /// \brief Advise the reader which part of page index will be read later. + /// + /// The PageIndexReader can optionally prefetch and cache page index that + /// may be read later to get better performance. 
+  ///
+  /// The contract of this function is as below:
+  /// 1) If WillNeed() has not been called for a specific row group and the page index
+  ///    exists, follow-up calls to get column index or offset index of all columns in
+  ///    this row group SHOULD NOT FAIL, but the performance may not be optimal.
+  /// 2) If WillNeed() has been called for a specific row group, follow-up calls to get
+  ///    page index are limited to the columns and index type requested by WillNeed().
+  ///    So they MAY FAIL if columns that are not requested by WillNeed() are requested.
+  /// 3) Later calls to WillNeed() MAY OVERRIDE previous calls of the same row groups.
+  /// For example,
+  /// 1) If WillNeed() is not called for row group 0, then follow-up calls to read
+  ///    column index and/or offset index of all columns of row group 0 should not
+  ///    fail if its page index exists.
+  /// 2) If WillNeed() is called for columns 0 and 1 for row group 0, then a follow-up
+  ///    call to read the page index of column 2 for row group 0 MAY FAIL even if its
+  ///    page index exists.
+  /// 3) If WillNeed() is called for row group 0 with offset index only, then a
+  ///    follow-up call to read the column index of row group 0 MAY FAIL even if
+  ///    the column index of this column exists.
+  /// 4) If WillNeed() is called for columns 0 and 1 for row group 0, and a later
+  ///    call to WillNeed() requests columns 1 and 2 of row group 0, the later call
+  ///    overrides the previous one and only columns 1 and 2 of row group 0 may be
+  ///    accessed.
+  ///
+  /// \param[in] row_group_indices list of row group ordinals to read page index later.
+  /// \param[in] column_indices list of column ordinals to read page index later. If it
+  ///            is empty, it means all columns in the row group will be read.
+  /// \param[in] selection which kind of page index is required later.
+  virtual void WillNeed(const std::vector<int32_t>& row_group_indices,
+                        const std::vector<int32_t>& column_indices,
+                        const PageIndexSelection& selection) = 0;
+
+  /// \brief Advise the reader that the page index of these row groups will not be
+  /// read anymore.
+  ///
+  /// The PageIndexReader implementation has the opportunity to cancel any prefetch or
+  /// release resources that are related to these row groups.
+  ///
+  /// \param[in] row_group_indices list of row group ordinals whose page index will
+  ///            not be accessed anymore.
+  virtual void WillNotNeed(const std::vector<int32_t>& row_group_indices) = 0;
+
+  /// \brief Determine the column index and offset index ranges for the given row group.
+  ///
+  /// \param[in] row_group_metadata row group metadata to get column chunk metadata.
+  /// \param[in] columns list of column ordinals to get page index. If the list is empty,
+  ///            it means all columns in the row group.
+  /// \returns RowGroupIndexReadRange of the specified row group. Throws ParquetException
+  ///          if the selected column ordinal is out of bounds or the metadata of the
+  ///          page index is corrupted.
+  static RowGroupIndexReadRange DeterminePageIndexRangesInRowGroup(
+      const RowGroupMetaData& row_group_metadata, const std::vector<int32_t>& columns);
+};
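+
+// Illustrative usage sketch (assumes `reader` is a PageIndexReader obtained
+// from PageIndexReader::Make; error handling omitted):
+//
+//   PageIndexSelection want;
+//   want.column_index = true;
+//   want.offset_index = true;
+//   reader->WillNeed(/*row_group_indices=*/{0}, /*column_indices=*/{}, want);
+//   if (auto rg0 = reader->RowGroup(0)) {
+//     std::shared_ptr<ColumnIndex> ci = rg0->GetColumnIndex(0);
+//     std::shared_ptr<OffsetIndex> oi = rg0->GetOffsetIndex(0);
+//   }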
+
+/// \brief Interface for collecting column index of data pages in a column chunk.
+class PARQUET_EXPORT ColumnIndexBuilder {
+ public:
+  /// \brief API convenience to create a ColumnIndexBuilder.
+  static std::unique_ptr<ColumnIndexBuilder> Make(const ColumnDescriptor* descr);
+
+  virtual ~ColumnIndexBuilder() = default;
+
+  /// \brief Add statistics of a data page.
+  ///
+  /// If the ColumnIndexBuilder has seen any corrupted statistics, it will
+  /// not update statistics anymore.
+  ///
+  /// \param stats Page statistics in the encoded form.
+  virtual void AddPage(const EncodedStatistics& stats) = 0;
+
+  /// \brief Complete the column index.
+  ///
+  /// Once called, AddPage() can no longer be called.
+  /// WriteTo() and Build() can only be called after Finish() has been called.
+  virtual void Finish() = 0;
+
+  /// \brief Serialize the column index thrift message.
+  ///
+  /// If the ColumnIndexBuilder has seen any corrupted statistics, it will
+  /// not write any data to the sink.
+  ///
+  /// \param[out] sink output stream to write the serialized message.
+  /// \param[in] encryptor encryptor to encrypt the serialized column index.
+  virtual void WriteTo(::arrow::io::OutputStream* sink,
+                       Encryptor* encryptor = NULLPTR) const = 0;
+
+  /// \brief Create a ColumnIndex directly.
+  ///
+  /// \return If the ColumnIndexBuilder has seen any corrupted statistics, it simply
+  ///         returns nullptr. Otherwise the column index is built and returned.
+  virtual std::unique_ptr<ColumnIndex> Build() const = 0;
+};
+
+/// \brief Interface for collecting offset index of data pages in a column chunk.
+class PARQUET_EXPORT OffsetIndexBuilder {
+ public:
+  /// \brief API convenience to create an OffsetIndexBuilder.
+  static std::unique_ptr<OffsetIndexBuilder> Make();
+
+  virtual ~OffsetIndexBuilder() = default;
+
+  /// \brief Add page location of a data page.
+  virtual void AddPage(int64_t offset, int32_t compressed_page_size,
+                       int64_t first_row_index) = 0;
+
+  /// \brief Add page location of a data page.
+  void AddPage(const PageLocation& page_location) {
+    AddPage(page_location.offset, page_location.compressed_page_size,
+            page_location.first_row_index);
+  }
+
+  /// \brief Complete the offset index.
+  ///
+  /// In the buffered row group mode, data pages are flushed into an in-memory
+  /// sink, so the OffsetIndexBuilder has only collected relative offsets,
+  /// which require adjustment once the pages are flushed to the file.
+  ///
+  /// \param final_position Final stream offset to add for page offset adjustment.
+  virtual void Finish(int64_t final_position) = 0;
+
+  /// \brief Serialize the offset index thrift message.
+  ///
+  /// \param[out] sink output stream to write the serialized message.
+  /// \param[in] encryptor encryptor to encrypt the serialized offset index.
+  virtual void WriteTo(::arrow::io::OutputStream* sink,
+                       Encryptor* encryptor = NULLPTR) const = 0;
+
+  /// \brief Create an OffsetIndex directly.
+  virtual std::unique_ptr<OffsetIndex> Build() const = 0;
+};
+
+/// \brief Interface for collecting page index of a parquet file.
+class PARQUET_EXPORT PageIndexBuilder {
+ public:
+  /// \brief API convenience to create a PageIndexBuilder.
+  static std::unique_ptr<PageIndexBuilder> Make(
+      const SchemaDescriptor* schema, InternalFileEncryptor* file_encryptor = NULLPTR);
+
+  virtual ~PageIndexBuilder() = default;
+
+  /// \brief Start a new row group.
+  virtual void AppendRowGroup() = 0;
+
+  /// \brief Get the ColumnIndexBuilder from column ordinal.
+  ///
+  /// \param i Column ordinal.
+  /// \return ColumnIndexBuilder for the column; its memory ownership belongs to
+  ///         the PageIndexBuilder.
+  virtual ColumnIndexBuilder* GetColumnIndexBuilder(int32_t i) = 0;
+
+  /// \brief Get the OffsetIndexBuilder from column ordinal.
+  ///
+  /// \param i Column ordinal.
+  /// \return OffsetIndexBuilder for the column; its memory ownership belongs to
+  ///         the PageIndexBuilder.
+  virtual OffsetIndexBuilder* GetOffsetIndexBuilder(int32_t i) = 0;
+
+  /// \brief Complete the page index builder; no more writes are allowed.
+ virtual void Finish() = 0; + + /// \brief Serialize the page index thrift message. + /// + /// Only valid column indexes and offset indexes are serialized and their locations + /// are set. + /// + /// \param[out] sink The output stream to write the page index. + /// \param[out] location The location of all page index to the start of sink. + virtual void WriteTo(::arrow::io::OutputStream* sink, + PageIndexLocation* location) const = 0; +}; + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h new file mode 100644 index 0000000000000000000000000000000000000000..77dc38ac6141fa21c23d7256fac408c9d55b3fad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/parquet_version.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#ifndef PARQUET_VERSION_H +#define PARQUET_VERSION_H + +#define PARQUET_VERSION_MAJOR 16 +#define PARQUET_VERSION_MINOR 0 +#define PARQUET_VERSION_PATCH 0 + +#define PARQUET_SO_VERSION "1600" +#define PARQUET_FULL_SO_VERSION "1600.0.0" + +// define the parquet created by version +#define CREATED_BY_VERSION "parquet-cpp-arrow version 16.0.0" + +#endif // PARQUET_VERSION_H diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..59e64bfc6e64d2091949af2fee840548695cc498 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/pch.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. 
+ +#include "parquet/encoding.h" +#include "parquet/exception.h" +#include "parquet/metadata.h" +#include "parquet/properties.h" +#include "parquet/schema.h" +#include "parquet/types.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/platform.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..b085e57cd9918e3c156bb6f6dc367f931d156fc2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/platform.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/buffer.h" // IWYU pragma: export +#include "arrow/io/interfaces.h" // IWYU pragma: export +#include "arrow/status.h" // IWYU pragma: export +#include "arrow/type_fwd.h" // IWYU pragma: export +#include "arrow/util/macros.h" // IWYU pragma: export + +#if defined(_WIN32) || defined(__CYGWIN__) + +#if defined(_MSC_VER) +#pragma warning(push) +// Disable warning for STL types usage in DLL interface +// https://web.archive.org/web/20130317015847/http://connect.microsoft.com/VisualStudio/feedback/details/696593/vc-10-vs-2010-basic-string-exports +#pragma warning(disable : 4275 4251) +// Disable diamond inheritance warnings +#pragma warning(disable : 4250) +// Disable macro redefinition warnings +#pragma warning(disable : 4005) +// Disable extern before exported template warnings +#pragma warning(disable : 4910) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef PARQUET_STATIC +#define PARQUET_EXPORT +#elif defined(PARQUET_EXPORTING) +#define PARQUET_EXPORT __declspec(dllexport) +#else +#define PARQUET_EXPORT __declspec(dllimport) +#endif + +#define PARQUET_NO_EXPORT + +#else // Not Windows +#ifndef PARQUET_EXPORT +#define PARQUET_EXPORT __attribute__((visibility("default"))) +#endif +#ifndef PARQUET_NO_EXPORT +#define PARQUET_NO_EXPORT __attribute__((visibility("hidden"))) +#endif +#endif // Non-Windows + +// This is a complicated topic, some reading on it: +// http://www.codesynthesis.com/~boris/blog/2010/01/18/dll-export-cxx-templates/ +#if defined(_MSC_VER) || defined(__clang__) +#define PARQUET_TEMPLATE_CLASS_EXPORT +#define PARQUET_TEMPLATE_EXPORT PARQUET_EXPORT +#else +#define PARQUET_TEMPLATE_CLASS_EXPORT PARQUET_EXPORT +#define PARQUET_TEMPLATE_EXPORT +#endif + +#define PARQUET_DISALLOW_COPY_AND_ASSIGN ARROW_DISALLOW_COPY_AND_ASSIGN + +#define PARQUET_NORETURN ARROW_NORETURN +#define PARQUET_DEPRECATED ARROW_DEPRECATED + +// If ARROW_VALGRIND set when compiling unit tests, also define +// PARQUET_VALGRIND +#ifdef ARROW_VALGRIND +#define PARQUET_VALGRIND +#endif + +namespace parquet { + +using Buffer = ::arrow::Buffer; +using Codec = 
::arrow::util::Codec;
+using CodecOptions = ::arrow::util::CodecOptions;
+using Compression = ::arrow::Compression;
+using MemoryPool = ::arrow::MemoryPool;
+using MutableBuffer = ::arrow::MutableBuffer;
+using ResizableBuffer = ::arrow::ResizableBuffer;
+using ArrowInputFile = ::arrow::io::RandomAccessFile;
+using ArrowInputStream = ::arrow::io::InputStream;
+using ArrowOutputStream = ::arrow::io::OutputStream;
+
+constexpr int64_t kDefaultOutputStreamSize = 1024;
+
+constexpr int16_t kNonPageOrdinal = static_cast<int16_t>(-1);
+
+PARQUET_EXPORT
+std::shared_ptr<::arrow::io::BufferOutputStream> CreateOutputStream(
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+PARQUET_EXPORT
+std::shared_ptr<ResizableBuffer> AllocateBuffer(
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(), int64_t size = 0);
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/printer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/printer.h
new file mode 100644
index 0000000000000000000000000000000000000000..6bdf5b456fa6b2d5f478e3880762810f52fbab65
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/printer.h
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <iosfwd>
+#include <list>
+
+#include "parquet/platform.h"
+
+namespace parquet {
+
+class ParquetFileReader;
+
+class PARQUET_EXPORT ParquetFilePrinter {
+ private:
+  ParquetFileReader* fileReader;
+
+ public:
+  explicit ParquetFilePrinter(ParquetFileReader* reader) : fileReader(reader) {}
+  ~ParquetFilePrinter() {}
+
+  void DebugPrint(std::ostream& stream, std::list<int> selected_columns,
+                  bool print_values = false, bool format_dump = false,
+                  bool print_key_value_metadata = false,
+                  const char* filename = "No Name");
+
+  void JSONPrint(std::ostream& stream, std::list<int> selected_columns,
+                 const char* filename = "No Name");
+};
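+
+// Illustrative usage sketch for ParquetFilePrinter above; `reader` is an
+// assumed, already-opened ParquetFileReader and columns {0, 1} are assumed
+// to exist (real code would also need <iostream> for std::cout):
+//
+//   ParquetFilePrinter printer(reader.get());
+//   std::list<int> columns = {0, 1};
+//   printer.DebugPrint(std::cout, columns, /*print_values=*/false);
+//   printer.JSONPrint(std::cout, columns, "example.parquet");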
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d3acb491e3906c4183dfcb0a8d9637a47fd082f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/properties.h
@@ -0,0 +1,1183 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow/io/caching.h"
+#include "arrow/type.h"
+#include "arrow/util/compression.h"
+#include "arrow/util/type_fwd.h"
+#include "parquet/encryption/encryption.h"
+#include "parquet/exception.h"
+#include "parquet/parquet_version.h"
+#include "parquet/platform.h"
+#include "parquet/schema.h"
+#include "parquet/type_fwd.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+/// Controls serialization format of data pages. parquet-format v2.0.0
+/// introduced a new data page metadata type DataPageV2 and serialized page
+/// structure (for example, encoded levels are no longer compressed). Prior to
+/// the completion of PARQUET-457 in 2020, this library did not implement
+/// DataPageV2 correctly, so if you use the V2 data page format, you may have
+/// forward compatibility issues (older versions of the library will be unable
+/// to read the files). Note that some Parquet implementations do not implement
+/// DataPageV2 at all.
+enum class ParquetDataPageVersion { V1, V2 };
+
+/// Align the default buffer size to a small multiple of a page size.
+constexpr int64_t kDefaultBufferSize = 4096 * 4;
+
+constexpr int32_t kDefaultThriftStringSizeLimit = 100 * 1000 * 1000;
+// Structs in the thrift definition are relatively large (at least 300 bytes).
+// This limits total memory to the same order of magnitude as
+// kDefaultThriftStringSizeLimit.
+constexpr int32_t kDefaultThriftContainerSizeLimit = 1000 * 1000;
+
+class PARQUET_EXPORT ReaderProperties {
+ public:
+  explicit ReaderProperties(MemoryPool* pool = ::arrow::default_memory_pool())
+      : pool_(pool) {}
+
+  MemoryPool* memory_pool() const { return pool_; }
+
+  std::shared_ptr<ArrowInputStream> GetStream(std::shared_ptr<ArrowInputFile> source,
+                                              int64_t start, int64_t num_bytes);
+
+  /// Buffered stream reading allows the user to control the memory usage of
+  /// parquet readers. This ensures that all `RandomAccessFile::ReadAt` calls are
+  /// wrapped in a buffered reader that uses a fixed-size buffer (of size
+  /// `buffer_size()`) instead of the full size requested by the ReadAt call.
+  ///
+  /// The primary reason for this control knob is resource control, not
+  /// performance.
+  bool is_buffered_stream_enabled() const { return buffered_stream_enabled_; }
+  /// Enable buffered stream reading.
+  void enable_buffered_stream() { buffered_stream_enabled_ = true; }
+  /// Disable buffered stream reading.
+  void disable_buffered_stream() { buffered_stream_enabled_ = false; }
+
+  bool read_dense_for_nullable() const { return read_dense_for_nullable_; }
+  void enable_read_dense_for_nullable() { read_dense_for_nullable_ = true; }
+  void disable_read_dense_for_nullable() { read_dense_for_nullable_ = false; }
+
+  /// Return the size of the buffered stream buffer.
+  int64_t buffer_size() const { return buffer_size_; }
+  /// Set the size of the buffered stream buffer in bytes.
+  void set_buffer_size(int64_t size) { buffer_size_ = size; }
+
+  /// \brief Return the size limit on thrift strings.
+ /// + /// This limit helps prevent space and time bombs in files, but may need to + /// be increased in order to read files with especially large headers. + int32_t thrift_string_size_limit() const { return thrift_string_size_limit_; } + /// Set the size limit on thrift strings. + void set_thrift_string_size_limit(int32_t size) { thrift_string_size_limit_ = size; } + + /// \brief Return the size limit on thrift containers. + /// + /// This limit helps prevent space and time bombs in files, but may need to + /// be increased in order to read files with especially large headers. + int32_t thrift_container_size_limit() const { return thrift_container_size_limit_; } + /// Set the size limit on thrift containers. + void set_thrift_container_size_limit(int32_t size) { + thrift_container_size_limit_ = size; + } + + /// Set the decryption properties. + void file_decryption_properties(std::shared_ptr decryption) { + file_decryption_properties_ = std::move(decryption); + } + /// Return the decryption properties. + const std::shared_ptr& file_decryption_properties() const { + return file_decryption_properties_; + } + + bool page_checksum_verification() const { return page_checksum_verification_; } + void set_page_checksum_verification(bool check_crc) { + page_checksum_verification_ = check_crc; + } + + private: + MemoryPool* pool_; + int64_t buffer_size_ = kDefaultBufferSize; + int32_t thrift_string_size_limit_ = kDefaultThriftStringSizeLimit; + int32_t thrift_container_size_limit_ = kDefaultThriftContainerSizeLimit; + bool buffered_stream_enabled_ = false; + bool page_checksum_verification_ = false; + // Used with a RecordReader. + bool read_dense_for_nullable_ = false; + std::shared_ptr file_decryption_properties_; +}; + +ReaderProperties PARQUET_EXPORT default_reader_properties(); + +static constexpr int64_t kDefaultDataPageSize = 1024 * 1024; +static constexpr bool DEFAULT_IS_DICTIONARY_ENABLED = true; +static constexpr int64_t DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT = kDefaultDataPageSize; +static constexpr int64_t DEFAULT_WRITE_BATCH_SIZE = 1024; +static constexpr int64_t DEFAULT_MAX_ROW_GROUP_LENGTH = 1024 * 1024; +static constexpr bool DEFAULT_ARE_STATISTICS_ENABLED = true; +static constexpr int64_t DEFAULT_MAX_STATISTICS_SIZE = 4096; +static constexpr Encoding::type DEFAULT_ENCODING = Encoding::UNKNOWN; +static const char DEFAULT_CREATED_BY[] = CREATED_BY_VERSION; +static constexpr Compression::type DEFAULT_COMPRESSION_TYPE = Compression::UNCOMPRESSED; +static constexpr bool DEFAULT_IS_PAGE_INDEX_ENABLED = false; + +class PARQUET_EXPORT ColumnProperties { + public: + ColumnProperties(Encoding::type encoding = DEFAULT_ENCODING, + Compression::type codec = DEFAULT_COMPRESSION_TYPE, + bool dictionary_enabled = DEFAULT_IS_DICTIONARY_ENABLED, + bool statistics_enabled = DEFAULT_ARE_STATISTICS_ENABLED, + size_t max_stats_size = DEFAULT_MAX_STATISTICS_SIZE, + bool page_index_enabled = DEFAULT_IS_PAGE_INDEX_ENABLED) + : encoding_(encoding), + codec_(codec), + dictionary_enabled_(dictionary_enabled), + statistics_enabled_(statistics_enabled), + max_stats_size_(max_stats_size), + page_index_enabled_(page_index_enabled) {} + + void set_encoding(Encoding::type encoding) { encoding_ = encoding; } + + void set_compression(Compression::type codec) { codec_ = codec; } + + void set_dictionary_enabled(bool dictionary_enabled) { + dictionary_enabled_ = dictionary_enabled; + } + + void set_statistics_enabled(bool statistics_enabled) { + statistics_enabled_ = statistics_enabled; + } + + void 
set_max_statistics_size(size_t max_stats_size) { + max_stats_size_ = max_stats_size; + } + + void set_compression_level(int compression_level) { + if (!codec_options_) { + codec_options_ = std::make_shared(); + } + codec_options_->compression_level = compression_level; + } + + void set_codec_options(const std::shared_ptr& codec_options) { + codec_options_ = codec_options; + } + + void set_page_index_enabled(bool page_index_enabled) { + page_index_enabled_ = page_index_enabled; + } + + Encoding::type encoding() const { return encoding_; } + + Compression::type compression() const { return codec_; } + + bool dictionary_enabled() const { return dictionary_enabled_; } + + bool statistics_enabled() const { return statistics_enabled_; } + + size_t max_statistics_size() const { return max_stats_size_; } + + int compression_level() const { + if (!codec_options_) { + return ::arrow::util::kUseDefaultCompressionLevel; + } + return codec_options_->compression_level; + } + + const std::shared_ptr& codec_options() const { return codec_options_; } + + bool page_index_enabled() const { return page_index_enabled_; } + + private: + Encoding::type encoding_; + Compression::type codec_; + bool dictionary_enabled_; + bool statistics_enabled_; + size_t max_stats_size_; + std::shared_ptr codec_options_; + bool page_index_enabled_; +}; + +class PARQUET_EXPORT WriterProperties { + public: + class Builder { + public: + Builder() + : pool_(::arrow::default_memory_pool()), + dictionary_pagesize_limit_(DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT), + write_batch_size_(DEFAULT_WRITE_BATCH_SIZE), + max_row_group_length_(DEFAULT_MAX_ROW_GROUP_LENGTH), + pagesize_(kDefaultDataPageSize), + version_(ParquetVersion::PARQUET_2_6), + data_page_version_(ParquetDataPageVersion::V1), + created_by_(DEFAULT_CREATED_BY), + store_decimal_as_integer_(false), + page_checksum_enabled_(false) {} + + explicit Builder(const WriterProperties& properties) + : pool_(properties.memory_pool()), + dictionary_pagesize_limit_(properties.dictionary_pagesize_limit()), + write_batch_size_(properties.write_batch_size()), + max_row_group_length_(properties.max_row_group_length()), + pagesize_(properties.data_pagesize()), + version_(properties.version()), + data_page_version_(properties.data_page_version()), + created_by_(properties.created_by()), + store_decimal_as_integer_(properties.store_decimal_as_integer()), + page_checksum_enabled_(properties.page_checksum_enabled()), + sorting_columns_(properties.sorting_columns()), + default_column_properties_(properties.default_column_properties()) {} + + virtual ~Builder() {} + + /// Specify the memory pool for the writer. Default default_memory_pool. + Builder* memory_pool(MemoryPool* pool) { + pool_ = pool; + return this; + } + + /// Enable dictionary encoding in general for all columns. Default + /// enabled. + Builder* enable_dictionary() { + default_column_properties_.set_dictionary_enabled(true); + return this; + } + + /// Disable dictionary encoding in general for all columns. Default + /// enabled. + Builder* disable_dictionary() { + default_column_properties_.set_dictionary_enabled(false); + return this; + } + + /// Enable dictionary encoding for column specified by `path`. Default + /// enabled. + Builder* enable_dictionary(const std::string& path) { + dictionary_enabled_[path] = true; + return this; + } + + /// Enable dictionary encoding for column specified by `path`. Default + /// enabled. 
+    Builder* enable_dictionary(const std::shared_ptr<schema::ColumnPath>& path) {
+      return this->enable_dictionary(path->ToDotString());
+    }
+
+    /// Disable dictionary encoding for column specified by `path`. Default
+    /// enabled.
+    Builder* disable_dictionary(const std::string& path) {
+      dictionary_enabled_[path] = false;
+      return this;
+    }
+
+    /// Disable dictionary encoding for column specified by `path`. Default
+    /// enabled.
+    Builder* disable_dictionary(const std::shared_ptr<schema::ColumnPath>& path) {
+      return this->disable_dictionary(path->ToDotString());
+    }
+
+    /// Specify the dictionary page size limit per row group. Default 1MB.
+    Builder* dictionary_pagesize_limit(int64_t dictionary_psize_limit) {
+      dictionary_pagesize_limit_ = dictionary_psize_limit;
+      return this;
+    }
+
+    /// Specify the write batch size while writing batches of Arrow values
+    /// into Parquet. Default 1024.
+    Builder* write_batch_size(int64_t write_batch_size) {
+      write_batch_size_ = write_batch_size;
+      return this;
+    }
+
+    /// Specify the max number of rows to put in a single row group.
+    /// Default 1Mi rows.
+    Builder* max_row_group_length(int64_t max_row_group_length) {
+      max_row_group_length_ = max_row_group_length;
+      return this;
+    }
+
+    /// Specify the data page size.
+    /// Default 1MB.
+    Builder* data_pagesize(int64_t pg_size) {
+      pagesize_ = pg_size;
+      return this;
+    }
+
+    /// Specify the data page version.
+    /// Default V1.
+    Builder* data_page_version(ParquetDataPageVersion data_page_version) {
+      data_page_version_ = data_page_version;
+      return this;
+    }
+
+    /// Specify the Parquet file version.
+    /// Default PARQUET_2_6.
+    Builder* version(ParquetVersion::type version) {
+      version_ = version;
+      return this;
+    }
+
+    Builder* created_by(const std::string& created_by) {
+      created_by_ = created_by;
+      return this;
+    }
+
+    Builder* enable_page_checksum() {
+      page_checksum_enabled_ = true;
+      return this;
+    }
+
+    Builder* disable_page_checksum() {
+      page_checksum_enabled_ = false;
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either when dictionary encoding is disabled or when we
+    /// fall back because the dictionary grew too large.
+    Builder* encoding(Encoding::type encoding_type) {
+      if (encoding_type == Encoding::PLAIN_DICTIONARY ||
+          encoding_type == Encoding::RLE_DICTIONARY) {
+        throw ParquetException("Can't use dictionary encoding as fallback encoding");
+      }
+
+      default_column_properties_.set_encoding(encoding_type);
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either when dictionary encoding is disabled or when we
+    /// fall back because the dictionary grew too large.
+    Builder* encoding(const std::string& path, Encoding::type encoding_type) {
+      if (encoding_type == Encoding::PLAIN_DICTIONARY ||
+          encoding_type == Encoding::RLE_DICTIONARY) {
+        throw ParquetException("Can't use dictionary encoding as fallback encoding");
+      }
+
+      encodings_[path] = encoding_type;
+      return this;
+    }
+
+    /// \brief Define the encoding that is used when we don't utilise dictionary encoding.
+    ///
+    /// This applies either when dictionary encoding is disabled or when we
+    /// fall back because the dictionary grew too large.
+    Builder* encoding(const std::shared_ptr<schema::ColumnPath>& path,
+                      Encoding::type encoding_type) {
+      return this->encoding(path->ToDotString(), encoding_type);
+    }
+
+    /// Specify compression codec in general for all columns.
+    /// Default UNCOMPRESSED.
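+
+    // Sketch of choosing a fallback (non-dictionary) encoding per the rules
+    // above; the column path "col" is hypothetical:
+    //
+    //   WriterProperties::Builder builder;
+    //   builder.encoding(Encoding::DELTA_BINARY_PACKED);  // file-wide fallback
+    //   builder.encoding("col", Encoding::PLAIN);         // per-column override
+    //   // Passing Encoding::RLE_DICTIONARY here would throw ParquetException.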
+ Builder* compression(Compression::type codec) { + default_column_properties_.set_compression(codec); + return this; + } + + /// Specify max statistics size to store min max value. + /// Default 4KB. + Builder* max_statistics_size(size_t max_stats_sz) { + default_column_properties_.set_max_statistics_size(max_stats_sz); + return this; + } + + /// Specify compression codec for the column specified by `path`. + /// Default UNCOMPRESSED. + Builder* compression(const std::string& path, Compression::type codec) { + codecs_[path] = codec; + return this; + } + + /// Specify compression codec for the column specified by `path`. + /// Default UNCOMPRESSED. + Builder* compression(const std::shared_ptr& path, + Compression::type codec) { + return this->compression(path->ToDotString(), codec); + } + + /// \brief Specify the default compression level for the compressor in + /// every column. In case a column does not have an explicitly specified + /// compression level, the default one would be used. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. + /// + /// If other compressor-specific options need to be set in addition to the compression + /// level, use the codec_options method. + Builder* compression_level(int compression_level) { + default_column_properties_.set_compression_level(compression_level); + return this; + } + + /// \brief Specify a compression level for the compressor for the column + /// described by path. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. + Builder* compression_level(const std::string& path, int compression_level) { + if (!codec_options_[path]) { + codec_options_[path] = std::make_shared(); + } + codec_options_[path]->compression_level = compression_level; + return this; + } + + /// \brief Specify a compression level for the compressor for the column + /// described by path. + /// + /// The provided compression level is compressor specific. The user would + /// have to familiarize oneself with the available levels for the selected + /// compressor. If the compressor does not allow for selecting different + /// compression levels, calling this function would not have any effect. + /// Parquet and Arrow do not validate the passed compression level. If no + /// level is selected by the user or if the special + /// std::numeric_limits::min() value is passed, then Arrow selects the + /// compression level. 
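+
+    // Sketch combining the codec and level selection described above; the
+    // level values are illustrative and codec-specific:
+    //
+    //   WriterProperties::Builder builder;
+    //   builder.compression(Compression::ZSTD);  // codec for all columns
+    //   builder.compression_level(3);            // default level
+    //   builder.compression_level("col", 7);     // hypothetical column "col"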
+ Builder* compression_level(const std::shared_ptr& path, + int compression_level) { + return this->compression_level(path->ToDotString(), compression_level); + } + + /// \brief Specify the default codec options for the compressor in + /// every column. + /// + /// The codec options allow configuring the compression level as well + /// as other codec-specific options. + Builder* codec_options( + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + default_column_properties_.set_codec_options(codec_options); + return this; + } + + /// \brief Specify the codec options for the compressor for the column + /// described by path. + Builder* codec_options( + const std::string& path, + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + codec_options_[path] = codec_options; + return this; + } + + /// \brief Specify the codec options for the compressor for the column + /// described by path. + Builder* codec_options( + const std::shared_ptr& path, + const std::shared_ptr<::arrow::util::CodecOptions>& codec_options) { + return this->codec_options(path->ToDotString(), codec_options); + } + + /// Define the file encryption properties. + /// Default NULL. + Builder* encryption( + std::shared_ptr file_encryption_properties) { + file_encryption_properties_ = std::move(file_encryption_properties); + return this; + } + + /// Enable statistics in general. + /// Default enabled. + Builder* enable_statistics() { + default_column_properties_.set_statistics_enabled(true); + return this; + } + + /// Disable statistics in general. + /// Default enabled. + Builder* disable_statistics() { + default_column_properties_.set_statistics_enabled(false); + return this; + } + + /// Enable statistics for the column specified by `path`. + /// Default enabled. + Builder* enable_statistics(const std::string& path) { + statistics_enabled_[path] = true; + return this; + } + + /// Enable statistics for the column specified by `path`. + /// Default enabled. + Builder* enable_statistics(const std::shared_ptr& path) { + return this->enable_statistics(path->ToDotString()); + } + + /// Define the sorting columns. + /// Default empty. + /// + /// If sorting columns are set, user should ensure that records + /// are sorted by sorting columns. Otherwise, the storing data + /// will be inconsistent with sorting_columns metadata. + Builder* set_sorting_columns(std::vector sorting_columns) { + sorting_columns_ = std::move(sorting_columns); + return this; + } + + /// Disable statistics for the column specified by `path`. + /// Default enabled. + Builder* disable_statistics(const std::string& path) { + statistics_enabled_[path] = false; + return this; + } + + /// Disable statistics for the column specified by `path`. + /// Default enabled. + Builder* disable_statistics(const std::shared_ptr& path) { + return this->disable_statistics(path->ToDotString()); + } + + /// Allow decimals with 1 <= precision <= 18 to be stored as integers. + /// + /// In Parquet, DECIMAL can be stored in any of the following physical types: + /// - int32: for 1 <= precision <= 9. + /// - int64: for 10 <= precision <= 18. + /// - fixed_len_byte_array: precision is limited by the array size. + /// Length n can store <= floor(log_10(2^(8*n - 1) - 1)) base-10 digits. + /// - binary: precision is unlimited. The minimum number of bytes to store + /// the unscaled value is used. + /// + /// By default, this is DISABLED and all decimal types annotate fixed_len_byte_array. 
+ /// + /// When enabled, the C++ writer will use following physical types to store decimals: + /// - int32: for 1 <= precision <= 9. + /// - int64: for 10 <= precision <= 18. + /// - fixed_len_byte_array: for precision > 18. + /// + /// As a consequence, decimal columns stored in integer types are more compact. + Builder* enable_store_decimal_as_integer() { + store_decimal_as_integer_ = true; + return this; + } + + /// Disable decimal logical type with 1 <= precision <= 18 to be stored + /// as integer physical type. + /// + /// Default disabled. + Builder* disable_store_decimal_as_integer() { + store_decimal_as_integer_ = false; + return this; + } + + /// Enable writing page index in general for all columns. Default disabled. + /// + /// Writing statistics to the page index disables the old method of writing + /// statistics to each data page header. + /// The page index makes filtering more efficient than the page header, as + /// it gathers all the statistics for a Parquet file in a single place, + /// avoiding scattered I/O. + /// + /// Please check the link below for more details: + /// https://github.com/apache/parquet-format/blob/master/PageIndex.md + Builder* enable_write_page_index() { + default_column_properties_.set_page_index_enabled(true); + return this; + } + + /// Disable writing page index in general for all columns. Default disabled. + Builder* disable_write_page_index() { + default_column_properties_.set_page_index_enabled(false); + return this; + } + + /// Enable writing page index for column specified by `path`. Default disabled. + Builder* enable_write_page_index(const std::string& path) { + page_index_enabled_[path] = true; + return this; + } + + /// Enable writing page index for column specified by `path`. Default disabled. + Builder* enable_write_page_index(const std::shared_ptr& path) { + return this->enable_write_page_index(path->ToDotString()); + } + + /// Disable writing page index for column specified by `path`. Default disabled. + Builder* disable_write_page_index(const std::string& path) { + page_index_enabled_[path] = false; + return this; + } + + /// Disable writing page index for column specified by `path`. Default disabled. + Builder* disable_write_page_index(const std::shared_ptr& path) { + return this->disable_write_page_index(path->ToDotString()); + } + + /// \brief Build the WriterProperties with the builder parameters. + /// \return The WriterProperties defined by the builder. 
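+
+    // End-to-end sketch of the Builder usage documented above; the settings
+    // shown are illustrative, not recommendations:
+    //
+    //   std::shared_ptr<WriterProperties> props =
+    //       WriterProperties::Builder()
+    //           .max_row_group_length(64 * 1024)
+    //           ->compression(Compression::SNAPPY)
+    //           ->enable_write_page_index()
+    //           ->build();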
+ std::shared_ptr build() { + std::unordered_map column_properties; + auto get = [&](const std::string& key) -> ColumnProperties& { + auto it = column_properties.find(key); + if (it == column_properties.end()) + return column_properties[key] = default_column_properties_; + else + return it->second; + }; + + for (const auto& item : encodings_) get(item.first).set_encoding(item.second); + for (const auto& item : codecs_) get(item.first).set_compression(item.second); + for (const auto& item : codec_options_) + get(item.first).set_codec_options(item.second); + for (const auto& item : dictionary_enabled_) + get(item.first).set_dictionary_enabled(item.second); + for (const auto& item : statistics_enabled_) + get(item.first).set_statistics_enabled(item.second); + for (const auto& item : page_index_enabled_) + get(item.first).set_page_index_enabled(item.second); + + return std::shared_ptr(new WriterProperties( + pool_, dictionary_pagesize_limit_, write_batch_size_, max_row_group_length_, + pagesize_, version_, created_by_, page_checksum_enabled_, + std::move(file_encryption_properties_), default_column_properties_, + column_properties, data_page_version_, store_decimal_as_integer_, + std::move(sorting_columns_))); + } + + private: + MemoryPool* pool_; + int64_t dictionary_pagesize_limit_; + int64_t write_batch_size_; + int64_t max_row_group_length_; + int64_t pagesize_; + ParquetVersion::type version_; + ParquetDataPageVersion data_page_version_; + std::string created_by_; + bool store_decimal_as_integer_; + bool page_checksum_enabled_; + + std::shared_ptr file_encryption_properties_; + + // If empty, there is no sorting columns. + std::vector sorting_columns_; + + // Settings used for each column unless overridden in any of the maps below + ColumnProperties default_column_properties_; + std::unordered_map encodings_; + std::unordered_map codecs_; + std::unordered_map> codec_options_; + std::unordered_map dictionary_enabled_; + std::unordered_map statistics_enabled_; + std::unordered_map page_index_enabled_; + }; + + inline MemoryPool* memory_pool() const { return pool_; } + + inline int64_t dictionary_pagesize_limit() const { return dictionary_pagesize_limit_; } + + inline int64_t write_batch_size() const { return write_batch_size_; } + + inline int64_t max_row_group_length() const { return max_row_group_length_; } + + inline int64_t data_pagesize() const { return pagesize_; } + + inline ParquetDataPageVersion data_page_version() const { + return parquet_data_page_version_; + } + + inline ParquetVersion::type version() const { return parquet_version_; } + + inline std::string created_by() const { return parquet_created_by_; } + + inline bool store_decimal_as_integer() const { return store_decimal_as_integer_; } + + inline bool page_checksum_enabled() const { return page_checksum_enabled_; } + + inline Encoding::type dictionary_index_encoding() const { + if (parquet_version_ == ParquetVersion::PARQUET_1_0) { + return Encoding::PLAIN_DICTIONARY; + } else { + return Encoding::RLE_DICTIONARY; + } + } + + inline Encoding::type dictionary_page_encoding() const { + if (parquet_version_ == ParquetVersion::PARQUET_1_0) { + return Encoding::PLAIN_DICTIONARY; + } else { + return Encoding::PLAIN; + } + } + + const ColumnProperties& column_properties( + const std::shared_ptr& path) const { + auto it = column_properties_.find(path->ToDotString()); + if (it != column_properties_.end()) return it->second; + return default_column_properties_; + } + + Encoding::type encoding(const std::shared_ptr& path) const { + 
return column_properties(path).encoding(); + } + + Compression::type compression(const std::shared_ptr& path) const { + return column_properties(path).compression(); + } + + int compression_level(const std::shared_ptr& path) const { + return column_properties(path).compression_level(); + } + + const std::shared_ptr codec_options( + const std::shared_ptr& path) const { + return column_properties(path).codec_options(); + } + + bool dictionary_enabled(const std::shared_ptr& path) const { + return column_properties(path).dictionary_enabled(); + } + + const std::vector& sorting_columns() const { return sorting_columns_; } + + bool statistics_enabled(const std::shared_ptr& path) const { + return column_properties(path).statistics_enabled(); + } + + size_t max_statistics_size(const std::shared_ptr& path) const { + return column_properties(path).max_statistics_size(); + } + + bool page_index_enabled(const std::shared_ptr& path) const { + return column_properties(path).page_index_enabled(); + } + + bool page_index_enabled() const { + if (default_column_properties_.page_index_enabled()) { + return true; + } + for (const auto& item : column_properties_) { + if (item.second.page_index_enabled()) { + return true; + } + } + return false; + } + + inline FileEncryptionProperties* file_encryption_properties() const { + return file_encryption_properties_.get(); + } + + std::shared_ptr column_encryption_properties( + const std::string& path) const { + if (file_encryption_properties_) { + return file_encryption_properties_->column_encryption_properties(path); + } else { + return NULLPTR; + } + } + + // \brief Return the default column properties + const ColumnProperties& default_column_properties() const { + return default_column_properties_; + } + + private: + explicit WriterProperties( + MemoryPool* pool, int64_t dictionary_pagesize_limit, int64_t write_batch_size, + int64_t max_row_group_length, int64_t pagesize, ParquetVersion::type version, + const std::string& created_by, bool page_write_checksum_enabled, + std::shared_ptr file_encryption_properties, + const ColumnProperties& default_column_properties, + const std::unordered_map& column_properties, + ParquetDataPageVersion data_page_version, bool store_short_decimal_as_integer, + std::vector sorting_columns) + : pool_(pool), + dictionary_pagesize_limit_(dictionary_pagesize_limit), + write_batch_size_(write_batch_size), + max_row_group_length_(max_row_group_length), + pagesize_(pagesize), + parquet_data_page_version_(data_page_version), + parquet_version_(version), + parquet_created_by_(created_by), + store_decimal_as_integer_(store_short_decimal_as_integer), + page_checksum_enabled_(page_write_checksum_enabled), + file_encryption_properties_(file_encryption_properties), + sorting_columns_(std::move(sorting_columns)), + default_column_properties_(default_column_properties), + column_properties_(column_properties) {} + + MemoryPool* pool_; + int64_t dictionary_pagesize_limit_; + int64_t write_batch_size_; + int64_t max_row_group_length_; + int64_t pagesize_; + ParquetDataPageVersion parquet_data_page_version_; + ParquetVersion::type parquet_version_; + std::string parquet_created_by_; + bool store_decimal_as_integer_; + bool page_checksum_enabled_; + + std::shared_ptr file_encryption_properties_; + + std::vector sorting_columns_; + + ColumnProperties default_column_properties_; + std::unordered_map column_properties_; +}; + +PARQUET_EXPORT const std::shared_ptr& default_writer_properties(); + +// 
---------------------------------------------------------------------- +// Properties specific to Apache Arrow columnar read and write + +static constexpr bool kArrowDefaultUseThreads = false; + +// Default number of rows to read when using ::arrow::RecordBatchReader +static constexpr int64_t kArrowDefaultBatchSize = 64 * 1024; + +/// EXPERIMENTAL: Properties for configuring FileReader behavior. +class PARQUET_EXPORT ArrowReaderProperties { + public: + explicit ArrowReaderProperties(bool use_threads = kArrowDefaultUseThreads) + : use_threads_(use_threads), + read_dict_indices_(), + batch_size_(kArrowDefaultBatchSize), + pre_buffer_(true), + cache_options_(::arrow::io::CacheOptions::LazyDefaults()), + coerce_int96_timestamp_unit_(::arrow::TimeUnit::NANO) {} + + /// \brief Set whether to use the IO thread pool to parse columns in parallel. + /// + /// Default is false. + void set_use_threads(bool use_threads) { use_threads_ = use_threads; } + /// Return whether will use multiple threads. + bool use_threads() const { return use_threads_; } + + /// \brief Set whether to read a particular column as dictionary encoded. + /// + /// If the file metadata contains a serialized Arrow schema, then ... + //// + /// This is only supported for columns with a Parquet physical type of + /// BYTE_ARRAY, such as string or binary types. + void set_read_dictionary(int column_index, bool read_dict) { + if (read_dict) { + read_dict_indices_.insert(column_index); + } else { + read_dict_indices_.erase(column_index); + } + } + /// Return whether the column at the index will be read as dictionary. + bool read_dictionary(int column_index) const { + if (read_dict_indices_.find(column_index) != read_dict_indices_.end()) { + return true; + } else { + return false; + } + } + + /// \brief Set the maximum number of rows to read into a record batch. + /// + /// Will only be fewer rows when there are no more rows in the file. + /// Note that some APIs such as ReadTable may ignore this setting. + void set_batch_size(int64_t batch_size) { batch_size_ = batch_size; } + /// Return the batch size in rows. + /// + /// Note that some APIs such as ReadTable may ignore this setting. + int64_t batch_size() const { return batch_size_; } + + /// Enable read coalescing (default false). + /// + /// When enabled, the Arrow reader will pre-buffer necessary regions + /// of the file in-memory. This is intended to improve performance on + /// high-latency filesystems (e.g. Amazon S3). + void set_pre_buffer(bool pre_buffer) { pre_buffer_ = pre_buffer; } + /// Return whether read coalescing is enabled. + bool pre_buffer() const { return pre_buffer_; } + + /// Set options for read coalescing. This can be used to tune the + /// implementation for characteristics of different filesystems. + void set_cache_options(::arrow::io::CacheOptions options) { cache_options_ = options; } + /// Return the options for read coalescing. + const ::arrow::io::CacheOptions& cache_options() const { return cache_options_; } + + /// Set execution context for read coalescing. + void set_io_context(const ::arrow::io::IOContext& ctx) { io_context_ = ctx; } + /// Return the execution context used for read coalescing. + const ::arrow::io::IOContext& io_context() const { return io_context_; } + + /// Set timestamp unit to use for deprecated INT96-encoded timestamps + /// (default is NANO). 
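+
+  // Sketch of configuring the reader properties described above; the values
+  // are illustrative:
+  //
+  //   ArrowReaderProperties props;
+  //   props.set_use_threads(true);
+  //   props.set_batch_size(32 * 1024);
+  //   props.set_read_dictionary(/*column_index=*/0, true);
+  //   props.set_coerce_int96_timestamp_unit(::arrow::TimeUnit::MICRO);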
+  void set_coerce_int96_timestamp_unit(::arrow::TimeUnit::type unit) {
+    coerce_int96_timestamp_unit_ = unit;
+  }
+
+  ::arrow::TimeUnit::type coerce_int96_timestamp_unit() const {
+    return coerce_int96_timestamp_unit_;
+  }
+
+ private:
+  bool use_threads_;
+  std::unordered_set<int> read_dict_indices_;
+  int64_t batch_size_;
+  bool pre_buffer_;
+  ::arrow::io::IOContext io_context_;
+  ::arrow::io::CacheOptions cache_options_;
+  ::arrow::TimeUnit::type coerce_int96_timestamp_unit_;
+};
+
+/// EXPERIMENTAL: Constructs the default ArrowReaderProperties
+PARQUET_EXPORT
+ArrowReaderProperties default_arrow_reader_properties();
+
+class PARQUET_EXPORT ArrowWriterProperties {
+ public:
+  enum EngineVersion {
+    V1,  // Supports only nested lists.
+    V2   // Full support for all nesting combinations
+  };
+  class Builder {
+   public:
+    Builder()
+        : write_timestamps_as_int96_(false),
+          coerce_timestamps_enabled_(false),
+          coerce_timestamps_unit_(::arrow::TimeUnit::SECOND),
+          truncated_timestamps_allowed_(false),
+          store_schema_(false),
+          compliant_nested_types_(true),
+          engine_version_(V2),
+          use_threads_(kArrowDefaultUseThreads),
+          executor_(NULLPTR) {}
+    virtual ~Builder() = default;
+
+    /// \brief Disable writing legacy int96 timestamps (default disabled).
+    Builder* disable_deprecated_int96_timestamps() {
+      write_timestamps_as_int96_ = false;
+      return this;
+    }
+
+    /// \brief Enable writing legacy int96 timestamps (default disabled).
+    ///
+    /// May be turned on to write timestamps compatible with older Parquet writers.
+    /// This takes precedence over coerce_timestamps.
+    Builder* enable_deprecated_int96_timestamps() {
+      write_timestamps_as_int96_ = true;
+      return this;
+    }
+
+    /// \brief Coerce all timestamps to the specified time unit.
+    /// \param unit time unit to truncate to.
+    /// For Parquet versions 1.0 and 2.4, nanoseconds are cast to microseconds.
+    Builder* coerce_timestamps(::arrow::TimeUnit::type unit) {
+      coerce_timestamps_enabled_ = true;
+      coerce_timestamps_unit_ = unit;
+      return this;
+    }
+
+    /// \brief Allow loss of data when truncating timestamps.
+    ///
+    /// This is disallowed by default and an error will be returned.
+    Builder* allow_truncated_timestamps() {
+      truncated_timestamps_allowed_ = true;
+      return this;
+    }
+
+    /// \brief Disallow loss of data when truncating timestamps (default).
+    Builder* disallow_truncated_timestamps() {
+      truncated_timestamps_allowed_ = false;
+      return this;
+    }
+
+    /// \brief EXPERIMENTAL: Write binary serialized Arrow schema to the file,
+    /// to enable certain read options (like "read_dictionary") to be set
+    /// automatically
+    Builder* store_schema() {
+      store_schema_ = true;
+      return this;
+    }
+
+    /// \brief When enabled, will not preserve Arrow field names for list types.
+    ///
+    /// Instead of using the field names Arrow uses for the values array of
+    /// list types (default "item"), will use "element", as is specified in
+    /// the Parquet spec.
+    ///
+    /// This is enabled by default.
+    Builder* enable_compliant_nested_types() {
+      compliant_nested_types_ = true;
+      return this;
+    }
+
+    /// Preserve Arrow list field name.
+    Builder* disable_compliant_nested_types() {
+      compliant_nested_types_ = false;
+      return this;
+    }
+
+    /// Set the version of the Parquet writer engine.
+    Builder* set_engine_version(EngineVersion version) {
+      engine_version_ = version;
+      return this;
+    }
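+
+    // Sketch of the timestamp-related options above; the unit choice is
+    // illustrative:
+    //
+    //   ArrowWriterProperties::Builder builder;
+    //   builder.coerce_timestamps(::arrow::TimeUnit::MILLI)
+    //       ->allow_truncated_timestamps()
+    //       ->store_schema();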
+
+    /// \brief Set whether to use multiple threads to write columns
+    /// in parallel in the buffered row group mode.
+    ///
+    /// WARNING: If writing multiple files in parallel in the same
+    /// executor, deadlock may occur if use_threads is true. Please
+    /// disable it in this case.
+    ///
+    /// Default is false.
+    Builder* set_use_threads(bool use_threads) {
+      use_threads_ = use_threads;
+      return this;
+    }
+
+    /// \brief Set the executor to write columns in parallel in the
+    /// buffered row group mode.
+    ///
+    /// Default is nullptr, in which case the default CPU executor will be used.
+    Builder* set_executor(::arrow::internal::Executor* executor) {
+      executor_ = executor;
+      return this;
+    }
+
+    /// Create the final properties.
+    std::shared_ptr<ArrowWriterProperties> build() {
+      return std::shared_ptr<ArrowWriterProperties>(new ArrowWriterProperties(
+          write_timestamps_as_int96_, coerce_timestamps_enabled_, coerce_timestamps_unit_,
+          truncated_timestamps_allowed_, store_schema_, compliant_nested_types_,
+          engine_version_, use_threads_, executor_));
+    }
+
+   private:
+    bool write_timestamps_as_int96_;
+
+    bool coerce_timestamps_enabled_;
+    ::arrow::TimeUnit::type coerce_timestamps_unit_;
+    bool truncated_timestamps_allowed_;
+
+    bool store_schema_;
+    bool compliant_nested_types_;
+    EngineVersion engine_version_;
+
+    bool use_threads_;
+    ::arrow::internal::Executor* executor_;
+  };
+
+  bool support_deprecated_int96_timestamps() const { return write_timestamps_as_int96_; }
+
+  bool coerce_timestamps_enabled() const { return coerce_timestamps_enabled_; }
+  ::arrow::TimeUnit::type coerce_timestamps_unit() const {
+    return coerce_timestamps_unit_;
+  }
+
+  bool truncated_timestamps_allowed() const { return truncated_timestamps_allowed_; }
+
+  bool store_schema() const { return store_schema_; }
+
+  /// \brief Enable nested type naming according to the parquet specification.
+  ///
+  /// Older versions of arrow wrote out field names for nested lists based on the name
+  /// of the field. According to the parquet specification they should always be
+  /// "element".
+  bool compliant_nested_types() const { return compliant_nested_types_; }
+
+  /// \brief The underlying engine version to use when writing Arrow data.
+  ///
+  /// V2 is currently the latest; V1 is considered deprecated but is left in
+  /// place in case bugs are detected in V2.
+  EngineVersion engine_version() const { return engine_version_; }
+
+  /// \brief Returns whether the writer will use multiple threads
+  /// to write columns in parallel in the buffered row group mode.
+  bool use_threads() const { return use_threads_; }
+
+  /// \brief Returns the executor used to write columns in parallel.
+ ::arrow::internal::Executor* executor() const; + + private: + explicit ArrowWriterProperties(bool write_nanos_as_int96, + bool coerce_timestamps_enabled, + ::arrow::TimeUnit::type coerce_timestamps_unit, + bool truncated_timestamps_allowed, bool store_schema, + bool compliant_nested_types, + EngineVersion engine_version, bool use_threads, + ::arrow::internal::Executor* executor) + : write_timestamps_as_int96_(write_nanos_as_int96), + coerce_timestamps_enabled_(coerce_timestamps_enabled), + coerce_timestamps_unit_(coerce_timestamps_unit), + truncated_timestamps_allowed_(truncated_timestamps_allowed), + store_schema_(store_schema), + compliant_nested_types_(compliant_nested_types), + engine_version_(engine_version), + use_threads_(use_threads), + executor_(executor) {} + + const bool write_timestamps_as_int96_; + const bool coerce_timestamps_enabled_; + const ::arrow::TimeUnit::type coerce_timestamps_unit_; + const bool truncated_timestamps_allowed_; + const bool store_schema_; + const bool compliant_nested_types_; + const EngineVersion engine_version_; + const bool use_threads_; + ::arrow::internal::Executor* executor_; +}; + +/// \brief State object used for writing Arrow data directly to a Parquet +/// column chunk. API possibly not stable +struct ArrowWriteContext { + ArrowWriteContext(MemoryPool* memory_pool, ArrowWriterProperties* properties) + : memory_pool(memory_pool), + properties(properties), + data_buffer(AllocateBuffer(memory_pool)), + def_levels_buffer(AllocateBuffer(memory_pool)) {} + + template + ::arrow::Status GetScratchData(const int64_t num_values, T** out) { + ARROW_RETURN_NOT_OK(this->data_buffer->Resize(num_values * sizeof(T), false)); + *out = reinterpret_cast(this->data_buffer->mutable_data()); + return ::arrow::Status::OK(); + } + + MemoryPool* memory_pool; + const ArrowWriterProperties* properties; + + // Buffer used for storing the data of an array converted to the physical type + // as expected by parquet-cpp. + std::shared_ptr data_buffer; + + // We use the shared ownership of this buffer + std::shared_ptr def_levels_buffer; +}; + +PARQUET_EXPORT +std::shared_ptr default_arrow_writer_properties(); + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h new file mode 100644 index 0000000000000000000000000000000000000000..0d6ea9898f7ba2f9349b88ad7395aed85da6e8e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/statistics.h @@ -0,0 +1,382 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
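+
+// Illustrative aside for ArrowWriteContext::GetScratchData from properties.h
+// above; `ctx` and `num_values` are assumed to exist (a sketch, not an API
+// excerpt):
+//
+//   int32_t* scratch = NULLPTR;
+//   PARQUET_THROW_NOT_OK(ctx.GetScratchData<int32_t>(num_values, &scratch));
+//   // scratch now points at reusable buffer space for num_values int32 values.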
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "parquet/platform.h"
+#include "parquet/types.h"
+
+namespace arrow {
+
+class Array;
+class BinaryArray;
+
+} // namespace arrow
+
+namespace parquet {
+
+class ColumnDescriptor;
+
+// ----------------------------------------------------------------------
+// Value comparator interfaces
+
+/// \brief Base class for value comparators. Generally used with
+/// TypedComparator
+class PARQUET_EXPORT Comparator {
+ public:
+  virtual ~Comparator() {}
+
+  /// \brief Create a comparator explicitly from physical type and
+  /// sort order
+  /// \param[in] physical_type the physical type for the typed
+  /// comparator
+  /// \param[in] sort_order either SortOrder::SIGNED or
+  /// SortOrder::UNSIGNED
+  /// \param[in] type_length for FIXED_LEN_BYTE_ARRAY only
+  static std::shared_ptr<Comparator> Make(Type::type physical_type,
+                                          SortOrder::type sort_order,
+                                          int type_length = -1);
+
+  /// \brief Create typed comparator inferring default sort order from
+  /// ColumnDescriptor
+  /// \param[in] descr the Parquet column schema
+  static std::shared_ptr<Comparator> Make(const ColumnDescriptor* descr);
+};
+
+/// \brief Interface for comparison of physical types according to the
+/// semantics of a particular logical type.
+template <typename DType>
+class TypedComparator : public Comparator {
+ public:
+  using T = typename DType::c_type;
+
+  /// \brief Scalar comparison of two elements, return true if first
+  /// is strictly less than the second
+  virtual bool Compare(const T& a, const T& b) const = 0;
+
+  /// \brief Compute maximum and minimum elements in a batch of
+  /// elements without any nulls
+  virtual std::pair<T, T> GetMinMax(const T* values, int64_t length) const = 0;
+
+  /// \brief Compute minimum and maximum elements from an Arrow array. Only
+  /// valid for certain Parquet Type / Arrow Type combinations, like BYTE_ARRAY
+  /// / arrow::BinaryArray
+  virtual std::pair<T, T> GetMinMax(const ::arrow::Array& values) const = 0;
+
+  /// \brief Compute maximum and minimum elements in a batch of
+  /// elements with accompanying bitmap indicating which elements are
+  /// included (bit set) and excluded (bit not set)
+  ///
+  /// \param[in] values the sequence of values
+  /// \param[in] length the length of the sequence
+  /// \param[in] valid_bits a bitmap indicating which elements are
+  /// included (1) or excluded (0)
+  /// \param[in] valid_bits_offset the bit offset into the bitmap of
+  /// the first element in the sequence
+  virtual std::pair<T, T> GetMinMaxSpaced(const T* values, int64_t length,
+                                          const uint8_t* valid_bits,
+                                          int64_t valid_bits_offset) const = 0;
+};
+
+/// \brief Typed version of Comparator::Make
+template <typename DType>
+std::shared_ptr<TypedComparator<DType>> MakeComparator(Type::type physical_type,
+                                                       SortOrder::type sort_order,
+                                                       int type_length = -1) {
+  return std::static_pointer_cast<TypedComparator<DType>>(
+      Comparator::Make(physical_type, sort_order, type_length));
+}
+
+/// \brief Typed version of Comparator::Make
+template <typename DType>
+std::shared_ptr<TypedComparator<DType>> MakeComparator(const ColumnDescriptor* descr) {
+  return std::static_pointer_cast<TypedComparator<DType>>(Comparator::Make(descr));
+}
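+
+// Illustrative use of the typed comparator factories above; `descr` is an
+// assumed ColumnDescriptor* for an INT32 column, and `values`/`num_values`
+// are an assumed batch of non-null values:
+//
+//   auto cmp = MakeComparator<Int32Type>(descr);
+//   bool less = cmp->Compare(1, 2);                      // true
+//   auto min_max = cmp->GetMinMax(values, num_values);   // {min, max} pair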
+
+// ----------------------------------------------------------------------
+
+/// \brief Structure representing encoded statistics to be written to
+/// and read from Parquet serialized metadata.
+class PARQUET_EXPORT EncodedStatistics {
+  std::string max_, min_;
+  bool is_signed_ = false;
+
+ public:
+  EncodedStatistics() = default;
+
+  const std::string& max() const { return max_; }
+  const std::string& min() const { return min_; }
+
+  int64_t null_count = 0;
+  int64_t distinct_count = 0;
+
+  bool has_min = false;
+  bool has_max = false;
+  bool has_null_count = false;
+  bool has_distinct_count = false;
+
+  // When all values in the statistics are null, it is set to true.
+  // Otherwise, at least one value is not null, or we are not sure at all.
+  // Page index requires this information to decide whether a data page
+  // is a null page or not.
+  bool all_null_value = false;
+
+  // From parquet-mr
+  // Don't write stats larger than the max size rather than truncating. The
+  // rationale is that some engines may use the minimum value in the page as
+  // the true minimum for aggregations and there is no way to mark that a
+  // value has been truncated and is a lower bound and not in the page.
+  void ApplyStatSizeLimits(size_t length) {
+    if (max_.length() > length) {
+      has_max = false;
+      max_.clear();
+    }
+    if (min_.length() > length) {
+      has_min = false;
+      min_.clear();
+    }
+  }
+
+  bool is_set() const {
+    return has_min || has_max || has_null_count || has_distinct_count;
+  }
+
+  bool is_signed() const { return is_signed_; }
+
+  void set_is_signed(bool is_signed) { is_signed_ = is_signed; }
+
+  EncodedStatistics& set_max(std::string value) {
+    max_ = std::move(value);
+    has_max = true;
+    return *this;
+  }
+
+  EncodedStatistics& set_min(std::string value) {
+    min_ = std::move(value);
+    has_min = true;
+    return *this;
+  }
+
+  EncodedStatistics& set_null_count(int64_t value) {
+    null_count = value;
+    has_null_count = true;
+    return *this;
+  }
+
+  EncodedStatistics& set_distinct_count(int64_t value) {
+    distinct_count = value;
+    has_distinct_count = true;
+    return *this;
+  }
+};
+
+/// \brief Base type for computing column statistics while writing a file
+class PARQUET_EXPORT Statistics {
+ public:
+  virtual ~Statistics() {}
+
+  /// \brief Create a new statistics instance given a column schema
+  /// definition
+  /// \param[in] descr the column schema
+  /// \param[in] pool a memory pool to use for any memory allocations, optional
+  static std::shared_ptr<Statistics> Make(
+      const ColumnDescriptor* descr,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  /// \brief Create a new statistics instance given a column schema
+  /// definition and preexisting state
+  /// \param[in] descr the column schema
+  /// \param[in] encoded_min the encoded minimum value
+  /// \param[in] encoded_max the encoded maximum value
+  /// \param[in] num_values total number of values
+  /// \param[in] null_count number of null values
+  /// \param[in] distinct_count number of distinct values
+  /// \param[in] has_min_max whether the min/max statistics are set
+  /// \param[in] has_null_count whether the null_count statistics are set
+  /// \param[in] has_distinct_count whether the distinct_count statistics are set
+  /// \param[in] pool a memory pool to use for any memory allocations, optional
+  static std::shared_ptr<Statistics> Make(
+      const ColumnDescriptor* descr, const std::string& encoded_min,
+      const std::string& encoded_max, int64_t num_values, int64_t null_count,
+      int64_t distinct_count, bool has_min_max, bool has_null_count,
+      bool has_distinct_count,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  // Helper function to convert EncodedStatistics to Statistics.
+  // EncodedStatistics does not contain the number of non-null values;
+  // it can be passed using the num_values parameter.
+  static std::shared_ptr<Statistics> Make(
+      const ColumnDescriptor* descr, const EncodedStatistics* encoded_statistics,
+      int64_t num_values = -1,
+      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+  /// \brief Return true if the count of null values is set
+  virtual bool HasNullCount() const = 0;
+
+  /// \brief The number of null values; may not be set
+  virtual int64_t null_count() const = 0;
+
+  /// \brief Return true if the count of distinct values is set
+  virtual bool HasDistinctCount() const = 0;
+
+  /// \brief The number of distinct values; may not be set
+  virtual int64_t distinct_count() const = 0;
+
+  /// \brief The number of non-null values in the column
+  virtual int64_t num_values() const = 0;
+
+  /// \brief Return true if the min and max statistics are set. Obtain
+  /// them with TypedStatistics::min and TypedStatistics::max
+  virtual bool HasMinMax() const = 0;
+
+  /// \brief Reset the state of this object to its initial (no data observed) state
+  virtual void Reset() = 0;
+
+  /// \brief Plain-encoded minimum value
+  virtual std::string EncodeMin() const = 0;
+
+  /// \brief Plain-encoded maximum value
+  virtual std::string EncodeMax() const = 0;
+
+  /// \brief The finalized encoded form of the statistics for transport
+  virtual EncodedStatistics Encode() = 0;
+
+  /// \brief The physical type of the column schema
+  virtual Type::type physical_type() const = 0;
+
+  /// \brief The full type descriptor from the column schema
+  virtual const ColumnDescriptor* descr() const = 0;
+
+  /// \brief Check two Statistics for equality
+  virtual bool Equals(const Statistics& other) const = 0;
+
+ protected:
+  static std::shared_ptr<Statistics> Make(Type::type physical_type, const void* min,
+                                          const void* max, int64_t num_values,
+                                          int64_t null_count, int64_t distinct_count);
+};
+
+/// \brief A typed implementation of Statistics
+template <typename DType>
+class TypedStatistics : public Statistics {
+ public:
+  using T = typename DType::c_type;
+
+  /// \brief The current minimum value
+  virtual const T& min() const = 0;
+
+  /// \brief The current maximum value
+  virtual const T& max() const = 0;
+
+  /// \brief Update state with the state of another Statistics object
+  virtual void Merge(const TypedStatistics<DType>& other) = 0;
+
+  /// \brief Batch statistics update
+  virtual void Update(const T* values, int64_t num_values, int64_t null_count) = 0;
+
+  /// \brief Batch statistics update with a supplied validity bitmap
+  /// \param[in] values pointer to column values
+  /// \param[in] valid_bits pointer to a bitmap representing whether values are non-null.
+  /// \param[in] valid_bits_offset offset into valid_bits where the slice of
+  ///            data begins.
+  /// \param[in] num_spaced_values the length of values in values/valid_bits to inspect
+  ///                              when calculating statistics. This can be smaller than
+  ///                              num_values + null_count as null_count can include nulls
+  ///                              from parents while num_spaced_values does not.
+  /// \param[in] num_values number of values that are not null.
+  /// \param[in] null_count number of values that are null.
+  virtual void UpdateSpaced(const T* values, const uint8_t* valid_bits,
+                            int64_t valid_bits_offset, int64_t num_spaced_values,
+                            int64_t num_values, int64_t null_count) = 0;
+
+  /// \brief EXPERIMENTAL: Update statistics with an Arrow array without
+  /// conversion to a primitive Parquet C type. Only implemented for certain
+  /// Parquet type / Arrow type combinations, like BYTE_ARRAY /
+  /// arrow::BinaryArray
+  ///
+  /// If update_counts is true then the null_count and num_values will be updated
+  /// based on the null_count of values. Set it to false if these are updated
+  /// elsewhere (e.g. when updating a dictionary where the counts are taken from
+  /// the indices and not the values)
+  virtual void Update(const ::arrow::Array& values, bool update_counts = true) = 0;
+
+  /// \brief Set min and max values to particular values
+  virtual void SetMinMax(const T& min, const T& max) = 0;
+
+  /// \brief Increment the null count directly
+  /// Use Update to extract the null count from data. Use this if you determine
+  /// the null count through some other means (e.g. dictionary arrays where the
+  /// null count is determined from the indices)
+  virtual void IncrementNullCount(int64_t n) = 0;
+
+  /// \brief Increment the number of values directly
+  /// The same note on IncrementNullCount applies here
+  virtual void IncrementNumValues(int64_t n) = 0;
+};
+
+using BoolStatistics = TypedStatistics<BooleanType>;
+using Int32Statistics = TypedStatistics<Int32Type>;
+using Int64Statistics = TypedStatistics<Int64Type>;
+using FloatStatistics = TypedStatistics<FloatType>;
+using DoubleStatistics = TypedStatistics<DoubleType>;
+using ByteArrayStatistics = TypedStatistics<ByteArrayType>;
+using FLBAStatistics = TypedStatistics<FLBAType>;
+
+/// \brief Typed version of Statistics::Make
+template <typename DType>
+std::shared_ptr<TypedStatistics<DType>> MakeStatistics(
+    const ColumnDescriptor* descr,
+    ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
+  return std::static_pointer_cast<TypedStatistics<DType>>(Statistics::Make(descr, pool));
+}
+
+/// \brief Create Statistics initialized to a particular state
+/// \param[in] min the minimum value
+/// \param[in] max the maximum value
+/// \param[in] num_values number of values
+/// \param[in] null_count number of null values
+/// \param[in] distinct_count number of distinct values
+template <typename DType>
+std::shared_ptr<TypedStatistics<DType>> MakeStatistics(const typename DType::c_type& min,
+                                                       const typename DType::c_type& max,
+                                                       int64_t num_values,
+                                                       int64_t null_count,
+                                                       int64_t distinct_count) {
+  return std::static_pointer_cast<TypedStatistics<DType>>(Statistics::Make(
+      DType::type_num, &min, &max, num_values, null_count, distinct_count));
+}
+
+/// \brief Typed version of Statistics::Make
+template <typename DType>
+std::shared_ptr<TypedStatistics<DType>> MakeStatistics(
+    const ColumnDescriptor* descr, const std::string& encoded_min,
+    const std::string& encoded_max, int64_t num_values, int64_t null_count,
+    int64_t distinct_count, bool has_min_max, bool has_null_count,
+    bool has_distinct_count, ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
+  return std::static_pointer_cast<TypedStatistics<DType>>(Statistics::Make(
+      descr, encoded_min, encoded_max, num_values, null_count, distinct_count,
+      has_min_max, has_null_count, has_distinct_count, pool));
+}
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7dadac92c89277a104e3acc4149a77258177c8c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_reader.h
@@ -0,0 +1,303 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "parquet/column_reader.h"
+#include "parquet/file_reader.h"
+#include "parquet/stream_writer.h"
+
+namespace parquet {
+
+/// \brief A class for reading Parquet files using an input stream type API.
+///
+/// The values given must be of the correct type, i.e. the type must
+/// match the file schema exactly, otherwise a ParquetException will be
+/// thrown.
+///
+/// The user must explicitly advance to the next row using the
+/// EndRow() function or EndRow input manipulator.
+///
+/// Required and optional fields are supported:
+/// - Required fields are read using operator>>(T)
+/// - Optional fields are read with
+///   operator>>(std::optional<T>)
+///
+/// Note that operator>>(std::optional<T>) can be used to read
+/// required fields.
+///
+/// Similarly operator>>(T) can be used to read optional fields.
+/// However, if the value is not present then a ParquetException will
+/// be raised.
+///
+/// Currently there is no support for repeated fields.
+///
+class PARQUET_EXPORT StreamReader {
+ public:
+  template <typename T>
+  using optional = ::std::optional<T>;
+
+  // N.B. Default constructed objects are not usable. This
+  // constructor is provided so that the object may be move
+  // assigned afterwards.
+  StreamReader() = default;
+
+  explicit StreamReader(std::unique_ptr<ParquetFileReader> reader);
+
+  ~StreamReader() = default;
+
+  bool eof() const { return eof_; }
+
+  int current_column() const { return column_index_; }
+
+  int64_t current_row() const { return current_row_; }
+
+  int num_columns() const;
+
+  int64_t num_rows() const;
+
+  // Moving is possible.
+  StreamReader(StreamReader&&) = default;
+  StreamReader& operator=(StreamReader&&) = default;
+
+  // Copying is not allowed.
+  StreamReader(const StreamReader&) = delete;
+  StreamReader& operator=(const StreamReader&) = delete;
+
+  StreamReader& operator>>(bool& v);
+
+  StreamReader& operator>>(int8_t& v);
+
+  StreamReader& operator>>(uint8_t& v);
+
+  StreamReader& operator>>(int16_t& v);
+
+  StreamReader& operator>>(uint16_t& v);
+
+  StreamReader& operator>>(int32_t& v);
+
+  StreamReader& operator>>(uint32_t& v);
+
+  StreamReader& operator>>(int64_t& v);
+
+  StreamReader& operator>>(uint64_t& v);
+
+  StreamReader& operator>>(std::chrono::milliseconds& v);
+
+  StreamReader& operator>>(std::chrono::microseconds& v);
+
+  StreamReader& operator>>(float& v);
+
+  StreamReader& operator>>(double& v);
+
+  StreamReader& operator>>(char& v);
+
+  template <int N>
+  StreamReader& operator>>(char (&v)[N]) {
+    ReadFixedLength(v, N);
+    return *this;
+  }
+
+  template <std::size_t N>
+  StreamReader& operator>>(std::array<char, N>& v) {
+    ReadFixedLength(v.data(), static_cast<int>(N));
+    return *this;
+  }
+
+  // N.B. Cannot allow for reading to an arbitrary char pointer as the
+  // length cannot be verified. Also it would overshadow the
+  // char[N] input operator.
+  // StreamReader& operator>>(char * v);
+
+  StreamReader& operator>>(std::string& v);
+
+  StreamReader& operator>>(::arrow::Decimal128& v);
+
+  // Input operators for optional fields.
+
+  StreamReader& operator>>(optional<bool>& v);
+
+  StreamReader& operator>>(optional<int8_t>& v);
+
+  StreamReader& operator>>(optional<uint8_t>& v);
+
+  StreamReader& operator>>(optional<int16_t>& v);
+
+  StreamReader& operator>>(optional<uint16_t>& v);
+
+  StreamReader& operator>>(optional<int32_t>& v);
+
+  StreamReader& operator>>(optional<uint32_t>& v);
+
+  StreamReader& operator>>(optional<int64_t>& v);
+
+  StreamReader& operator>>(optional<uint64_t>& v);
+
+  StreamReader& operator>>(optional<std::chrono::milliseconds>& v);
+
+  StreamReader& operator>>(optional<std::chrono::microseconds>& v);
+
+  StreamReader& operator>>(optional<float>& v);
+
+  StreamReader& operator>>(optional<double>& v);
+
+  StreamReader& operator>>(optional<char>& v);
+
+  StreamReader& operator>>(optional<std::string>& v);
+
+  StreamReader& operator>>(optional<::arrow::Decimal128>& v);
+
+  template <std::size_t N>
+  StreamReader& operator>>(optional<std::array<char, N>>& v) {
+    CheckColumn(Type::FIXED_LEN_BYTE_ARRAY, ConvertedType::NONE, N);
+    FixedLenByteArray flba;
+    if (ReadOptional(&flba)) {
+      v = std::array<char, N>{};
+      std::memcpy(v->data(), flba.ptr, N);
+    } else {
+      v.reset();
+    }
+    return *this;
+  }
+
+  /// \brief Terminate the current row and advance to the next one.
+  /// \throws ParquetException if all columns in the row were not
+  /// read or skipped.
+  void EndRow();
+
+  /// \brief Skip the data in the next columns.
+  /// If the number of columns exceeds the columns remaining on the
+  /// current row then skipping is terminated - it does _not_ continue
+  /// skipping columns on the next row.
+  /// Skipping of columns still requires the use of 'EndRow', even if all
+  /// remaining columns were skipped.
+  /// \return Number of columns actually skipped.
+  int64_t SkipColumns(int64_t num_columns_to_skip);
+
+  /// \brief Skip the data in the next rows.
+  /// Skipping of rows is not allowed if reading of data for the
+  /// current row is not finished.
+  /// Skipping of rows will be terminated if the end of file is
+  /// reached.
+  /// \return Number of rows actually skipped.
+  int64_t SkipRows(int64_t num_rows_to_skip);
+
+ protected:
+  [[noreturn]] void ThrowReadFailedException(
+      const std::shared_ptr<schema::Node>& node);
+
+  template <typename ReaderType, typename T>
+  void Read(T* v) {
+    const auto& node = nodes_[column_index_];
+    auto reader = static_cast<ReaderType*>(column_readers_[column_index_++].get());
+    int16_t def_level;
+    int16_t rep_level;
+    int64_t values_read;
+
+    reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, v, &values_read);
+
+    if (values_read != 1) {
+      ThrowReadFailedException(node);
+    }
+  }
+
+  template <typename ReaderType, typename ReadType, typename T>
+  void Read(T* v) {
+    const auto& node = nodes_[column_index_];
+    auto reader = static_cast<ReaderType*>(column_readers_[column_index_++].get());
+    int16_t def_level;
+    int16_t rep_level;
+    ReadType tmp;
+    int64_t values_read;
+
+    reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, &tmp, &values_read);
+
+    if (values_read == 1) {
+      *v = tmp;
+    } else {
+      ThrowReadFailedException(node);
+    }
+  }
+
+  template <typename ReaderType, typename ReadType, typename T>
+  void ReadOptional(optional<T>* v) {
+    const auto& node = nodes_[column_index_];
+    auto reader = static_cast<ReaderType*>(column_readers_[column_index_++].get());
+    int16_t def_level;
+    int16_t rep_level;
+    ReadType tmp;
+    int64_t values_read;
+
+    reader->ReadBatch(kBatchSizeOne, &def_level, &rep_level, &tmp, &values_read);
+
+    if (values_read == 1) {
+      *v = T(tmp);
+    } else if ((values_read == 0) && (def_level == 0)) {
+      v->reset();
+    } else {
+      ThrowReadFailedException(node);
+    }
+  }
+
+  void ReadFixedLength(char* ptr, int len);
+
+  void Read(ByteArray* v);
+
+  void Read(FixedLenByteArray* v);
+
+  bool ReadOptional(ByteArray* v);
+
+  bool ReadOptional(FixedLenByteArray* v);
+
+  void NextRowGroup();
+
+  void CheckColumn(Type::type physical_type, ConvertedType::type converted_type,
+                   int length = 0);
+
+  void SkipRowsInColumn(ColumnReader* reader, int64_t num_rows_to_skip);
+
+  void SetEof();
+
+ private:
+  std::unique_ptr<ParquetFileReader> file_reader_;
+  std::shared_ptr<FileMetaData> file_metadata_;
+  std::shared_ptr<RowGroupReader> row_group_reader_;
+  std::vector<std::shared_ptr<ColumnReader>> column_readers_;
+  std::vector<std::shared_ptr<schema::Node>> nodes_;
+
+  bool eof_{true};
+  int row_group_index_{0};
+  int column_index_{0};
+  int64_t current_row_{0};
+  int64_t row_group_row_offset_{0};
+
+  static constexpr int64_t kBatchSizeOne = 1;
+}; // class StreamReader
+
+PARQUET_EXPORT
+StreamReader& operator>>(StreamReader&, EndRowType);
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_writer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..7637cf7da245c4d61ad7bedf5512b6903189a5b4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/stream_writer.h
@@ -0,0 +1,243 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
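+
+// A minimal usage sketch for the stream writer API declared in this file
+// (illustrative only, not part of the upstream header). It assumes `sink`
+// (an ::arrow::io::OutputStream) and `schema` (a schema::GroupNode) have
+// been created elsewhere, and uses ParquetFileWriter::Open() from
+// "parquet/file_writer.h":
+//
+//   parquet::StreamWriter os{parquet::ParquetFileWriter::Open(sink, schema)};
+//   os << std::string("hello") << int32_t{42} << parquet::EndRow;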
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/column_writer.h" +#include "parquet/file_writer.h" + +namespace parquet { + +/// \brief A class for writing Parquet files using an output stream type API. +/// +/// The values given must be of the correct type i.e. the type must +/// match the file schema exactly otherwise a ParquetException will be +/// thrown. +/// +/// The user must explicitly indicate the end of the row using the +/// EndRow() function or EndRow output manipulator. +/// +/// A maximum row group size can be configured, the default size is +/// 512MB. Alternatively the row group size can be set to zero and the +/// user can create new row groups by calling the EndRowGroup() +/// function or using the EndRowGroup output manipulator. +/// +/// Required and optional fields are supported: +/// - Required fields are written using operator<<(T) +/// - Optional fields are written using +/// operator<<(std::optional). +/// +/// Note that operator<<(T) can be used to write optional fields. +/// +/// Similarly, operator<<(std::optional) can be used to +/// write required fields. However if the optional parameter does not +/// have a value (i.e. it is nullopt) then a ParquetException will be +/// raised. +/// +/// Currently there is no support for repeated fields. +/// +class PARQUET_EXPORT StreamWriter { + public: + template + using optional = ::std::optional; + + // N.B. Default constructed objects are not usable. This + // constructor is provided so that the object may be move + // assigned afterwards. + StreamWriter() = default; + + explicit StreamWriter(std::unique_ptr writer); + + ~StreamWriter() = default; + + static void SetDefaultMaxRowGroupSize(int64_t max_size); + + void SetMaxRowGroupSize(int64_t max_size); + + int current_column() const { return column_index_; } + + int64_t current_row() const { return current_row_; } + + int num_columns() const; + + // Moving is possible. + StreamWriter(StreamWriter&&) = default; + StreamWriter& operator=(StreamWriter&&) = default; + + // Copying is not allowed. + StreamWriter(const StreamWriter&) = delete; + StreamWriter& operator=(const StreamWriter&) = delete; + + /// \brief Output operators for required fields. + /// These can also be used for optional fields when a value must be set. + StreamWriter& operator<<(bool v); + + StreamWriter& operator<<(int8_t v); + + StreamWriter& operator<<(uint8_t v); + + StreamWriter& operator<<(int16_t v); + + StreamWriter& operator<<(uint16_t v); + + StreamWriter& operator<<(int32_t v); + + StreamWriter& operator<<(uint32_t v); + + StreamWriter& operator<<(int64_t v); + + StreamWriter& operator<<(uint64_t v); + + StreamWriter& operator<<(const std::chrono::milliseconds& v); + + StreamWriter& operator<<(const std::chrono::microseconds& v); + + StreamWriter& operator<<(float v); + + StreamWriter& operator<<(double v); + + StreamWriter& operator<<(char v); + + /// \brief Helper class to write fixed length strings. + /// This is useful as the standard string view (such as + /// std::string_view) is for variable length data. + struct PARQUET_EXPORT FixedStringView { + FixedStringView() = default; + + explicit FixedStringView(const char* data_ptr); + + FixedStringView(const char* data_ptr, std::size_t data_len); + + const char* data{NULLPTR}; + std::size_t size{0}; + }; + + /// \brief Output operators for fixed length strings. 
+  template <int N>
+  StreamWriter& operator<<(const char (&v)[N]) {
+    return WriteFixedLength(v, N);
+  }
+
+  template <std::size_t N>
+  StreamWriter& operator<<(const std::array<char, N>& v) {
+    return WriteFixedLength(v.data(), N);
+  }
+
+  StreamWriter& operator<<(FixedStringView v);
+
+  /// \brief Output operators for variable length strings.
+  StreamWriter& operator<<(const char* v);
+  StreamWriter& operator<<(const std::string& v);
+  StreamWriter& operator<<(::std::string_view v);
+
+  /// \brief Output operator for optional fields.
+  template <typename T>
+  StreamWriter& operator<<(const optional<T>& v) {
+    if (v) {
+      return operator<<(*v);
+    }
+    SkipOptionalColumn();
+    return *this;
+  }
+
+  /// \brief Skip the next N columns of optional data. If there are
+  /// fewer than N columns remaining then the excess columns are
+  /// ignored.
+  /// \throws ParquetException if there is an attempt to skip any
+  /// required column.
+  /// \return Number of columns actually skipped.
+  int64_t SkipColumns(int num_columns_to_skip);
+
+  /// \brief Terminate the current row and advance to the next one.
+  /// \throws ParquetException if all columns in the row were not
+  /// written or skipped.
+  void EndRow();
+
+  /// \brief Terminate the current row group and create a new one.
+  void EndRowGroup();
+
+ protected:
+  template <typename WriterType, typename T>
+  StreamWriter& Write(const T v) {
+    auto writer = static_cast<WriterType*>(row_group_writer_->column(column_index_++));
+
+    writer->WriteBatch(kBatchSizeOne, &kDefLevelOne, &kRepLevelZero, &v);
+
+    if (max_row_group_size_ > 0) {
+      row_group_size_ += writer->estimated_buffered_value_bytes();
+    }
+    return *this;
+  }
+
+  StreamWriter& WriteVariableLength(const char* data_ptr, std::size_t data_len);
+
+  StreamWriter& WriteFixedLength(const char* data_ptr, std::size_t data_len);
+
+  void CheckColumn(Type::type physical_type, ConvertedType::type converted_type,
+                   int length = -1);
+
+  /// \brief Skip the next column, which must be optional.
+  /// \throws ParquetException if the next column does not exist or is
+  /// not optional.
+  void SkipOptionalColumn();
+
+  void WriteNullValue(ColumnWriter* writer);
+
+ private:
+  using node_ptr_type = std::shared_ptr<schema::PrimitiveNode>;
+
+  struct null_deleter {
+    void operator()(void*) {}
+  };
+
+  int32_t column_index_{0};
+  int64_t current_row_{0};
+  int64_t row_group_size_{0};
+  int64_t max_row_group_size_{default_row_group_size_};
+
+  std::unique_ptr<ParquetFileWriter> file_writer_;
+  std::unique_ptr<RowGroupWriter, null_deleter> row_group_writer_;
+  std::vector<node_ptr_type> nodes_;
+
+  static constexpr int16_t kDefLevelZero = 0;
+  static constexpr int16_t kDefLevelOne = 1;
+  static constexpr int16_t kRepLevelZero = 0;
+  static constexpr int64_t kBatchSizeOne = 1;
+
+  static int64_t default_row_group_size_;
+};
+
+struct PARQUET_EXPORT EndRowType {};
+constexpr EndRowType EndRow = {};
+
+struct PARQUET_EXPORT EndRowGroupType {};
+constexpr EndRowGroupType EndRowGroup = {};
+
+PARQUET_EXPORT
+StreamWriter& operator<<(StreamWriter&, EndRowType);
+
+PARQUET_EXPORT
+StreamWriter& operator<<(StreamWriter&, EndRowGroupType);
+
+} // namespace parquet
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/test_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..59728cf53f699f0767bdc1d5d792b266378d55f1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/test_util.h
@@ -0,0 +1,834 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This module defines an abstract interface for iterating through pages in a +// Parquet column chunk within a row group. It could be extended in the future +// to iterate through all data pages in all chunks in a file. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "arrow/io/memory.h" +#include "arrow/testing/util.h" +#include "arrow/util/float16.h" + +#include "parquet/column_page.h" +#include "parquet/column_reader.h" +#include "parquet/column_writer.h" +#include "parquet/encoding.h" +#include "parquet/platform.h" + +// https://github.com/google/googletest/pull/2904 might not be available +// in our version of gtest/gmock +#define EXPECT_THROW_THAT(callable, ex_type, property) \ + EXPECT_THROW( \ + try { (callable)(); } catch (const ex_type& err) { \ + EXPECT_THAT(err, (property)); \ + throw; \ + }, \ + ex_type) + +namespace parquet { + +static constexpr int FLBA_LENGTH = 12; + +inline bool operator==(const FixedLenByteArray& a, const FixedLenByteArray& b) { + return 0 == memcmp(a.ptr, b.ptr, FLBA_LENGTH); +} + +namespace test { + +typedef ::testing::Types + ParquetTypes; + +class ParquetTestException : public parquet::ParquetException { + using ParquetException::ParquetException; +}; + +const char* get_data_dir(); +std::string get_bad_data_dir(); + +std::string get_data_file(const std::string& filename, bool is_good = true); + +template +static inline void assert_vector_equal(const std::vector& left, + const std::vector& right) { + ASSERT_EQ(left.size(), right.size()); + + for (size_t i = 0; i < left.size(); ++i) { + ASSERT_EQ(left[i], right[i]) << i; + } +} + +template +static inline bool vector_equal(const std::vector& left, const std::vector& right) { + if (left.size() != right.size()) { + return false; + } + + for (size_t i = 0; i < left.size(); ++i) { + if (left[i] != right[i]) { + std::cerr << "index " << i << " left was " << left[i] << " right was " << right[i] + << std::endl; + return false; + } + } + + return true; +} + +template +static std::vector slice(const std::vector& values, int start, int end) { + if (end < start) { + return std::vector(0); + } + + std::vector out(end - start); + for (int i = start; i < end; ++i) { + out[i - start] = values[i]; + } + return out; +} + +void random_bytes(int n, uint32_t seed, std::vector* out); +void random_bools(int n, double p, uint32_t seed, bool* out); + +template +inline void random_numbers(int n, uint32_t seed, T min_value, T max_value, T* out) { + std::default_random_engine gen(seed); + std::uniform_int_distribution d(min_value, max_value); + for (int i = 0; i < n; ++i) { + out[i] = d(gen); + } +} + +template <> +inline void random_numbers(int n, uint32_t seed, float min_value, float max_value, + float* out) { + std::default_random_engine gen(seed); + 
std::uniform_real_distribution d(min_value, max_value); + for (int i = 0; i < n; ++i) { + out[i] = d(gen); + } +} + +template <> +inline void random_numbers(int n, uint32_t seed, double min_value, double max_value, + double* out) { + std::default_random_engine gen(seed); + std::uniform_real_distribution d(min_value, max_value); + for (int i = 0; i < n; ++i) { + out[i] = d(gen); + } +} + +void random_Int96_numbers(int n, uint32_t seed, int32_t min_value, int32_t max_value, + Int96* out); + +void random_float16_numbers(int n, uint32_t seed, ::arrow::util::Float16 min_value, + ::arrow::util::Float16 max_value, uint16_t* out); + +void random_fixed_byte_array(int n, uint32_t seed, uint8_t* buf, int len, FLBA* out); + +void random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out, int min_size, + int max_size); + +void random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out, int max_size); + +void prefixed_random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out, + int min_size, int max_size, double prefixed_probability); + +void prefixed_random_byte_array(int n, uint32_t seed, uint8_t* buf, int len, FLBA* out, + double prefixed_probability); + +template +std::shared_ptr EncodeValues(Encoding::type encoding, bool use_dictionary, + const Sequence& values, int length, + const ColumnDescriptor* descr) { + auto encoder = MakeTypedEncoder(encoding, use_dictionary, descr); + encoder->Put(values, length); + return encoder->FlushValues(); +} + +template +static void InitValues(int num_values, uint32_t seed, std::vector& values, + std::vector& buffer) { + random_numbers(num_values, seed, std::numeric_limits::min(), + std::numeric_limits::max(), values.data()); +} + +template +static void InitValues(int num_values, std::vector& values, + std::vector& buffer) { + InitValues(num_values, 0, values, buffer); +} + +template +static void InitDictValues(int num_values, int num_dicts, std::vector& values, + std::vector& buffer) { + int repeat_factor = num_values / num_dicts; + InitValues(num_dicts, values, buffer); + // add some repeated values + for (int j = 1; j < repeat_factor; ++j) { + for (int i = 0; i < num_dicts; ++i) { + std::memcpy(&values[num_dicts * j + i], &values[i], sizeof(T)); + } + } + // computed only dict_per_page * repeat_factor - 1 values < num_values + // compute remaining + for (int i = num_dicts * repeat_factor; i < num_values; ++i) { + std::memcpy(&values[i], &values[i - num_dicts * repeat_factor], sizeof(T)); + } +} + +template <> +inline void InitDictValues(int num_values, int num_dicts, std::vector& values, + std::vector& buffer) { + // No op for bool +} + +class MockPageReader : public PageReader { + public: + explicit MockPageReader(const std::vector>& pages) + : pages_(pages), page_index_(0) {} + + std::shared_ptr NextPage() override { + if (page_index_ == static_cast(pages_.size())) { + // EOS to consumer + return std::shared_ptr(nullptr); + } + return pages_[page_index_++]; + } + + // No-op + void set_max_page_header_size(uint32_t size) override {} + + private: + std::vector> pages_; + int page_index_; +}; + +// TODO(wesm): this is only used for testing for now. 
Refactor to form part of +// primary file write path +template +class DataPageBuilder { + public: + using c_type = typename Type::c_type; + + // This class writes data and metadata to the passed inputs + explicit DataPageBuilder(ArrowOutputStream* sink) + : sink_(sink), + num_values_(0), + encoding_(Encoding::PLAIN), + definition_level_encoding_(Encoding::RLE), + repetition_level_encoding_(Encoding::RLE), + have_def_levels_(false), + have_rep_levels_(false), + have_values_(false) {} + + void AppendDefLevels(const std::vector& levels, int16_t max_level, + Encoding::type encoding = Encoding::RLE) { + AppendLevels(levels, max_level, encoding); + + num_values_ = std::max(static_cast(levels.size()), num_values_); + definition_level_encoding_ = encoding; + have_def_levels_ = true; + } + + void AppendRepLevels(const std::vector& levels, int16_t max_level, + Encoding::type encoding = Encoding::RLE) { + AppendLevels(levels, max_level, encoding); + + num_values_ = std::max(static_cast(levels.size()), num_values_); + repetition_level_encoding_ = encoding; + have_rep_levels_ = true; + } + + void AppendValues(const ColumnDescriptor* d, const std::vector& values, + Encoding::type encoding = Encoding::PLAIN) { + std::shared_ptr values_sink = EncodeValues( + encoding, false, values.data(), static_cast(values.size()), d); + PARQUET_THROW_NOT_OK(sink_->Write(values_sink->data(), values_sink->size())); + + num_values_ = std::max(static_cast(values.size()), num_values_); + encoding_ = encoding; + have_values_ = true; + } + + int32_t num_values() const { return num_values_; } + + Encoding::type encoding() const { return encoding_; } + + Encoding::type rep_level_encoding() const { return repetition_level_encoding_; } + + Encoding::type def_level_encoding() const { return definition_level_encoding_; } + + private: + ArrowOutputStream* sink_; + + int32_t num_values_; + Encoding::type encoding_; + Encoding::type definition_level_encoding_; + Encoding::type repetition_level_encoding_; + + bool have_def_levels_; + bool have_rep_levels_; + bool have_values_; + + // Used internally for both repetition and definition levels + void AppendLevels(const std::vector& levels, int16_t max_level, + Encoding::type encoding) { + if (encoding != Encoding::RLE) { + ParquetException::NYI("only rle encoding currently implemented"); + } + + std::vector encode_buffer(LevelEncoder::MaxBufferSize( + Encoding::RLE, max_level, static_cast(levels.size()))); + + // We encode into separate memory from the output stream because the + // RLE-encoded bytes have to be preceded in the stream by their absolute + // size. 
+ LevelEncoder encoder; + encoder.Init(encoding, max_level, static_cast(levels.size()), + encode_buffer.data(), static_cast(encode_buffer.size())); + + encoder.Encode(static_cast(levels.size()), levels.data()); + + int32_t rle_bytes = encoder.len(); + PARQUET_THROW_NOT_OK( + sink_->Write(reinterpret_cast(&rle_bytes), sizeof(int32_t))); + PARQUET_THROW_NOT_OK(sink_->Write(encode_buffer.data(), rle_bytes)); + } +}; + +template <> +inline void DataPageBuilder::AppendValues(const ColumnDescriptor* d, + const std::vector& values, + Encoding::type encoding) { + if (encoding != Encoding::PLAIN) { + ParquetException::NYI("only plain encoding currently implemented"); + } + + auto encoder = MakeTypedEncoder(Encoding::PLAIN, false, d); + dynamic_cast(encoder.get()) + ->Put(values, static_cast(values.size())); + std::shared_ptr buffer = encoder->FlushValues(); + PARQUET_THROW_NOT_OK(sink_->Write(buffer->data(), buffer->size())); + + num_values_ = std::max(static_cast(values.size()), num_values_); + encoding_ = encoding; + have_values_ = true; +} + +template +static std::shared_ptr MakeDataPage( + const ColumnDescriptor* d, const std::vector& values, + int num_vals, Encoding::type encoding, const uint8_t* indices, int indices_size, + const std::vector& def_levels, int16_t max_def_level, + const std::vector& rep_levels, int16_t max_rep_level) { + int num_values = 0; + + auto page_stream = CreateOutputStream(); + test::DataPageBuilder page_builder(page_stream.get()); + + if (!rep_levels.empty()) { + page_builder.AppendRepLevels(rep_levels, max_rep_level); + } + if (!def_levels.empty()) { + page_builder.AppendDefLevels(def_levels, max_def_level); + } + + if (encoding == Encoding::PLAIN) { + page_builder.AppendValues(d, values, encoding); + num_values = std::max(page_builder.num_values(), num_vals); + } else { // DICTIONARY PAGES + PARQUET_THROW_NOT_OK(page_stream->Write(indices, indices_size)); + num_values = std::max(page_builder.num_values(), num_vals); + } + + PARQUET_ASSIGN_OR_THROW(auto buffer, page_stream->Finish()); + + return std::make_shared(buffer, num_values, encoding, + page_builder.def_level_encoding(), + page_builder.rep_level_encoding(), buffer->size()); +} + +template +class DictionaryPageBuilder { + public: + typedef typename TYPE::c_type TC; + static constexpr int TN = TYPE::type_num; + using SpecializedEncoder = typename EncodingTraits::Encoder; + + // This class writes data and metadata to the passed inputs + explicit DictionaryPageBuilder(const ColumnDescriptor* d) + : num_dict_values_(0), have_values_(false) { + auto encoder = MakeTypedEncoder(Encoding::PLAIN, true, d); + dict_traits_ = dynamic_cast*>(encoder.get()); + encoder_.reset(dynamic_cast(encoder.release())); + } + + ~DictionaryPageBuilder() {} + + std::shared_ptr AppendValues(const std::vector& values) { + int num_values = static_cast(values.size()); + // Dictionary encoding + encoder_->Put(values.data(), num_values); + num_dict_values_ = dict_traits_->num_entries(); + have_values_ = true; + return encoder_->FlushValues(); + } + + std::shared_ptr WriteDict() { + std::shared_ptr dict_buffer = + AllocateBuffer(::arrow::default_memory_pool(), dict_traits_->dict_encoded_size()); + dict_traits_->WriteDict(dict_buffer->mutable_data()); + return dict_buffer; + } + + int32_t num_values() const { return num_dict_values_; } + + private: + DictEncoder* dict_traits_; + std::unique_ptr encoder_; + int32_t num_dict_values_; + bool have_values_; +}; + +template <> +inline DictionaryPageBuilder::DictionaryPageBuilder( + const 
ColumnDescriptor* d) { + ParquetException::NYI("only plain encoding currently implemented for boolean"); +} + +template <> +inline std::shared_ptr DictionaryPageBuilder::WriteDict() { + ParquetException::NYI("only plain encoding currently implemented for boolean"); + return nullptr; +} + +template <> +inline std::shared_ptr DictionaryPageBuilder::AppendValues( + const std::vector& values) { + ParquetException::NYI("only plain encoding currently implemented for boolean"); + return nullptr; +} + +template +inline static std::shared_ptr MakeDictPage( + const ColumnDescriptor* d, const std::vector& values, + const std::vector& values_per_page, Encoding::type encoding, + std::vector>& rle_indices) { + test::DictionaryPageBuilder page_builder(d); + int num_pages = static_cast(values_per_page.size()); + int value_start = 0; + + for (int i = 0; i < num_pages; i++) { + rle_indices.push_back(page_builder.AppendValues( + slice(values, value_start, value_start + values_per_page[i]))); + value_start += values_per_page[i]; + } + + auto buffer = page_builder.WriteDict(); + + return std::make_shared(buffer, page_builder.num_values(), + Encoding::PLAIN); +} + +// Given def/rep levels and values create multiple dict pages +template +inline static void PaginateDict(const ColumnDescriptor* d, + const std::vector& values, + const std::vector& def_levels, + int16_t max_def_level, + const std::vector& rep_levels, + int16_t max_rep_level, int num_levels_per_page, + const std::vector& values_per_page, + std::vector>& pages, + Encoding::type encoding = Encoding::RLE_DICTIONARY) { + int num_pages = static_cast(values_per_page.size()); + std::vector> rle_indices; + std::shared_ptr dict_page = + MakeDictPage(d, values, values_per_page, encoding, rle_indices); + pages.push_back(dict_page); + int def_level_start = 0; + int def_level_end = 0; + int rep_level_start = 0; + int rep_level_end = 0; + for (int i = 0; i < num_pages; i++) { + if (max_def_level > 0) { + def_level_start = i * num_levels_per_page; + def_level_end = (i + 1) * num_levels_per_page; + } + if (max_rep_level > 0) { + rep_level_start = i * num_levels_per_page; + rep_level_end = (i + 1) * num_levels_per_page; + } + std::shared_ptr data_page = MakeDataPage( + d, {}, values_per_page[i], encoding, rle_indices[i]->data(), + static_cast(rle_indices[i]->size()), + slice(def_levels, def_level_start, def_level_end), max_def_level, + slice(rep_levels, rep_level_start, rep_level_end), max_rep_level); + pages.push_back(data_page); + } +} + +// Given def/rep levels and values create multiple plain pages +template +static inline void PaginatePlain(const ColumnDescriptor* d, + const std::vector& values, + const std::vector& def_levels, + int16_t max_def_level, + const std::vector& rep_levels, + int16_t max_rep_level, int num_levels_per_page, + const std::vector& values_per_page, + std::vector>& pages, + Encoding::type encoding = Encoding::PLAIN) { + int num_pages = static_cast(values_per_page.size()); + int def_level_start = 0; + int def_level_end = 0; + int rep_level_start = 0; + int rep_level_end = 0; + int value_start = 0; + for (int i = 0; i < num_pages; i++) { + if (max_def_level > 0) { + def_level_start = i * num_levels_per_page; + def_level_end = (i + 1) * num_levels_per_page; + } + if (max_rep_level > 0) { + rep_level_start = i * num_levels_per_page; + rep_level_end = (i + 1) * num_levels_per_page; + } + std::shared_ptr page = MakeDataPage( + d, slice(values, value_start, value_start + values_per_page[i]), + values_per_page[i], encoding, nullptr, 0, + 
slice(def_levels, def_level_start, def_level_end), max_def_level, + slice(rep_levels, rep_level_start, rep_level_end), max_rep_level); + pages.push_back(page); + value_start += values_per_page[i]; + } +} + +// Generates pages from randomly generated data +template +static inline int MakePages(const ColumnDescriptor* d, int num_pages, int levels_per_page, + std::vector& def_levels, + std::vector& rep_levels, + std::vector& values, + std::vector& buffer, + std::vector>& pages, + Encoding::type encoding = Encoding::PLAIN, + uint32_t seed = 0) { + int num_levels = levels_per_page * num_pages; + int num_values = 0; + int16_t zero = 0; + int16_t max_def_level = d->max_definition_level(); + int16_t max_rep_level = d->max_repetition_level(); + std::vector values_per_page(num_pages, levels_per_page); + // Create definition levels + if (max_def_level > 0 && num_levels != 0) { + def_levels.resize(num_levels); + random_numbers(num_levels, seed, zero, max_def_level, def_levels.data()); + for (int p = 0; p < num_pages; p++) { + int num_values_per_page = 0; + for (int i = 0; i < levels_per_page; i++) { + if (def_levels[i + p * levels_per_page] == max_def_level) { + num_values_per_page++; + num_values++; + } + } + values_per_page[p] = num_values_per_page; + } + } else { + num_values = num_levels; + } + // Create repetition levels + if (max_rep_level > 0 && num_levels != 0) { + rep_levels.resize(num_levels); + // Using a different seed so that def_levels and rep_levels are different. + random_numbers(num_levels, seed + 789, zero, max_rep_level, rep_levels.data()); + // The generated levels are random. Force the very first page to start with a new + // record. + rep_levels[0] = 0; + // For a null value, rep_levels and def_levels are both 0. + // If we have a repeated value right after this, it needs to start with + // rep_level = 0 to indicate a new record. 
+ for (int i = 0; i < num_levels - 1; ++i) { + if (rep_levels[i] == 0 && def_levels[i] == 0) { + rep_levels[i + 1] = 0; + } + } + } + // Create values + values.resize(num_values); + if (encoding == Encoding::PLAIN) { + InitValues(num_values, values, buffer); + PaginatePlain(d, values, def_levels, max_def_level, rep_levels, max_rep_level, + levels_per_page, values_per_page, pages); + } else if (encoding == Encoding::RLE_DICTIONARY || + encoding == Encoding::PLAIN_DICTIONARY) { + // Calls InitValues and repeats the data + InitDictValues(num_values, levels_per_page, values, buffer); + PaginateDict(d, values, def_levels, max_def_level, rep_levels, max_rep_level, + levels_per_page, values_per_page, pages); + } + + return num_values; +} + +// ---------------------------------------------------------------------- +// Test data generation + +template <> +void inline InitValues(int num_values, uint32_t seed, std::vector& values, + std::vector& buffer) { + values = {}; + if (seed == 0) { + seed = static_cast(::arrow::random_seed()); + } + ::arrow::random_is_valid(num_values, 0.5, &values, static_cast(seed)); +} + +template <> +inline void InitValues(int num_values, uint32_t seed, + std::vector& values, + std::vector& buffer) { + int max_byte_array_len = 12; + int num_bytes = static_cast(max_byte_array_len + sizeof(uint32_t)); + size_t nbytes = num_values * num_bytes; + buffer.resize(nbytes); + random_byte_array(num_values, seed, buffer.data(), values.data(), max_byte_array_len); +} + +inline void InitWideByteArrayValues(int num_values, std::vector& values, + std::vector& buffer, int min_len, + int max_len) { + int num_bytes = static_cast(max_len + sizeof(uint32_t)); + size_t nbytes = num_values * num_bytes; + buffer.resize(nbytes); + random_byte_array(num_values, 0, buffer.data(), values.data(), min_len, max_len); +} + +template <> +inline void InitValues(int num_values, uint32_t seed, std::vector& values, + std::vector& buffer) { + size_t nbytes = num_values * FLBA_LENGTH; + buffer.resize(nbytes); + random_fixed_byte_array(num_values, seed, buffer.data(), FLBA_LENGTH, values.data()); +} + +template <> +inline void InitValues(int num_values, uint32_t seed, std::vector& values, + std::vector& buffer) { + random_Int96_numbers(num_values, seed, std::numeric_limits::min(), + std::numeric_limits::max(), values.data()); +} + +inline std::string TestColumnName(int i) { + std::stringstream col_name; + col_name << "column_" << i; + return col_name.str(); +} + +// This class lives here because of its dependency on the InitValues specializations. 
+template +class PrimitiveTypedTest : public ::testing::Test { + public: + using c_type = typename TestType::c_type; + + void SetUpSchema(Repetition::type repetition, int num_columns = 1) { + std::vector fields; + + for (int i = 0; i < num_columns; ++i) { + std::string name = TestColumnName(i); + fields.push_back(schema::PrimitiveNode::Make(name, repetition, TestType::type_num, + ConvertedType::NONE, FLBA_LENGTH)); + } + node_ = schema::GroupNode::Make("schema", Repetition::REQUIRED, fields); + schema_.Init(node_); + } + + void GenerateData(int64_t num_values, uint32_t seed = 0); + void SetupValuesOut(int64_t num_values); + void SyncValuesOut(); + + protected: + schema::NodePtr node_; + SchemaDescriptor schema_; + + // Input buffers + std::vector values_; + + std::vector def_levels_; + + std::vector buffer_; + // Pointer to the values, needed as we cannot use std::vector::data() + c_type* values_ptr_; + std::vector bool_buffer_; + + // Output buffers + std::vector values_out_; + std::vector bool_buffer_out_; + c_type* values_out_ptr_; +}; + +template +inline void PrimitiveTypedTest::SyncValuesOut() {} + +template <> +inline void PrimitiveTypedTest::SyncValuesOut() { + std::vector::const_iterator source_iterator = bool_buffer_out_.begin(); + std::vector::iterator destination_iterator = values_out_.begin(); + while (source_iterator != bool_buffer_out_.end()) { + *destination_iterator++ = *source_iterator++ != 0; + } +} + +template +inline void PrimitiveTypedTest::SetupValuesOut(int64_t num_values) { + values_out_.clear(); + values_out_.resize(num_values); + values_out_ptr_ = values_out_.data(); +} + +template <> +inline void PrimitiveTypedTest::SetupValuesOut(int64_t num_values) { + values_out_.clear(); + values_out_.resize(num_values); + + bool_buffer_out_.clear(); + bool_buffer_out_.resize(num_values); + // Write once to all values so we can copy it without getting Valgrind errors + // about uninitialised values. 
+ std::fill(bool_buffer_out_.begin(), bool_buffer_out_.end(), true); + values_out_ptr_ = reinterpret_cast(bool_buffer_out_.data()); +} + +template +inline void PrimitiveTypedTest::GenerateData(int64_t num_values, + uint32_t seed) { + def_levels_.resize(num_values); + values_.resize(num_values); + + InitValues(static_cast(num_values), seed, values_, buffer_); + values_ptr_ = values_.data(); + + std::fill(def_levels_.begin(), def_levels_.end(), 1); +} + +template <> +inline void PrimitiveTypedTest::GenerateData(int64_t num_values, + uint32_t seed) { + def_levels_.resize(num_values); + values_.resize(num_values); + + InitValues(static_cast(num_values), seed, values_, buffer_); + bool_buffer_.resize(num_values); + std::copy(values_.begin(), values_.end(), bool_buffer_.begin()); + values_ptr_ = reinterpret_cast(bool_buffer_.data()); + + std::fill(def_levels_.begin(), def_levels_.end(), 1); +} + +// ---------------------------------------------------------------------- +// test data generation + +template +inline void GenerateData(int num_values, T* out, std::vector* heap) { + // seed the prng so failure is deterministic + random_numbers(num_values, 0, std::numeric_limits::min(), + std::numeric_limits::max(), out); +} + +template +inline void GenerateBoundData(int num_values, T* out, T min, T max, + std::vector* heap) { + // seed the prng so failure is deterministic + random_numbers(num_values, 0, min, max, out); +} + +template <> +inline void GenerateData(int num_values, bool* out, std::vector* heap) { + // seed the prng so failure is deterministic + random_bools(num_values, 0.5, 0, out); +} + +template <> +inline void GenerateData(int num_values, Int96* out, std::vector* heap) { + // seed the prng so failure is deterministic + random_Int96_numbers(num_values, 0, std::numeric_limits::min(), + std::numeric_limits::max(), out); +} + +template <> +inline void GenerateData(int num_values, ByteArray* out, + std::vector* heap) { + int max_byte_array_len = 12; + heap->resize(num_values * max_byte_array_len); + // seed the prng so failure is deterministic + random_byte_array(num_values, 0, heap->data(), out, 2, max_byte_array_len); +} + +// Generate ByteArray or FLBA data where there is a given probability +// for each value to share a common prefix with its predecessor. +// This is useful to exercise prefix-based encodings such as DELTA_BYTE_ARRAY. 
+template +inline void GeneratePrefixedData(int num_values, T* out, std::vector* heap, + double prefixed_probability); + +template <> +inline void GeneratePrefixedData(int num_values, ByteArray* out, + std::vector* heap, + double prefixed_probability) { + int max_byte_array_len = 12; + heap->resize(num_values * max_byte_array_len); + // seed the prng so failure is deterministic + prefixed_random_byte_array(num_values, /*seed=*/0, heap->data(), out, /*min_size=*/2, + /*max_size=*/max_byte_array_len, prefixed_probability); +} + +static constexpr int kGenerateDataFLBALength = 8; + +template <> +inline void GeneratePrefixedData(int num_values, FLBA* out, + std::vector* heap, + double prefixed_probability) { + heap->resize(num_values * kGenerateDataFLBALength); + // seed the prng so failure is deterministic + prefixed_random_byte_array(num_values, /*seed=*/0, heap->data(), + kGenerateDataFLBALength, out, prefixed_probability); +} + +template <> +inline void GenerateData(int num_values, FLBA* out, std::vector* heap) { + heap->resize(num_values * kGenerateDataFLBALength); + // seed the prng so failure is deterministic + random_fixed_byte_array(num_values, 0, heap->data(), kGenerateDataFLBALength, out); +} + +} // namespace test +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..da0d0f7bdee96c944410ed19d7f9c28fb48bea50 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/type_fwd.h @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace parquet { + +/// \brief Feature selection when writing Parquet files +/// +/// `ParquetVersion::type` governs which data types are allowed and how they +/// are represented. For example, uint32_t data will be written differently +/// depending on this value (as INT64 for PARQUET_1_0, as UINT32 for other +/// versions). +/// +/// However, some features - such as compression algorithms, encryption, +/// or the improved "v2" data page format - must be enabled separately in +/// ArrowWriterProperties. +struct ParquetVersion { + enum type : int { + /// Enable only pre-2.2 Parquet format features when writing + /// + /// This setting is useful for maximum compatibility with legacy readers. + /// Note that logical types may still be emitted, as long they have a + /// corresponding converted type. + PARQUET_1_0, + + /// DEPRECATED: Enable Parquet format 2.6 features + /// + /// This misleadingly named enum value is roughly similar to PARQUET_2_6. 
+ PARQUET_2_0 ARROW_DEPRECATED_ENUM_VALUE("use PARQUET_2_4 or PARQUET_2_6 " + "for fine-grained feature selection"), + + /// Enable Parquet format 2.4 and earlier features when writing + /// + /// This enables UINT32 as well as logical types which don't have + /// a corresponding converted type. + /// + /// Note: Parquet format 2.4.0 was released in October 2017. + PARQUET_2_4, + + /// Enable Parquet format 2.6 and earlier features when writing + /// + /// This enables the NANOS time unit in addition to the PARQUET_2_4 + /// features. + /// + /// Note: Parquet format 2.6.0 was released in September 2018. + PARQUET_2_6, + + /// Enable latest Parquet format 2.x features + /// + /// This value is equal to the greatest 2.x version supported by + /// this library. + PARQUET_2_LATEST = PARQUET_2_6 + }; +}; + +class FileMetaData; +class RowGroupMetaData; + +class ColumnDescriptor; +class SchemaDescriptor; + +class ReaderProperties; +class ArrowReaderProperties; + +class WriterProperties; +class WriterPropertiesBuilder; +class ArrowWriterProperties; +class ArrowWriterPropertiesBuilder; + +namespace arrow { + +class FileWriter; +class FileReader; + +} // namespace arrow +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/types.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/types.h new file mode 100644 index 0000000000000000000000000000000000000000..38529bceae85f27cb8160707bd5fe4469a5b6f72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/types.h @@ -0,0 +1,814 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/platform.h" +#include "parquet/type_fwd.h" +#include "parquet/windows_fixup.h" // for OPTIONAL + +namespace arrow::util { + +class Codec; + +} // namespace arrow::util + +namespace parquet { + +// ---------------------------------------------------------------------- +// Metadata enums to match Thrift metadata +// +// The reason we maintain our own enums is to avoid transitive dependency on +// the compiled Thrift headers (and thus thrift/Thrift.h) for users of the +// public API. After building parquet-cpp, you should not need to include +// Thrift headers in your application. This means some boilerplate to convert +// between our types and Parquet's Thrift types. +// +// We can also add special values like NONE to distinguish between metadata +// values being set and not set. 
As an example consider ConvertedType and +// CompressionCodec + +// Mirrors parquet::Type +struct Type { + enum type { + BOOLEAN = 0, + INT32 = 1, + INT64 = 2, + INT96 = 3, + FLOAT = 4, + DOUBLE = 5, + BYTE_ARRAY = 6, + FIXED_LEN_BYTE_ARRAY = 7, + // Should always be last element. + UNDEFINED = 8 + }; +}; + +// Mirrors parquet::ConvertedType +struct ConvertedType { + enum type { + NONE, // Not a real converted type, but means no converted type is specified + UTF8, + MAP, + MAP_KEY_VALUE, + LIST, + ENUM, + DECIMAL, + DATE, + TIME_MILLIS, + TIME_MICROS, + TIMESTAMP_MILLIS, + TIMESTAMP_MICROS, + UINT_8, + UINT_16, + UINT_32, + UINT_64, + INT_8, + INT_16, + INT_32, + INT_64, + JSON, + BSON, + INTERVAL, + // DEPRECATED INVALID ConvertedType for all-null data. + // Only useful for reading legacy files written out by interim Parquet C++ releases. + // For writing, always emit LogicalType::Null instead. + // See PARQUET-1990. + NA = 25, + UNDEFINED = 26 // Not a real converted type; should always be last element + }; +}; + +// forward declaration +namespace format { + +class LogicalType; + +} + +// Mirrors parquet::FieldRepetitionType +struct Repetition { + enum type { REQUIRED = 0, OPTIONAL = 1, REPEATED = 2, /*Always last*/ UNDEFINED = 3 }; +}; + +// Reference: +// parquet-mr/parquet-hadoop/src/main/java/org/apache/parquet/ +// format/converter/ParquetMetadataConverter.java +// Sort order for page and column statistics. Types are associated with sort +// orders (e.g., UTF8 columns should use UNSIGNED) and column stats are +// aggregated using a sort order. As of parquet-format version 2.3.1, the +// order used to aggregate stats is always SIGNED and is not stored in the +// Parquet file. These stats are discarded for types that need unsigned. +// See PARQUET-686. +struct SortOrder { + enum type { SIGNED, UNSIGNED, UNKNOWN }; +}; + +namespace schema { + +struct DecimalMetadata { + bool isset; + int32_t scale; + int32_t precision; +}; + +} // namespace schema + +/// \brief Implementation of parquet.thrift LogicalType types. +class PARQUET_EXPORT LogicalType { + public: + struct Type { + enum type { + UNDEFINED = 0, // Not a real logical type + STRING = 1, + MAP, + LIST, + ENUM, + DECIMAL, + DATE, + TIME, + TIMESTAMP, + INTERVAL, + INT, + NIL, // Thrift NullType: annotates data that is always null + JSON, + BSON, + UUID, + FLOAT16, + NONE // Not a real logical type; should always be last element + }; + }; + + struct TimeUnit { + enum unit { UNKNOWN = 0, MILLIS = 1, MICROS, NANOS }; + }; + + /// \brief If possible, return a logical type equivalent to the given legacy + /// converted type (and decimal metadata if applicable). + static std::shared_ptr FromConvertedType( + const parquet::ConvertedType::type converted_type, + const parquet::schema::DecimalMetadata converted_decimal_metadata = {false, -1, + -1}); + + /// \brief Return the logical type represented by the Thrift intermediary object. + static std::shared_ptr FromThrift( + const parquet::format::LogicalType& thrift_logical_type); + + /// \brief Return the explicitly requested logical type. 
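+  ///
+  /// A short illustrative sketch (not from the upstream header) using the
+  /// factory declarations that follow:
+  ///
+  /// \code
+  /// auto decimal_type = LogicalType::Decimal(/*precision=*/10, /*scale=*/2);
+  /// auto ts_type = LogicalType::Timestamp(/*is_adjusted_to_utc=*/true,
+  ///                                       LogicalType::TimeUnit::MILLIS);
+  /// \endcode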
+
+/// \brief Implementation of parquet.thrift LogicalType types.
+class PARQUET_EXPORT LogicalType {
+ public:
+  struct Type {
+    enum type {
+      UNDEFINED = 0,  // Not a real logical type
+      STRING = 1,
+      MAP,
+      LIST,
+      ENUM,
+      DECIMAL,
+      DATE,
+      TIME,
+      TIMESTAMP,
+      INTERVAL,
+      INT,
+      NIL,  // Thrift NullType: annotates data that is always null
+      JSON,
+      BSON,
+      UUID,
+      FLOAT16,
+      NONE  // Not a real logical type; should always be last element
+    };
+  };
+
+  struct TimeUnit {
+    enum unit { UNKNOWN = 0, MILLIS = 1, MICROS, NANOS };
+  };
+
+  /// \brief If possible, return a logical type equivalent to the given legacy
+  /// converted type (and decimal metadata if applicable).
+  static std::shared_ptr<const LogicalType> FromConvertedType(
+      const parquet::ConvertedType::type converted_type,
+      const parquet::schema::DecimalMetadata converted_decimal_metadata = {false, -1,
+                                                                           -1});
+
+  /// \brief Return the logical type represented by the Thrift intermediary object.
+  static std::shared_ptr<const LogicalType> FromThrift(
+      const parquet::format::LogicalType& thrift_logical_type);
+
+  /// \brief Return the explicitly requested logical type.
+  static std::shared_ptr<const LogicalType> String();
+  static std::shared_ptr<const LogicalType> Map();
+  static std::shared_ptr<const LogicalType> List();
+  static std::shared_ptr<const LogicalType> Enum();
+  static std::shared_ptr<const LogicalType> Decimal(int32_t precision,
+                                                    int32_t scale = 0);
+  static std::shared_ptr<const LogicalType> Date();
+  static std::shared_ptr<const LogicalType> Time(bool is_adjusted_to_utc,
+                                                 LogicalType::TimeUnit::unit time_unit);
+
+  /// \brief Create a Timestamp logical type
+  /// \param[in] is_adjusted_to_utc set true if the data is UTC-normalized
+  /// \param[in] time_unit the resolution of the timestamp
+  /// \param[in] is_from_converted_type if true, the timestamp was generated
+  /// by translating a legacy converted type of TIMESTAMP_MILLIS or
+  /// TIMESTAMP_MICROS. Default is false.
+  /// \param[in] force_set_converted_type if true, always set the
+  /// legacy ConvertedType TIMESTAMP_MICROS and TIMESTAMP_MILLIS
+  /// metadata. Default is false
+  static std::shared_ptr<const LogicalType> Timestamp(
+      bool is_adjusted_to_utc, LogicalType::TimeUnit::unit time_unit,
+      bool is_from_converted_type = false, bool force_set_converted_type = false);
+
+  static std::shared_ptr<const LogicalType> Interval();
+  static std::shared_ptr<const LogicalType> Int(int bit_width, bool is_signed);
+
+  /// \brief Create a logical type for data that's always null
+  ///
+  /// Any physical type can be annotated with this logical type.
+  static std::shared_ptr<const LogicalType> Null();
+
+  static std::shared_ptr<const LogicalType> JSON();
+  static std::shared_ptr<const LogicalType> BSON();
+  static std::shared_ptr<const LogicalType> UUID();
+  static std::shared_ptr<const LogicalType> Float16();
+
+  /// \brief Create a placeholder for when no logical type is specified
+  static std::shared_ptr<const LogicalType> None();
+
+  /// \brief Return true if this logical type is consistent with the given underlying
+  /// physical type.
+  bool is_applicable(parquet::Type::type primitive_type,
+                     int32_t primitive_length = -1) const;
+
+  /// \brief Return true if this logical type is equivalent to the given legacy converted
+  /// type (and decimal metadata if applicable).
+  bool is_compatible(parquet::ConvertedType::type converted_type,
+                     parquet::schema::DecimalMetadata converted_decimal_metadata = {
+                         false, -1, -1}) const;
+
+  /// \brief If possible, return the legacy converted type (and decimal metadata if
+  /// applicable) equivalent to this logical type.
+  parquet::ConvertedType::type ToConvertedType(
+      parquet::schema::DecimalMetadata* out_decimal_metadata) const;
+
+  /// \brief Return a printable representation of this logical type.
+  std::string ToString() const;
+
+  /// \brief Return a JSON representation of this logical type.
+  std::string ToJSON() const;
+
+  /// \brief Return a serializable Thrift object for this logical type.
+  parquet::format::LogicalType ToThrift() const;
+
+  /// \brief Return true if the given logical type is equivalent to this logical type.
+  bool Equals(const LogicalType& other) const;
+
+  /// \brief Return the enumerated type of this logical type.
+  LogicalType::Type::type type() const;
+
+  /// \brief Return the appropriate sort order for this logical type.
+  SortOrder::type sort_order() const;
+
+  // Type checks ...
+  bool is_string() const;
+  bool is_map() const;
+  bool is_list() const;
+  bool is_enum() const;
+  bool is_decimal() const;
+  bool is_date() const;
+  bool is_time() const;
+  bool is_timestamp() const;
+  bool is_interval() const;
+  bool is_int() const;
+  bool is_null() const;
+  bool is_JSON() const;
+  bool is_BSON() const;
+  bool is_UUID() const;
+  bool is_float16() const;
+  bool is_none() const;
+  /// \brief Return true if this logical type is of a known type.
+  bool is_valid() const;
+  bool is_invalid() const;
+  /// \brief Return true if this logical type is suitable for a schema GroupNode.
+  bool is_nested() const;
+  bool is_nonnested() const;
+  /// \brief Return true if this logical type is included in the Thrift output for its
+  /// node.
+  bool is_serialized() const;
+
+  LogicalType(const LogicalType&) = delete;
+  LogicalType& operator=(const LogicalType&) = delete;
+  virtual ~LogicalType() noexcept;
+
+ protected:
+  LogicalType();
+
+  class Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+/// \brief Allowed for physical type BYTE_ARRAY, must be encoded as UTF-8.
+class PARQUET_EXPORT StringLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  StringLogicalType() = default;
+};
+
+/// \brief Allowed for group nodes only.
+class PARQUET_EXPORT MapLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  MapLogicalType() = default;
+};
+
+/// \brief Allowed for group nodes only.
+class PARQUET_EXPORT ListLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  ListLogicalType() = default;
+};
+
+/// \brief Allowed for physical type BYTE_ARRAY, must be encoded as UTF-8.
+class PARQUET_EXPORT EnumLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  EnumLogicalType() = default;
+};
+
+/// \brief Allowed for physical type INT32, INT64, FIXED_LEN_BYTE_ARRAY, or BYTE_ARRAY,
+/// depending on the precision.
+class PARQUET_EXPORT DecimalLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make(int32_t precision, int32_t scale = 0);
+  int32_t precision() const;
+  int32_t scale() const;
+
+ private:
+  DecimalLogicalType() = default;
+};
+
+/// \brief Allowed for physical type INT32.
+class PARQUET_EXPORT DateLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  DateLogicalType() = default;
+};
+
+/// \brief Allowed for physical type INT32 (for MILLIS) or INT64 (for MICROS and NANOS).
+class PARQUET_EXPORT TimeLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make(bool is_adjusted_to_utc,
+                                                 LogicalType::TimeUnit::unit time_unit);
+  bool is_adjusted_to_utc() const;
+  LogicalType::TimeUnit::unit time_unit() const;
+
+ private:
+  TimeLogicalType() = default;
+};
+
+/// \brief Allowed for physical type INT64.
+class PARQUET_EXPORT TimestampLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make(bool is_adjusted_to_utc,
+                                                 LogicalType::TimeUnit::unit time_unit,
+                                                 bool is_from_converted_type = false,
+                                                 bool force_set_converted_type = false);
+  bool is_adjusted_to_utc() const;
+  LogicalType::TimeUnit::unit time_unit() const;
+
+  /// \brief If true, will not set LogicalType in Thrift metadata
+  bool is_from_converted_type() const;
+
+  /// \brief If true, will set ConvertedType for micros and millis
+  /// resolution in legacy ConvertedType Thrift metadata
+  bool force_set_converted_type() const;
+
+ private:
+  TimestampLogicalType() = default;
+};
+
+/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 12
+class PARQUET_EXPORT IntervalLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  IntervalLogicalType() = default;
+};
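A brief sketch tying the factory methods and predicates together (illustrative only; the round-trip relies on ToConvertedType as documented above):

#include <cassert>

#include "parquet/types.h"

int main() {
  // Microsecond, UTC-normalized timestamp annotation for an INT64 column.
  auto ts = parquet::LogicalType::Timestamp(
      /*is_adjusted_to_utc=*/true, parquet::LogicalType::TimeUnit::MICROS);

  // The annotation is only consistent with the INT64 physical type.
  assert(ts->is_applicable(parquet::Type::INT64));
  assert(!ts->is_applicable(parquet::Type::INT32));

  // Round-trip to the equivalent legacy converted type.
  parquet::schema::DecimalMetadata decimal_meta;
  assert(ts->ToConvertedType(&decimal_meta) ==
         parquet::ConvertedType::TIMESTAMP_MICROS);
  return 0;
}
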
+
+/// \brief Allowed for physical type INT32 (for bit widths 8, 16, and 32) and INT64
+/// (for bit width 64).
+class PARQUET_EXPORT IntLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make(int bit_width, bool is_signed);
+  int bit_width() const;
+  bool is_signed() const;
+
+ private:
+  IntLogicalType() = default;
+};
+
+/// \brief Allowed for any physical type.
+class PARQUET_EXPORT NullLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  NullLogicalType() = default;
+};
+
+/// \brief Allowed for physical type BYTE_ARRAY.
+class PARQUET_EXPORT JSONLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  JSONLogicalType() = default;
+};
+
+/// \brief Allowed for physical type BYTE_ARRAY.
+class PARQUET_EXPORT BSONLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  BSONLogicalType() = default;
+};
+
+/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 16,
+/// must encode raw UUID bytes.
+class PARQUET_EXPORT UUIDLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  UUIDLogicalType() = default;
+};
+
+/// \brief Allowed for physical type FIXED_LEN_BYTE_ARRAY with length 2,
+/// must encode raw FLOAT16 bytes.
+class PARQUET_EXPORT Float16LogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  Float16LogicalType() = default;
+};
+
+/// \brief Allowed for any physical type.
+class PARQUET_EXPORT NoLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  NoLogicalType() = default;
+};
+
+// Internal API, for unrecognized logical types
+class PARQUET_EXPORT UndefinedLogicalType : public LogicalType {
+ public:
+  static std::shared_ptr<const LogicalType> Make();
+
+ private:
+  UndefinedLogicalType() = default;
+};
+
+// Data encodings. Mirrors parquet::Encoding
+struct Encoding {
+  enum type {
+    PLAIN = 0,
+    PLAIN_DICTIONARY = 2,
+    RLE = 3,
+    BIT_PACKED = 4,
+    DELTA_BINARY_PACKED = 5,
+    DELTA_LENGTH_BYTE_ARRAY = 6,
+    DELTA_BYTE_ARRAY = 7,
+    RLE_DICTIONARY = 8,
+    BYTE_STREAM_SPLIT = 9,
+    // Should always be last element (except UNKNOWN)
+    UNDEFINED = 10,
+    UNKNOWN = 999
+  };
+};
+
+// Exposed data encodings. It is the encoding of the data read from the file,
+// rather than the encoding of the data in the file. E.g., the data encoded as
+// RLE_DICTIONARY in the file can be read as dictionary indices by RLE
+// decoding, in which case the data read from the file is DICTIONARY encoded.
+enum class ExposedEncoding {
+  NO_ENCODING = 0,  // data is not encoded, i.e. already decoded during reading
+  DICTIONARY = 1
+};
+
+/// \brief Return true if Parquet supports indicated compression type
+PARQUET_EXPORT
+bool IsCodecSupported(Compression::type codec);
+
+PARQUET_EXPORT
+std::unique_ptr<::arrow::util::Codec> GetCodec(Compression::type codec);
+
+PARQUET_EXPORT
+std::unique_ptr<::arrow::util::Codec> GetCodec(Compression::type codec,
+                                               const CodecOptions& codec_options);
+
+PARQUET_EXPORT
+std::unique_ptr<::arrow::util::Codec> GetCodec(Compression::type codec,
+                                               int compression_level);
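A sketch of the codec helpers just declared; IsCodecSupported guards against codecs that parquet-cpp knows about but cannot use in this build (codec choice and compression level here are arbitrary):

#include <memory>

#include "arrow/util/compression.h"
#include "parquet/types.h"

// Sketch: obtain a ZSTD compressor for page data if this build supports it.
std::unique_ptr<::arrow::util::Codec> MaybeZstdCodec() {
  if (!parquet::IsCodecSupported(::arrow::Compression::ZSTD)) {
    return nullptr;
  }
  return parquet::GetCodec(::arrow::Compression::ZSTD, /*compression_level=*/3);
}
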
+
+struct ParquetCipher {
+  enum type { AES_GCM_V1 = 0, AES_GCM_CTR_V1 = 1 };
+};
+
+struct AadMetadata {
+  std::string aad_prefix;
+  std::string aad_file_unique;
+  bool supply_aad_prefix;
+};
+
+struct EncryptionAlgorithm {
+  ParquetCipher::type algorithm;
+  AadMetadata aad;
+};
+
+// parquet::PageType
+struct PageType {
+  enum type {
+    DATA_PAGE,
+    INDEX_PAGE,
+    DICTIONARY_PAGE,
+    DATA_PAGE_V2,
+    // Should always be last element
+    UNDEFINED
+  };
+};
+
+bool PageCanUseChecksum(PageType::type pageType);
+
+class ColumnOrder {
+ public:
+  enum type { UNDEFINED, TYPE_DEFINED_ORDER };
+  explicit ColumnOrder(ColumnOrder::type column_order) : column_order_(column_order) {}
+  // Default to Type Defined Order
+  ColumnOrder() : column_order_(type::TYPE_DEFINED_ORDER) {}
+  ColumnOrder::type get_order() { return column_order_; }
+
+  static ColumnOrder undefined_;
+  static ColumnOrder type_defined_;
+
+ private:
+  ColumnOrder::type column_order_;
+};
+
+/// \brief BoundaryOrder is a proxy around format::BoundaryOrder.
+struct BoundaryOrder {
+  enum type {
+    Unordered = 0,
+    Ascending = 1,
+    Descending = 2,
+    // Should always be last element
+    UNDEFINED = 3
+  };
+};
+
+/// \brief SortingColumn is a proxy around format::SortingColumn.
+struct PARQUET_EXPORT SortingColumn {
+  // The column index (in this row group)
+  int32_t column_idx;
+
+  // If true, indicates this column is sorted in descending order.
+  bool descending;
+
+  // If true, nulls will come before non-null values, otherwise, nulls go at the end.
+  bool nulls_first;
+};
+
+inline bool operator==(const SortingColumn& left, const SortingColumn& right) {
+  return left.nulls_first == right.nulls_first && left.descending == right.descending &&
+         left.column_idx == right.column_idx;
+}
+
+inline bool operator!=(const SortingColumn& left, const SortingColumn& right) {
+  return !(left == right);
+}
+
+// ----------------------------------------------------------------------
+
+struct ByteArray {
+  ByteArray() : len(0), ptr(NULLPTR) {}
+  ByteArray(uint32_t len, const uint8_t* ptr) : len(len), ptr(ptr) {}
+
+  ByteArray(::std::string_view view)  // NOLINT implicit conversion
+      : ByteArray(static_cast<uint32_t>(view.size()),
+                  reinterpret_cast<const uint8_t*>(view.data())) {}
+
+  explicit operator std::string_view() const {
+    return std::string_view{reinterpret_cast<const char*>(ptr), len};
+  }
+
+  uint32_t len;
+  const uint8_t* ptr;
+};
+
+inline bool operator==(const ByteArray& left, const ByteArray& right) {
+  return left.len == right.len &&
+         (left.len == 0 || std::memcmp(left.ptr, right.ptr, left.len) == 0);
+}
+
+inline bool operator!=(const ByteArray& left, const ByteArray& right) {
+  return !(left == right);
+}
+
+struct FixedLenByteArray {
+  FixedLenByteArray() : ptr(NULLPTR) {}
+  explicit FixedLenByteArray(const uint8_t* ptr) : ptr(ptr) {}
+  const uint8_t* ptr;
+};
+
+using FLBA = FixedLenByteArray;
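ByteArray is a non-owning view over externally owned bytes; a short sketch of the implicit string_view conversion and the memcmp-based equality defined above:

#include <cassert>
#include <string_view>

#include "parquet/types.h"

int main() {
  std::string_view s = "parquet";
  parquet::ByteArray a = s;            // implicit: borrows s's bytes
  parquet::ByteArray b(a.len, a.ptr);  // same length, same pointer
  assert(a == b);                      // compares length, then bytes
  assert(std::string_view(a) == s);    // explicit conversion back to a view
  return 0;
}
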
+// +// The Julian Day Number (JDN) is the integer assigned to a whole solar day in +// the Julian day count starting from noon Universal time, with Julian day +// number 0 assigned to the day starting at noon on Monday, January 1, 4713 BC, +// proleptic Julian calendar (November 24, 4714 BC, in the proleptic Gregorian +// calendar), +constexpr int64_t kJulianToUnixEpochDays = INT64_C(2440588); +constexpr int64_t kSecondsPerDay = INT64_C(60 * 60 * 24); +constexpr int64_t kMillisecondsPerDay = kSecondsPerDay * INT64_C(1000); +constexpr int64_t kMicrosecondsPerDay = kMillisecondsPerDay * INT64_C(1000); +constexpr int64_t kNanosecondsPerDay = kMicrosecondsPerDay * INT64_C(1000); + +MANUALLY_ALIGNED_STRUCT(1) Int96 { uint32_t value[3]; }; +STRUCT_END(Int96, 12); + +inline bool operator==(const Int96& left, const Int96& right) { + return std::equal(left.value, left.value + 3, right.value); +} + +inline bool operator!=(const Int96& left, const Int96& right) { return !(left == right); } + +static inline std::string ByteArrayToString(const ByteArray& a) { + return std::string(reinterpret_cast(a.ptr), a.len); +} + +static inline void Int96SetNanoSeconds(parquet::Int96& i96, int64_t nanoseconds) { + std::memcpy(&i96.value, &nanoseconds, sizeof(nanoseconds)); +} + +struct DecodedInt96 { + uint64_t days_since_epoch; + uint64_t nanoseconds; +}; + +static inline DecodedInt96 DecodeInt96Timestamp(const parquet::Int96& i96) { + // We do the computations in the unsigned domain to avoid unsigned behaviour + // on overflow. + DecodedInt96 result; + result.days_since_epoch = i96.value[2] - static_cast(kJulianToUnixEpochDays); + result.nanoseconds = 0; + + memcpy(&result.nanoseconds, &i96.value, sizeof(uint64_t)); + return result; +} + +static inline int64_t Int96GetNanoSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + return static_cast(decoded.days_since_epoch * kNanosecondsPerDay + + decoded.nanoseconds); +} + +static inline int64_t Int96GetMicroSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t microseconds = decoded.nanoseconds / static_cast(1000); + return static_cast(decoded.days_since_epoch * kMicrosecondsPerDay + + microseconds); +} + +static inline int64_t Int96GetMilliSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t milliseconds = decoded.nanoseconds / static_cast(1000000); + return static_cast(decoded.days_since_epoch * kMillisecondsPerDay + + milliseconds); +} + +static inline int64_t Int96GetSeconds(const parquet::Int96& i96) { + const auto decoded = DecodeInt96Timestamp(i96); + uint64_t seconds = decoded.nanoseconds / static_cast(1000000000); + return static_cast(decoded.days_since_epoch * kSecondsPerDay + seconds); +} + +static inline std::string Int96ToString(const Int96& a) { + std::ostringstream result; + std::copy(a.value, a.value + 3, std::ostream_iterator(result, " ")); + return result.str(); +} + +static inline std::string FixedLenByteArrayToString(const FixedLenByteArray& a, int len) { + std::ostringstream result; + std::copy(a.ptr, a.ptr + len, std::ostream_iterator(result, " ")); + return result.str(); +} + +template +struct type_traits {}; + +template <> +struct type_traits { + using value_type = bool; + + static constexpr int value_byte_size = 1; + static constexpr const char* printf_code = "d"; +}; + +template <> +struct type_traits { + using value_type = int32_t; + + static constexpr int value_byte_size = 4; + static constexpr const char* 
printf_code = "d"; +}; + +template <> +struct type_traits { + using value_type = int64_t; + + static constexpr int value_byte_size = 8; + static constexpr const char* printf_code = + (sizeof(long) == 64) ? "ld" : "lld"; // NOLINT: runtime/int +}; + +template <> +struct type_traits { + using value_type = Int96; + + static constexpr int value_byte_size = 12; + static constexpr const char* printf_code = "s"; +}; + +template <> +struct type_traits { + using value_type = float; + + static constexpr int value_byte_size = 4; + static constexpr const char* printf_code = "f"; +}; + +template <> +struct type_traits { + using value_type = double; + + static constexpr int value_byte_size = 8; + static constexpr const char* printf_code = "lf"; +}; + +template <> +struct type_traits { + using value_type = ByteArray; + + static constexpr int value_byte_size = sizeof(ByteArray); + static constexpr const char* printf_code = "s"; +}; + +template <> +struct type_traits { + using value_type = FixedLenByteArray; + + static constexpr int value_byte_size = sizeof(FixedLenByteArray); + static constexpr const char* printf_code = "s"; +}; + +template +struct PhysicalType { + using c_type = typename type_traits::value_type; + static constexpr Type::type type_num = TYPE; +}; + +using BooleanType = PhysicalType; +using Int32Type = PhysicalType; +using Int64Type = PhysicalType; +using Int96Type = PhysicalType; +using FloatType = PhysicalType; +using DoubleType = PhysicalType; +using ByteArrayType = PhysicalType; +using FLBAType = PhysicalType; + +template +inline std::string format_fwf(int width) { + std::stringstream ss; + ss << "%-" << width << type_traits::printf_code; + return ss.str(); +} + +PARQUET_EXPORT std::string EncodingToString(Encoding::type t); + +PARQUET_EXPORT std::string ConvertedTypeToString(ConvertedType::type t); + +PARQUET_EXPORT std::string TypeToString(Type::type t); + +PARQUET_EXPORT std::string TypeToString(Type::type t, int type_length); + +PARQUET_EXPORT std::string FormatStatValue(Type::type parquet_type, + ::std::string_view val); + +PARQUET_EXPORT int GetTypeByteSize(Type::type t); + +PARQUET_EXPORT SortOrder::type DefaultSortOrder(Type::type primitive); + +PARQUET_EXPORT SortOrder::type GetSortOrder(ConvertedType::type converted, + Type::type primitive); + +PARQUET_EXPORT SortOrder::type GetSortOrder( + const std::shared_ptr& logical_type, Type::type primitive); + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h new file mode 100644 index 0000000000000000000000000000000000000000..fe84d8c6ce06e12b2b50563997b40337f691ee53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe84d8c6ce06e12b2b50563997b40337f691ee53
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h
@@ -0,0 +1,21 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/util/windows_compatibility.h"
+#include "parquet/windows_fixup.h"
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce44480c5732e760841c2c29b0bede4159b6e33e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/windows_fixup.h
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This header needs to be included multiple times.
+
+#include "arrow/util/windows_fixup.h"
+
+#ifdef _WIN32
+
+// parquet.thrift's OPTIONAL RepetitionType conflicts with a Windows #define
+#ifdef OPTIONAL
+#undef OPTIONAL
+#endif
+
+#endif  // _WIN32
diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h
new file mode 100644
index 0000000000000000000000000000000000000000..a54f287883e006e9cd6d9aeeb2efeb1d6f9db2df
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/xxhasher.h
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "parquet/hasher.h"
+#include "parquet/platform.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class PARQUET_EXPORT XxHasher : public Hasher {
+ public:
+  uint64_t Hash(int32_t value) const override;
+  uint64_t Hash(int64_t value) const override;
+  uint64_t Hash(float value) const override;
+  uint64_t Hash(double value) const override;
+  uint64_t Hash(const Int96* value) const override;
+  uint64_t Hash(const ByteArray* value) const override;
+  uint64_t Hash(const FLBA* val, uint32_t len) const override;
+
+  void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const float* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const double* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override;
+  void Hashes(const FLBA* values, uint32_t type_len, int num_values,
+              uint64_t* hashes) const override;
+
+  static constexpr int kParquetBloomXxHashSeed = 0;
+};
+
+}  // namespace parquet
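Closing sketch: XxHasher is the xxHash implementation behind Parquet bloom filters, so hashing a single cell value looks like this (the value contents are arbitrary):

#include <cstdint>
#include <string_view>

#include "parquet/xxhasher.h"

int main() {
  parquet::XxHasher hasher;
  parquet::ByteArray value(std::string_view("some cell value"));
  const uint64_t h = hasher.Hash(&value);  // 64-bit xxHash with seed 0
  (void)h;  // would be tested against a bloom filter's bit set
  return 0;
}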