diff --git a/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ddf3d5fbac8cfe893398271c5905d6f71ed03b41
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f51e6253f50ae9eb088f7dca0a13e0ffa01def0b96eb883d45ca5f1a31129156
+size 33555612
diff --git a/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d495ec5e8bda2ef1b27f800ccc83716c3796de2d
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b49e9761c4b8ccf8c6e14ffc1ce8f21ec363502d008eb46d842207dc1a91c99
+size 33555533
diff --git a/ckpts/universal/global_step80/zero/24.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/24.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bc6d69325d417dc84cb8cf60e0fe2d09cc7c94b8
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/24.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdb8251b987c9ee461212dc5703ad28b984fdbedfb38dd8442e1ffaf78f72ea4
+size 16778411
diff --git a/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ee245dc9cc673d8da5238d1ffcefa2d79e217733
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bd2a9b606031b56b575fcdb1b3d3c63d1bf21b34bc2d43fcd8bd03dccd031fe
+size 33555612
diff --git a/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b9a9eb7b210a4999e668d4b3791097cf99d122fe
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bf572747c0c2d138a6dc95595ff956401865f72cbb93ced955383e53cd62dc0
+size 33555533
diff --git a/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1b70cab72c84703d731744704f53dd166c5d9f83
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3e00cb78804dd24f01a1463321d5c4ca0ad37a567fa7c36b584a3b4530c6a44
+size 16778396
diff --git a/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6822f1f5a13c93b262077d8f0054104849bf7ee7
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e24cdc6af74ac6e9b1a093311858a565f1f109e7866e81395fb6708758cc39b
+size 16778411
diff --git a/ckpts/universal/global_step80/zero/8.attention.dense.weight/fp32.pt b/ckpts/universal/global_step80/zero/8.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b0e7f2254664f3f11331e9079c3ebfe610dd9eb3
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5be5f976557379e87c487aa3c39b5fa755650dd5f3cd917aed7b46601dfae496
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h
new file mode 100644
index 0000000000000000000000000000000000000000..19fdee61243d17de729e795d18102ece6cbf7bd8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_binary.h
@@ -0,0 +1,329 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor classes for Binary, LargeBinary, String, LargeString,
+// FixedSizeBinary
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/stl_iterator.h"
+#include "arrow/type.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup binary-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// Binary and String
+
+/// Base class for variable-sized binary arrays, regardless of offset size
+/// and logical interpretation.
+template <typename TYPE>
+class BaseBinaryArray : public FlatArray {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+  using IteratorType = stl::ArrayIterator<BaseBinaryArray<TYPE>>;
+
+  /// Return the pointer to the given element's bytes
+  // XXX should GetValue(int64_t i) return a string_view?
+  const uint8_t* GetValue(int64_t i, offset_type* out_length) const {
+    // Account for base offset
+    i += data_->offset;
+    const offset_type pos = raw_value_offsets_[i];
+    *out_length = raw_value_offsets_[i + 1] - pos;
+    return raw_data_ + pos;
+  }
+
+  /// \brief Get binary value as a string_view
+  ///
+  /// \param i the value index
+  /// \return the view over the selected value
+  std::string_view GetView(int64_t i) const {
+    // Account for base offset
+    i += data_->offset;
+    const offset_type pos = raw_value_offsets_[i];
+    return std::string_view(reinterpret_cast<const char*>(raw_data_ + pos),
+                            raw_value_offsets_[i + 1] - pos);
+  }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  /// \brief Get binary value as a string_view
+  /// Provided for consistency with other arrays.
+  ///
+  /// \param i the value index
+  /// \return the view over the selected value
+  std::string_view Value(int64_t i) const { return GetView(i); }
+
+  /// \brief Get binary value as a std::string
+  ///
+  /// \param i the value index
+  /// \return the value copied into a std::string
+  std::string GetString(int64_t i) const { return std::string(GetView(i)); }
+
+  /// Note that this buffer does not account for any slice offset
+  std::shared_ptr<Buffer> value_offsets() const { return data_->buffers[1]; }
+
+  /// Note that this buffer does not account for any slice offset
+  std::shared_ptr<Buffer> value_data() const { return data_->buffers[2]; }
+
+  const offset_type* raw_value_offsets() const {
+    return raw_value_offsets_ + data_->offset;
+  }
+
+  const uint8_t* raw_data() const { return raw_data_; }
+
+  /// \brief Return the data buffer absolute offset of the data for the value
+  /// at the passed index.
+  ///
+  /// Does not perform boundschecking
+  offset_type value_offset(int64_t i) const {
+    return raw_value_offsets_[i + data_->offset];
+  }
+
+  /// \brief Return the length of the data for the value at the passed index.
+  ///
+  /// Does not perform boundschecking
+  offset_type value_length(int64_t i) const {
+    i += data_->offset;
+    return raw_value_offsets_[i + 1] - raw_value_offsets_[i];
+  }
+
+  /// \brief Return the total length of the memory in the data buffer
+  /// referenced by this array. If the array has been sliced then this may be
+  /// less than the size of the data buffer (data_->buffers[2]).
+  offset_type total_values_length() const {
+    if (data_->length > 0) {
+      return raw_value_offsets_[data_->length + data_->offset] -
+             raw_value_offsets_[data_->offset];
+    } else {
+      return 0;
+    }
+  }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  // For subclasses
+  BaseBinaryArray() = default;
+
+  // Protected method for constructors
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->Array::SetData(data);
+    raw_value_offsets_ = data->GetValuesSafe<offset_type>(1, /*offset=*/0);
+    raw_data_ = data->GetValuesSafe<uint8_t>(2, /*offset=*/0);
+  }
+
+  const offset_type* raw_value_offsets_ = NULLPTR;
+  const uint8_t* raw_data_ = NULLPTR;
+};
+
+/// Concrete Array class for variable-size binary data
+class ARROW_EXPORT BinaryArray : public BaseBinaryArray<BinaryType> {
+ public:
+  explicit BinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  BinaryArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+              const std::shared_ptr<Buffer>& data,
+              const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+              int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+ protected:
+  // For subclasses such as StringArray
+  BinaryArray() : BaseBinaryArray() {}
+};
+
+/// Concrete Array class for variable-size string (utf-8) data
+class ARROW_EXPORT StringArray : public BinaryArray {
+ public:
+  using TypeClass = StringType;
+
+  explicit StringArray(const std::shared_ptr<ArrayData>& data);
+
+  StringArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+              const std::shared_ptr<Buffer>& data,
+              const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+              int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
+
+/// Concrete Array class for large variable-size binary data
+class ARROW_EXPORT LargeBinaryArray : public BaseBinaryArray<LargeBinaryType> {
+ public:
+  explicit LargeBinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  LargeBinaryArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+                   const std::shared_ptr<Buffer>& data,
+                   const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                   int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+ protected:
+  // For subclasses such as LargeStringArray
+  LargeBinaryArray() : BaseBinaryArray() {}
+};
+
+/// Concrete Array class for large variable-size string (utf-8) data
+class ARROW_EXPORT LargeStringArray : public LargeBinaryArray {
+ public:
+  using TypeClass = LargeStringType;
+
+  explicit LargeStringArray(const std::shared_ptr<ArrayData>& data);
+
+  LargeStringArray(int64_t length, const std::shared_ptr<Buffer>& value_offsets,
+                   const std::shared_ptr<Buffer>& data,
+                   const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                   int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
+
+// ----------------------------------------------------------------------
+// BinaryView and StringView
+
+/// Concrete Array class for variable-size binary view data using the
+/// BinaryViewType::c_type struct to reference in-line or out-of-line string values
+class ARROW_EXPORT BinaryViewArray : public FlatArray {
+ public:
+  using TypeClass = BinaryViewType;
+  using IteratorType = stl::ArrayIterator<BinaryViewArray>;
+  using c_type = BinaryViewType::c_type;
+
+  explicit BinaryViewArray(std::shared_ptr<ArrayData> data);
+
+  BinaryViewArray(std::shared_ptr<DataType> type, int64_t length,
+                  std::shared_ptr<Buffer> views,
+                  BufferVector data_buffers,
+                  std::shared_ptr<Buffer> null_bitmap = NULLPTR,
+                  int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  // For API compatibility with BinaryArray etc.
+  std::string_view GetView(int64_t i) const;
+  std::string GetString(int64_t i) const { return std::string{GetView(i)}; }
+
+  const auto& values() const { return data_->buffers[1]; }
+  const c_type* raw_values() const { return raw_values_; }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  IteratorType begin() const { return IteratorType(*this); }
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  using FlatArray::FlatArray;
+
+  void SetData(std::shared_ptr<ArrayData> data) {
+    FlatArray::SetData(std::move(data));
+    raw_values_ = data_->GetValuesSafe<c_type>(1);
+  }
+
+  const c_type* raw_values_;
+};
+
+/// Concrete Array class for variable-size string view (utf-8) data using
+/// BinaryViewType::c_type to reference in-line or out-of-line string values
+class ARROW_EXPORT StringViewArray : public BinaryViewArray {
+ public:
+  using TypeClass = StringViewType;
+
+  explicit StringViewArray(std::shared_ptr<ArrayData> data);
+
+  using BinaryViewArray::BinaryViewArray;
+
+  /// \brief Validate that this array contains only valid UTF8 entries
+  ///
+  /// This check is also implied by ValidateFull()
+  Status ValidateUTF8() const;
+};
+
+// ----------------------------------------------------------------------
+// Fixed width binary
+
+/// Concrete Array class for fixed-size binary data
+class ARROW_EXPORT FixedSizeBinaryArray : public PrimitiveArray {
+ public:
+  using TypeClass = FixedSizeBinaryType;
+  using IteratorType = stl::ArrayIterator<FixedSizeBinaryArray>;
+
+  explicit FixedSizeBinaryArray(const std::shared_ptr<ArrayData>& data);
+
+  FixedSizeBinaryArray(const std::shared_ptr<DataType>& type, int64_t length,
+                       const std::shared_ptr<Buffer>& data,
+                       const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                       int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  const uint8_t* GetValue(int64_t i) const;
+  const uint8_t* Value(int64_t i) const { return GetValue(i); }
+
+  std::string_view GetView(int64_t i) const {
+    return std::string_view(reinterpret_cast<const char*>(GetValue(i)), byte_width());
+  }
+
+  std::optional<std::string_view> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  std::string GetString(int64_t i) const { return std::string(GetView(i)); }
+
+  int32_t byte_width() const { return byte_width_; }
+
+  const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width_; }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  void SetData(const std::shared_ptr<ArrayData>& data) {
+    this->PrimitiveArray::SetData(data);
+    byte_width_ =
+        internal::checked_cast<const FixedSizeBinaryType&>(*type()).byte_width();
+  }
+
+  int32_t byte_width_;
+};
+
+/// @}
+
+} // namespace arrow
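The accessor surface of this header is easiest to see in use. Below is a minimal sketch (editorial, not part of the patch) reading values back out of a `StringArray` through the `BaseBinaryArray` interface declared above; the builder and `arrow/api.h` umbrella header are assumed from the rest of the Arrow distribution:

```cpp
// Sketch only: exercises GetView() and null checking on a StringArray.
#include <arrow/api.h>
#include <iostream>

arrow::Status ReadStrings() {
  arrow::StringBuilder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({"foo", "bar"}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  std::shared_ptr<arrow::Array> arr;
  ARROW_RETURN_NOT_OK(builder.Finish(&arr));

  const auto& strings = static_cast<const arrow::StringArray&>(*arr);
  for (int64_t i = 0; i < strings.length(); ++i) {
    if (strings.IsNull(i)) {
      std::cout << i << ": null\n";
    } else {
      // GetView() returns a zero-copy std::string_view into the data buffer
      std::cout << i << ": " << strings.GetView(i) << "\n";
    }
  }
  return arrow::Status::OK();
}
```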
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..f14812549089ad9c2f6d0e80c8cc17e05d6252c6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_decimal.h
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "arrow/array/array_binary.h"
+#include "arrow/array/data.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup numeric-arrays
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// Decimal128Array
+
+/// Concrete Array class for 128-bit decimal data
+class ARROW_EXPORT Decimal128Array : public FixedSizeBinaryArray {
+ public:
+  using TypeClass = Decimal128Type;
+
+  using FixedSizeBinaryArray::FixedSizeBinaryArray;
+
+  /// \brief Construct Decimal128Array from ArrayData instance
+  explicit Decimal128Array(const std::shared_ptr<ArrayData>& data);
+
+  std::string FormatValue(int64_t i) const;
+};
+
+// Backward compatibility
+using DecimalArray = Decimal128Array;
+
+// ----------------------------------------------------------------------
+// Decimal256Array
+
+/// Concrete Array class for 256-bit decimal data
+class ARROW_EXPORT Decimal256Array : public FixedSizeBinaryArray {
+ public:
+  using TypeClass = Decimal256Type;
+
+  using FixedSizeBinaryArray::FixedSizeBinaryArray;
+
+  /// \brief Construct Decimal256Array from ArrayData instance
+  explicit Decimal256Array(const std::shared_ptr<ArrayData>& data);
+
+  std::string FormatValue(int64_t i) const;
+};
+
+/// @}
+
+} // namespace arrow
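A decimal array stores scaled 128-bit (or 256-bit) integers; `FormatValue()` renders a slot with the type's scale applied. A usage sketch (editorial, not part of the patch; `Decimal128Builder` and the `decimal128()` factory are assumed from the wider Arrow API, only `FormatValue()` comes from this header):

```cpp
// Sketch only: building a decimal(10, 2) array and formatting a value.
#include <arrow/api.h>
#include <iostream>

arrow::Status MakeDecimals() {
  auto type = arrow::decimal128(/*precision=*/10, /*scale=*/2);
  arrow::Decimal128Builder builder(type);
  // The unscaled integer 123456 with scale 2 represents 1234.56
  ARROW_RETURN_NOT_OK(builder.Append(arrow::Decimal128(123456)));
  std::shared_ptr<arrow::Array> arr;
  ARROW_RETURN_NOT_OK(builder.Finish(&arr));
  const auto& decimals = static_cast<const arrow::Decimal128Array&>(*arr);
  std::cout << decimals.FormatValue(0) << "\n";  // expected to print 1234.56
  return arrow::Status::OK();
}
```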
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf376b51f8c9470d2b4e4c7ed950c9a513fddc9b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_dict.h
@@ -0,0 +1,182 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// DictionaryArray
+
+/// \brief Array type for dictionary-encoded data with a
+/// data-dependent dictionary
+///
+/// A dictionary array contains an array of non-negative integers (the
+/// "dictionary indices") along with a data type containing a "dictionary"
+/// corresponding to the distinct values represented in the data.
+///
+/// For example, the array
+///
+///   ["foo", "bar", "foo", "bar", "foo", "bar"]
+///
+/// with dictionary ["bar", "foo"], would have dictionary array representation
+///
+///   indices: [1, 0, 1, 0, 1, 0]
+///   dictionary: ["bar", "foo"]
+///
+/// The indices in principle may be any integer type.
+class ARROW_EXPORT DictionaryArray : public Array {
+ public:
+  using TypeClass = DictionaryType;
+
+  explicit DictionaryArray(const std::shared_ptr<ArrayData>& data);
+
+  DictionaryArray(const std::shared_ptr<DataType>& type,
+                  const std::shared_ptr<Array>& indices,
+                  const std::shared_ptr<Array>& dictionary);
+
+  /// \brief Construct DictionaryArray from dictionary and indices
+  /// array and validate
+  ///
+  /// This function does the validation of the indices and input type. It checks if
+  /// all indices are non-negative and smaller than the size of the dictionary.
+  ///
+  /// \param[in] type a dictionary type
+  /// \param[in] dictionary the dictionary with same value type as the
+  /// type object
+  /// \param[in] indices an array of non-negative integers smaller than the
+  /// size of the dictionary
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<DataType>& type, const std::shared_ptr<Array>& indices,
+      const std::shared_ptr<Array>& dictionary);
+
+  static Result<std::shared_ptr<Array>> FromArrays(
+      const std::shared_ptr<Array>& indices, const std::shared_ptr<Array>& dictionary) {
+    return FromArrays(::arrow::dictionary(indices->type(), dictionary->type()), indices,
+                      dictionary);
+  }
+
+  /// \brief Transpose this DictionaryArray
+  ///
+  /// This method constructs a new dictionary array with the given dictionary
+  /// type, transposing indices using the transpose map. The type and the
+  /// transpose map are typically computed using DictionaryUnifier.
+  ///
+  /// \param[in] type the new type object
+  /// \param[in] dictionary the new dictionary
+  /// \param[in] transpose_map transposition array of this array's indices
+  /// into the target array's indices
+  /// \param[in] pool a pool to allocate the array data from
+  Result<std::shared_ptr<Array>> Transpose(
+      const std::shared_ptr<DataType>& type, const std::shared_ptr<Array>& dictionary,
+      const int32_t* transpose_map, MemoryPool* pool = default_memory_pool()) const;
+
+  Result<std::shared_ptr<Array>> Compact(MemoryPool* pool = default_memory_pool()) const;
+
+  /// \brief Determine whether dictionary arrays may be compared without unification
+  bool CanCompareIndices(const DictionaryArray& other) const;
+
+  /// \brief Return the dictionary for this array, which is stored as
+  /// a member of the ArrayData internal structure
+  const std::shared_ptr<Array>& dictionary() const;
+  const std::shared_ptr<Array>& indices() const;
+
+  /// \brief Return the ith value of indices, cast to int64_t. Not recommended
+  /// for use in performance-sensitive code. Does not validate whether the
+  /// value is null or out-of-bounds.
+  int64_t GetValueIndex(int64_t i) const;
+
+  const DictionaryType* dict_type() const { return dict_type_; }
+
+ private:
+  void SetData(const std::shared_ptr<ArrayData>& data);
+  const DictionaryType* dict_type_;
+  std::shared_ptr<Array> indices_;
+
+  // Lazily initialized when invoking dictionary()
+  mutable std::shared_ptr<Array> dictionary_;
+};
+
+/// \brief Helper class for incremental dictionary unification
+class ARROW_EXPORT DictionaryUnifier {
+ public:
+  virtual ~DictionaryUnifier() = default;
+
+  /// \brief Construct a DictionaryUnifier
+  /// \param[in] value_type the data type of the dictionaries
+  /// \param[in] pool MemoryPool to use for memory allocations
+  static Result<std::unique_ptr<DictionaryUnifier>> Make(
+      std::shared_ptr<DataType> value_type, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Unify dictionaries across array chunks
+  ///
+  /// The dictionaries in the array chunks will be unified, their indices
+  /// accordingly transposed.
+  ///
+  /// Only dictionaries with a primitive value type are currently supported.
+  /// However, dictionaries nested inside a more complex type are correctly unified.
+  static Result<std::shared_ptr<ChunkedArray>> UnifyChunkedArray(
+      const std::shared_ptr<ChunkedArray>& array,
+      MemoryPool* pool = default_memory_pool());
+
+  /// \brief Unify dictionaries across the chunks of each table column
+  ///
+  /// The dictionaries in each table column will be unified, their indices
+  /// accordingly transposed.
+  ///
+  /// Only dictionaries with a primitive value type are currently supported.
+  /// However, dictionaries nested inside a more complex type are correctly unified.
+  static Result<std::shared_ptr<Table>> UnifyTable(
+      const Table& table, MemoryPool* pool = default_memory_pool());
+
+  /// \brief Append dictionary to the internal memo
+  virtual Status Unify(const Array& dictionary) = 0;
+
+  /// \brief Append dictionary and compute transpose indices
+  /// \param[in] dictionary the dictionary values to unify
+  /// \param[out] out_transpose a Buffer containing computed transpose indices
+  /// as int32_t values equal in length to the passed dictionary. The value in
+  /// each slot corresponds to the new index value for each original index
+  /// for a DictionaryArray with the old dictionary
+  virtual Status Unify(const Array& dictionary,
+                       std::shared_ptr<Buffer>* out_transpose) = 0;
+
+  /// \brief Return a result DictionaryType with the smallest possible index
+  /// type to accommodate the unified dictionary. The unifier cannot be used
+  /// after this is called
+  virtual Status GetResult(std::shared_ptr<DataType>* out_type,
+                           std::shared_ptr<Array>* out_dict) = 0;
+
+  /// \brief Return a unified dictionary with the given index type. If
+  /// the index type is not large enough then an invalid status will be returned.
+  /// The unifier cannot be used after this is called
+  virtual Status GetResultWithIndexType(const std::shared_ptr<DataType>& index_type,
+                                        std::shared_ptr<Array>* out_dict) = 0;
+};
+
+} // namespace arrow
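The `FromArrays` contract above maps directly onto the example in the class comment; a minimal sketch (editorial, assuming `arrow/api.h`) building that exact indices/dictionary pair:

```cpp
// Sketch only: dictionary-encoding ["foo","bar",...] with dictionary ["bar","foo"].
// FromArrays() validates that every index is non-negative and in range.
#include <arrow/api.h>

arrow::Result<std::shared_ptr<arrow::Array>> MakeDict() {
  arrow::Int32Builder index_builder;
  ARROW_RETURN_NOT_OK(index_builder.AppendValues({1, 0, 1, 0, 1, 0}));
  std::shared_ptr<arrow::Array> indices;
  ARROW_RETURN_NOT_OK(index_builder.Finish(&indices));

  arrow::StringBuilder dict_builder;
  ARROW_RETURN_NOT_OK(dict_builder.AppendValues({"bar", "foo"}));
  std::shared_ptr<arrow::Array> dictionary;
  ARROW_RETURN_NOT_OK(dict_builder.Finish(&dictionary));

  // The two-argument overload infers dictionary(int32(), utf8())
  return arrow::DictionaryArray::FromArrays(indices, dictionary);
}
```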
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6df92e3b788ceaf74d67219b4f88fd97d786724
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/array_primitive.h
@@ -0,0 +1,202 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Array accessor types for primitive/C-type-based arrays, such as numbers,
+// boolean, and temporal types.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/data.h"
+#include "arrow/stl_iterator.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"  // IWYU pragma: export
+#include "arrow/type_traits.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// Concrete Array class for boolean data
+class ARROW_EXPORT BooleanArray : public PrimitiveArray {
+ public:
+  using TypeClass = BooleanType;
+  using IteratorType = stl::ArrayIterator<BooleanArray>;
+
+  explicit BooleanArray(const std::shared_ptr<ArrayData>& data);
+
+  BooleanArray(int64_t length, const std::shared_ptr<Buffer>& data,
+               const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+               int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  bool Value(int64_t i) const {
+    return bit_util::GetBit(reinterpret_cast<const uint8_t*>(raw_values_),
+                            i + data_->offset);
+  }
+
+  bool GetView(int64_t i) const { return Value(i); }
+
+  std::optional<bool> operator[](int64_t i) const { return *IteratorType(*this, i); }
+
+  /// \brief Return the number of false (0) values among the valid
+  /// values. Result is not cached.
+  int64_t false_count() const;
+
+  /// \brief Return the number of true (1) values among the valid
+  /// values. Result is not cached.
+  int64_t true_count() const;
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  using PrimitiveArray::PrimitiveArray;
+};
+
+/// \addtogroup numeric-arrays
+///
+/// @{
+
+/// \brief Concrete Array class for numeric data with a corresponding C type
+///
+/// This class is templated on the corresponding DataType subclass for the
+/// given data, for example NumericArray<Int8Type> or NumericArray<Date32Type>.
+///
+/// Note that convenience aliases are available for all accepted types
+/// (for example Int8Array for NumericArray<Int8Type>).
+template <typename TYPE>
+class NumericArray : public PrimitiveArray {
+ public:
+  using TypeClass = TYPE;
+  using value_type = typename TypeClass::c_type;
+  using IteratorType = stl::ArrayIterator<NumericArray<TYPE>>;
+
+  explicit NumericArray(const std::shared_ptr<ArrayData>& data) : PrimitiveArray(data) {}
+
+  // Only enable this constructor without a type argument for types without additional
+  // metadata
+  template <typename T1 = TYPE>
+  NumericArray(enable_if_parameter_free<T1, int64_t> length,
+               const std::shared_ptr<Buffer>& data,
+               const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+               int64_t null_count = kUnknownNullCount, int64_t offset = 0)
+      : PrimitiveArray(TypeTraits<T1>::type_singleton(), length, data, null_bitmap,
+                       null_count, offset) {}
+
+  const value_type* raw_values() const {
+    return reinterpret_cast<const value_type*>(raw_values_) + data_->offset;
+  }
+
+  value_type Value(int64_t i) const { return raw_values()[i]; }
+
+  // For API compatibility with BinaryArray etc.
+  value_type GetView(int64_t i) const { return Value(i); }
+
+  std::optional<value_type> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+ protected:
+  using PrimitiveArray::PrimitiveArray;
+};
+
+/// DayTimeArray
+/// ---------------------
+/// \brief Array of Day and Millisecond values.
+class ARROW_EXPORT DayTimeIntervalArray : public PrimitiveArray {
+ public:
+  using TypeClass = DayTimeIntervalType;
+  using IteratorType = stl::ArrayIterator<DayTimeIntervalArray>;
+
+  explicit DayTimeIntervalArray(const std::shared_ptr<ArrayData>& data);
+
+  DayTimeIntervalArray(const std::shared_ptr<DataType>& type, int64_t length,
+                       const std::shared_ptr<Buffer>& data,
+                       const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                       int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  DayTimeIntervalArray(int64_t length, const std::shared_ptr<Buffer>& data,
+                       const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                       int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  TypeClass::DayMilliseconds GetValue(int64_t i) const;
+  TypeClass::DayMilliseconds Value(int64_t i) const { return GetValue(i); }
+
+  // For compatibility with Take kernel.
+  TypeClass::DayMilliseconds GetView(int64_t i) const { return GetValue(i); }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+  std::optional<TypeClass::DayMilliseconds> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  int32_t byte_width() const { return sizeof(TypeClass::DayMilliseconds); }
+
+  const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); }
+};
+
+/// \brief Array of Month, Day and nanosecond values.
+class ARROW_EXPORT MonthDayNanoIntervalArray : public PrimitiveArray {
+ public:
+  using TypeClass = MonthDayNanoIntervalType;
+  using IteratorType = stl::ArrayIterator<MonthDayNanoIntervalArray>;
+
+  explicit MonthDayNanoIntervalArray(const std::shared_ptr<ArrayData>& data);
+
+  MonthDayNanoIntervalArray(const std::shared_ptr<DataType>& type, int64_t length,
+                            const std::shared_ptr<Buffer>& data,
+                            const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                            int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  MonthDayNanoIntervalArray(int64_t length, const std::shared_ptr<Buffer>& data,
+                            const std::shared_ptr<Buffer>& null_bitmap = NULLPTR,
+                            int64_t null_count = kUnknownNullCount, int64_t offset = 0);
+
+  TypeClass::MonthDayNanos GetValue(int64_t i) const;
+  TypeClass::MonthDayNanos Value(int64_t i) const { return GetValue(i); }
+
+  // For compatibility with Take kernel.
+  TypeClass::MonthDayNanos GetView(int64_t i) const { return GetValue(i); }
+
+  IteratorType begin() const { return IteratorType(*this); }
+
+  IteratorType end() const { return IteratorType(*this, length()); }
+
+  std::optional<TypeClass::MonthDayNanos> operator[](int64_t i) const {
+    return *IteratorType(*this, i);
+  }
+
+  int32_t byte_width() const { return sizeof(TypeClass::MonthDayNanos); }
+
+  const uint8_t* raw_values() const { return raw_values_ + data_->offset * byte_width(); }
+};
+
+/// @}
+
+} // namespace arrow
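Typed access through `NumericArray<TYPE>` (and its aliases such as `Int32Array`) is zero-copy: `raw_values()` returns a pointer that already accounts for the slice offset. A short sketch (editorial, assuming `arrow/api.h`) of that plus the `BooleanArray` counting helpers declared above:

```cpp
// Sketch only: summing an Int32Array via raw_values() and counting trues.
#include <arrow/api.h>
#include <iostream>

arrow::Status ScanPrimitives() {
  arrow::Int32Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  std::shared_ptr<arrow::Array> arr;
  ARROW_RETURN_NOT_OK(builder.Finish(&arr));

  const auto& ints = static_cast<const arrow::Int32Array&>(*arr);
  const int32_t* values = ints.raw_values();  // offset already applied
  int64_t sum = 0;
  for (int64_t i = 0; i < ints.length(); ++i) sum += values[i];
  std::cout << "sum=" << sum << "\n";  // sum=6

  arrow::BooleanBuilder bool_builder;
  ARROW_RETURN_NOT_OK(bool_builder.AppendValues({true, false, true}));
  ARROW_RETURN_NOT_OK(bool_builder.Finish(&arr));
  const auto& bools = static_cast<const arrow::BooleanArray&>(*arr);
  std::cout << bools.true_count() << " true\n";  // 2 true, not cached
  return arrow::Status::OK();
}
```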
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h
new file mode 100644
index 0000000000000000000000000000000000000000..0cea571be3e3244741f3df15f87c8958eedddf76
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h
@@ -0,0 +1,215 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+
+#include "arrow/array/builder_base.h"
+#include "arrow/buffer.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup numeric-builders
+///
+/// @{
+
+namespace internal {
+
+class ARROW_EXPORT AdaptiveIntBuilderBase : public ArrayBuilder {
+ public:
+  AdaptiveIntBuilderBase(uint8_t start_int_size, MemoryPool* pool,
+                         int64_t alignment = kDefaultBufferAlignment);
+
+  explicit AdaptiveIntBuilderBase(MemoryPool* pool,
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : AdaptiveIntBuilderBase(sizeof(uint8_t), pool, alignment) {}
+
+  /// \brief Append multiple nulls
+  /// \param[in] length the number of nulls to append
+  Status AppendNulls(int64_t length) final {
+    ARROW_RETURN_NOT_OK(CommitPendingData());
+    if (ARROW_PREDICT_TRUE(length > 0)) {
+      ARROW_RETURN_NOT_OK(Reserve(length));
+      memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length);
+      UnsafeSetNull(length);
+    }
+    return Status::OK();
+  }
+
+  Status AppendNull() final {
+    pending_data_[pending_pos_] = 0;
+    pending_valid_[pending_pos_] = 0;
+    pending_has_nulls_ = true;
+    ++pending_pos_;
+    ++length_;
+    ++null_count_;
+
+    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
+      return CommitPendingData();
+    }
+    return Status::OK();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    ARROW_RETURN_NOT_OK(CommitPendingData());
+    if (ARROW_PREDICT_TRUE(length > 0)) {
+      ARROW_RETURN_NOT_OK(Reserve(length));
+      memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length);
+      UnsafeSetNotNull(length);
+    }
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    pending_data_[pending_pos_] = 0;
+    pending_valid_[pending_pos_] = 1;
+    ++pending_pos_;
+    ++length_;
+
+    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
+      return CommitPendingData();
+    }
+    return Status::OK();
+  }
+
+  void Reset() override;
+  Status Resize(int64_t capacity) override;
+
+ protected:
+  Status AppendInternal(const uint64_t val) {
+    pending_data_[pending_pos_] = val;
+    pending_valid_[pending_pos_] = 1;
+    ++pending_pos_;
+    ++length_;
+
+    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
+      return CommitPendingData();
+    }
+    return Status::OK();
+  }
+
+  virtual Status CommitPendingData() = 0;
+
+  template <typename new_type, typename old_type>
+  typename std::enable_if<sizeof(old_type) >= sizeof(new_type), Status>::type
+  ExpandIntSizeInternal();
+  template <typename new_type, typename old_type>
+  typename std::enable_if<(sizeof(old_type) < sizeof(new_type)), Status>::type
+  ExpandIntSizeInternal();
+
+  std::shared_ptr<ResizableBuffer> data_;
+  uint8_t* raw_data_ = NULLPTR;
+
+  const uint8_t start_int_size_;
+  uint8_t int_size_;
+
+  static constexpr int32_t pending_size_ = 1024;
+  uint8_t pending_valid_[pending_size_];
+  uint64_t pending_data_[pending_size_];
+  int32_t pending_pos_ = 0;
+  bool pending_has_nulls_ = false;
+};
+
+} // namespace internal
+
+class ARROW_EXPORT AdaptiveUIntBuilder : public internal::AdaptiveIntBuilderBase {
+ public:
+  explicit AdaptiveUIntBuilder(uint8_t start_int_size,
+                               MemoryPool* pool = default_memory_pool());
+
+  explicit AdaptiveUIntBuilder(MemoryPool* pool = default_memory_pool())
+      : AdaptiveUIntBuilder(sizeof(uint8_t), pool) {}
+
+  using internal::AdaptiveIntBuilderBase::Reset;
+
+  /// Scalar append
+  Status Append(const uint64_t val) { return AppendInternal(val); }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const uint64_t* values, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  std::shared_ptr<DataType> type() const override;
+
+ protected:
+  Status CommitPendingData() override;
+  Status ExpandIntSize(uint8_t new_int_size);
+
+  Status AppendValuesInternal(const uint64_t* values, int64_t length,
+                              const uint8_t* valid_bytes);
+
+  template <typename new_type>
+  Status ExpandIntSizeN();
+};
+
+class ARROW_EXPORT AdaptiveIntBuilder : public internal::AdaptiveIntBuilderBase {
+ public:
+  explicit AdaptiveIntBuilder(uint8_t start_int_size,
+                              MemoryPool* pool = default_memory_pool(),
+                              int64_t alignment = kDefaultBufferAlignment);
+
+  explicit AdaptiveIntBuilder(MemoryPool* pool = default_memory_pool(),
+                              int64_t alignment = kDefaultBufferAlignment)
+      : AdaptiveIntBuilder(sizeof(uint8_t), pool, alignment) {}
+
+  using internal::AdaptiveIntBuilderBase::Reset;
+
+  /// Scalar append
+  Status Append(const int64_t val) { return AppendInternal(static_cast<uint64_t>(val)); }
+
+  /// \brief Append a sequence of elements in one shot
+  /// \param[in] values a contiguous C array of values
+  /// \param[in] length the number of values to append
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const int64_t* values, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  std::shared_ptr<DataType> type() const override;
+
+ protected:
+  Status CommitPendingData() override;
+  Status ExpandIntSize(uint8_t new_int_size);
+
+  Status AppendValuesInternal(const int64_t* values, int64_t length,
+                              const uint8_t* valid_bytes);
+
+  template <typename new_type>
+  Status ExpandIntSizeN();
+};
+
+/// @}
+
+} // namespace arrow
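The adaptive builders start at one byte per value and widen the physical representation only when an appended value requires it, so the finished array carries the smallest integer type that fits. A usage sketch (editorial, assuming `arrow/api.h`):

```cpp
// Sketch only: AdaptiveIntBuilder widening from int8 to int32.
#include <arrow/api.h>
#include <iostream>

arrow::Status BuildAdaptive() {
  arrow::AdaptiveIntBuilder builder;
  ARROW_RETURN_NOT_OK(builder.Append(1));       // fits in one byte
  ARROW_RETURN_NOT_OK(builder.Append(100000));  // forces expansion to 4 bytes
  std::shared_ptr<arrow::Array> arr;
  ARROW_RETURN_NOT_OK(builder.Finish(&arr));
  std::cout << arr->type()->ToString() << "\n";  // expected: int32
  return arrow::Status::OK();
}
```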
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h
new file mode 100644
index 0000000000000000000000000000000000000000..d825f7d32520a309905871155a81b9adccff3c07
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_binary.h
@@ -0,0 +1,971 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/array_binary.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/data.h"
+#include "arrow/buffer.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/binary_view_util.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup binary-builders
+///
+/// @{
+
+// ----------------------------------------------------------------------
+// Binary and String
+
+template <typename TYPE>
+class BaseBinaryBuilder
+    : public ArrayBuilder,
+      public internal::ArrayBuilderExtraOps<BaseBinaryBuilder<TYPE>, std::string_view> {
+ public:
+  using TypeClass = TYPE;
+  using offset_type = typename TypeClass::offset_type;
+
+  explicit BaseBinaryBuilder(MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment),
+        offsets_builder_(pool, alignment),
+        value_data_builder_(pool, alignment) {}
+
+  BaseBinaryBuilder(const std::shared_ptr<DataType>& type, MemoryPool* pool)
+      : BaseBinaryBuilder(pool) {}
+
+  Status Append(const uint8_t* value, offset_type length) {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendNextOffset();
+    // Safety check for UBSAN.
+    if (ARROW_PREDICT_TRUE(length > 0)) {
+      ARROW_RETURN_NOT_OK(ValidateOverflow(length));
+      ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length));
+    }
+
+    UnsafeAppendToBitmap(true);
+    return Status::OK();
+  }
+
+  Status Append(const char* value, offset_type length) {
+    return Append(reinterpret_cast<const uint8_t*>(value), length);
+  }
+
+  Status Append(std::string_view value) {
+    return Append(value.data(), static_cast<offset_type>(value.size()));
+  }
+
+  /// Extend the last appended value by appending more data at the end
+  ///
+  /// Unlike Append, this does not create a new offset.
+  Status ExtendCurrent(const uint8_t* value, offset_type length) {
+    // Safety check for UBSAN.
+    if (ARROW_PREDICT_TRUE(length > 0)) {
+      ARROW_RETURN_NOT_OK(ValidateOverflow(length));
+      ARROW_RETURN_NOT_OK(value_data_builder_.Append(value, length));
+    }
+    return Status::OK();
+  }
+
+  Status ExtendCurrent(std::string_view value) {
+    return ExtendCurrent(reinterpret_cast<const uint8_t*>(value.data()),
+                         static_cast<offset_type>(value.size()));
+  }
+
+  Status AppendNulls(int64_t length) final {
+    const int64_t num_bytes = value_data_builder_.length();
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    for (int64_t i = 0; i < length; ++i) {
+      offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+    }
+    UnsafeAppendToBitmap(length, false);
+    return Status::OK();
+  }
+
+  Status AppendNull() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendNextOffset();
+    UnsafeAppendToBitmap(false);
+    return Status::OK();
+  }
+
+  Status AppendEmptyValue() final {
+    ARROW_RETURN_NOT_OK(Reserve(1));
+    UnsafeAppendNextOffset();
+    UnsafeAppendToBitmap(true);
+    return Status::OK();
+  }
+
+  Status AppendEmptyValues(int64_t length) final {
+    const int64_t num_bytes = value_data_builder_.length();
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    for (int64_t i = 0; i < length; ++i) {
+      offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+    }
+    UnsafeAppendToBitmap(length, true);
+    return Status::OK();
+  }
+
+  /// \brief Append without checking capacity
+  ///
+  /// Offsets and data should have been presized using Reserve() and
+  /// ReserveData(), respectively.
+  void UnsafeAppend(const uint8_t* value, offset_type length) {
+    UnsafeAppendNextOffset();
+    value_data_builder_.UnsafeAppend(value, length);
+    UnsafeAppendToBitmap(true);
+  }
+
+  void UnsafeAppend(const char* value, offset_type length) {
+    UnsafeAppend(reinterpret_cast<const uint8_t*>(value), length);
+  }
+
+  void UnsafeAppend(const std::string& value) {
+    UnsafeAppend(value.c_str(), static_cast<offset_type>(value.size()));
+  }
+
+  void UnsafeAppend(std::string_view value) {
+    UnsafeAppend(value.data(), static_cast<offset_type>(value.size()));
+  }
+
+  /// Like ExtendCurrent, but do not check capacity
+  void UnsafeExtendCurrent(const uint8_t* value, offset_type length) {
+    value_data_builder_.UnsafeAppend(value, length);
+  }
+
+  void UnsafeExtendCurrent(std::string_view value) {
+    UnsafeExtendCurrent(reinterpret_cast<const uint8_t*>(value.data()),
+                        static_cast<offset_type>(value.size()));
+  }
+
+  void UnsafeAppendNull() {
+    const int64_t num_bytes = value_data_builder_.length();
+    offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+    UnsafeAppendToBitmap(false);
+  }
+
+  void UnsafeAppendEmptyValue() {
+    const int64_t num_bytes = value_data_builder_.length();
+    offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+    UnsafeAppendToBitmap(true);
+  }
+
+  /// \brief Append a sequence of strings in one shot.
+  ///
+  /// \param[in] values a vector of strings
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const std::vector<std::string>& values,
+                      const uint8_t* valid_bytes = NULLPTR) {
+    std::size_t total_length = std::accumulate(
+        values.begin(), values.end(), 0ULL,
+        [](uint64_t sum, const std::string& str) { return sum + str.size(); });
+    ARROW_RETURN_NOT_OK(Reserve(values.size()));
+    ARROW_RETURN_NOT_OK(ReserveData(total_length));
+
+    if (valid_bytes != NULLPTR) {
+      for (std::size_t i = 0; i < values.size(); ++i) {
+        UnsafeAppendNextOffset();
+        if (valid_bytes[i]) {
+          value_data_builder_.UnsafeAppend(
+              reinterpret_cast<const uint8_t*>(values[i].data()), values[i].size());
+        }
+      }
+    } else {
+      for (const auto& value : values) {
+        UnsafeAppendNextOffset();
+        value_data_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(value.data()),
+                                         value.size());
+      }
+    }
+
+    UnsafeAppendToBitmap(valid_bytes, values.size());
+    return Status::OK();
+  }
+
+  /// \brief Append a sequence of nul-terminated strings in one shot.
+  /// If one of the values is NULL, it is processed as a null
+  /// value even if the corresponding valid_bytes entry is 1.
+  ///
+  /// \param[in] values a contiguous C array of nul-terminated char *
+  /// \param[in] length the number of values to append
+  /// \param[in] valid_bytes an optional sequence of bytes where non-zero
+  /// indicates a valid (non-null) value
+  /// \return Status
+  Status AppendValues(const char** values, int64_t length,
+                      const uint8_t* valid_bytes = NULLPTR) {
+    std::size_t total_length = 0;
+    std::vector<std::size_t> value_lengths(length);
+    bool have_null_value = false;
+    for (int64_t i = 0; i < length; ++i) {
+      if (values[i] != NULLPTR) {
+        auto value_length = strlen(values[i]);
+        value_lengths[i] = value_length;
+        total_length += value_length;
+      } else {
+        have_null_value = true;
+      }
+    }
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    ARROW_RETURN_NOT_OK(ReserveData(total_length));
+
+    if (valid_bytes) {
+      int64_t valid_bytes_offset = 0;
+      for (int64_t i = 0; i < length; ++i) {
+        UnsafeAppendNextOffset();
+        if (valid_bytes[i]) {
+          if (values[i]) {
+            value_data_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values[i]),
+                                             value_lengths[i]);
+          } else {
+            UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset,
+                                 i - valid_bytes_offset);
+            UnsafeAppendToBitmap(false);
+            valid_bytes_offset = i + 1;
+          }
+        }
+      }
+      UnsafeAppendToBitmap(valid_bytes + valid_bytes_offset, length - valid_bytes_offset);
+    } else {
+      if (have_null_value) {
+        std::vector<uint8_t> valid_vector(length, 0);
+        for (int64_t i = 0; i < length; ++i) {
+          UnsafeAppendNextOffset();
+          if (values[i]) {
+            value_data_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values[i]),
+                                             value_lengths[i]);
+            valid_vector[i] = 1;
+          }
+        }
+        UnsafeAppendToBitmap(valid_vector.data(), length);
+      } else {
+        for (int64_t i = 0; i < length; ++i) {
+          UnsafeAppendNextOffset();
+          value_data_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values[i]),
+                                           value_lengths[i]);
+        }
+        UnsafeAppendToBitmap(NULLPTR, length);
+      }
+    }
+    return Status::OK();
+  }
+
+  Status AppendArraySlice(const ArraySpan& array, int64_t offset,
+                          int64_t length) override {
+    auto bitmap = array.GetValues<uint8_t>(0, 0);
+    auto offsets = array.GetValues<offset_type>(1);
+    auto data = array.GetValues<uint8_t>(2, 0);
+    auto total_length = offsets[offset + length] - offsets[offset];
+    ARROW_RETURN_NOT_OK(Reserve(length));
+    ARROW_RETURN_NOT_OK(ReserveData(total_length));
+    for (int64_t i = 0; i < length; i++) {
+      if (!bitmap || bit_util::GetBit(bitmap, array.offset + offset + i)) {
+        const offset_type start = offsets[offset + i];
+        const offset_type end = offsets[offset + i + 1];
+        UnsafeAppend(data + start, end - start);
+      } else {
+        UnsafeAppendNull();
+      }
+    }
+    return Status::OK();
+  }
+
+  void Reset() override {
+    ArrayBuilder::Reset();
+    offsets_builder_.Reset();
+    value_data_builder_.Reset();
+  }
+
+  Status ValidateOverflow(int64_t new_bytes) {
+    auto new_size = value_data_builder_.length() + new_bytes;
+    if (ARROW_PREDICT_FALSE(new_size > memory_limit())) {
+      return Status::CapacityError("array cannot contain more than ", memory_limit(),
+                                   " bytes, have ", new_size);
+    } else {
+      return Status::OK();
+    }
+  }
+
+  Status Resize(int64_t capacity) override {
+    ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
+    // One more than requested for offsets
+    ARROW_RETURN_NOT_OK(offsets_builder_.Resize(capacity + 1));
+    return ArrayBuilder::Resize(capacity);
+  }
+
+  /// \brief Ensures there is enough allocated capacity to append the indicated
+  /// number of bytes to the value data buffer without additional allocations
+  Status ReserveData(int64_t elements) {
+    ARROW_RETURN_NOT_OK(ValidateOverflow(elements));
+    return value_data_builder_.Reserve(elements);
+  }
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
+    // Write final offset (values length)
+    ARROW_RETURN_NOT_OK(AppendNextOffset());
+
+    // These buffers' padding zeroed by BufferBuilder
+    std::shared_ptr<Buffer> offsets, value_data, null_bitmap;
+    ARROW_RETURN_NOT_OK(offsets_builder_.Finish(&offsets));
+    ARROW_RETURN_NOT_OK(value_data_builder_.Finish(&value_data));
+    ARROW_RETURN_NOT_OK(null_bitmap_builder_.Finish(&null_bitmap));
+
+    *out = ArrayData::Make(type(), length_, {null_bitmap, offsets, value_data},
+                           null_count_, 0);
+    Reset();
+    return Status::OK();
+  }
+
+  /// \return data pointer of the value data builder
+  const uint8_t* value_data() const { return value_data_builder_.data(); }
+  /// \return size of values buffer so far
+  int64_t value_data_length() const { return value_data_builder_.length(); }
+  /// \return capacity of values buffer
+  int64_t value_data_capacity() const { return value_data_builder_.capacity(); }
+
+  /// \return data pointer of the offsets builder
+  const offset_type* offsets_data() const { return offsets_builder_.data(); }
+
+  /// Temporary access to a value.
+  ///
+  /// This pointer becomes invalid on the next modifying operation.
+  const uint8_t* GetValue(int64_t i, offset_type* out_length) const {
+    const offset_type* offsets = offsets_builder_.data();
+    const auto offset = offsets[i];
+    if (i == (length_ - 1)) {
+      *out_length = static_cast<offset_type>(value_data_builder_.length()) - offset;
+    } else {
+      *out_length = offsets[i + 1] - offset;
+    }
+    return value_data_builder_.data() + offset;
+  }
+
+  offset_type offset(int64_t i) const { return offsets_data()[i]; }
+
+  /// Temporary access to a value.
+  ///
+  /// This view becomes invalid on the next modifying operation.
+  std::string_view GetView(int64_t i) const {
+    offset_type value_length;
+    const uint8_t* value_data = GetValue(i, &value_length);
+    return std::string_view(reinterpret_cast<const char*>(value_data), value_length);
+  }
+
+  // Cannot make this a static attribute because of linking issues
+  static constexpr int64_t memory_limit() {
+    return std::numeric_limits<offset_type>::max() - 1;
+  }
+
+ protected:
+  TypedBufferBuilder<offset_type> offsets_builder_;
+  TypedBufferBuilder<uint8_t> value_data_builder_;
+
+  Status AppendNextOffset() {
+    const int64_t num_bytes = value_data_builder_.length();
+    return offsets_builder_.Append(static_cast<offset_type>(num_bytes));
+  }
+
+  void UnsafeAppendNextOffset() {
+    const int64_t num_bytes = value_data_builder_.length();
+    offsets_builder_.UnsafeAppend(static_cast<offset_type>(num_bytes));
+  }
+};
+
+/// \class BinaryBuilder
+/// \brief Builder class for variable-length binary data
+class ARROW_EXPORT BinaryBuilder : public BaseBinaryBuilder<BinaryType> {
+ public:
+  using BaseBinaryBuilder::BaseBinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<BinaryArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return binary(); }
+};
+
+/// \class StringBuilder
+/// \brief Builder class for UTF8 strings
+class ARROW_EXPORT StringBuilder : public BinaryBuilder {
+ public:
+  using BinaryBuilder::BinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<StringArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return utf8(); }
+};
+
+/// \class LargeBinaryBuilder
+/// \brief Builder class for large variable-length binary data
+class ARROW_EXPORT LargeBinaryBuilder : public BaseBinaryBuilder<LargeBinaryType> {
+ public:
+  using BaseBinaryBuilder::BaseBinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<LargeBinaryArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return large_binary(); }
+};
+
+/// \class LargeStringBuilder
+/// \brief Builder class for large UTF8 strings
+class ARROW_EXPORT LargeStringBuilder : public LargeBinaryBuilder {
+ public:
+  using LargeBinaryBuilder::LargeBinaryBuilder;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<LargeStringArray>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return large_utf8(); }
+};
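The `Reserve()`/`ReserveData()` contract documented above enables the checked-then-unchecked append pattern: presize both the offsets and the value-data buffers, then use the `UnsafeAppend` variants with no per-element error handling. A sketch (editorial, assuming `arrow/api.h`):

```cpp
// Sketch only: presize for 3 slots and 13 value bytes, then append unchecked.
#include <arrow/api.h>

arrow::Result<std::shared_ptr<arrow::StringArray>> BuildStrings() {
  arrow::StringBuilder builder;
  ARROW_RETURN_NOT_OK(builder.Reserve(3));       // offsets + validity bitmap
  ARROW_RETURN_NOT_OK(builder.ReserveData(13));  // "foo" (3) + "some bytes" (10)
  builder.UnsafeAppend(std::string_view("foo"));
  builder.UnsafeAppendNull();
  builder.UnsafeAppend(std::string_view("some bytes"));
  std::shared_ptr<arrow::StringArray> out;
  ARROW_RETURN_NOT_OK(builder.Finish(&out));  // typed Finish declared above
  return out;
}
```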
+
+// ----------------------------------------------------------------------
+// BinaryViewBuilder, StringViewBuilder
+//
+// These builders do not support building raw pointer view arrays.
+
+namespace internal {
+
+// We allocate medium-sized memory chunks and accumulate data in those, which
+// may result in some waste if there are many large-ish strings. If a string
+// comes along that does not fit into a block, we allocate a new block and
+// write into that.
+//
+// Later we can implement optimizations to continuing filling underfull blocks
+// after encountering a large string that required allocating a new block.
+class ARROW_EXPORT StringHeapBuilder {
+ public:
+  static constexpr int64_t kDefaultBlocksize = 32 << 10;  // 32KB
+
+  StringHeapBuilder(MemoryPool* pool, int64_t alignment)
+      : pool_(pool), alignment_(alignment) {}
+
+  void SetBlockSize(int64_t blocksize) { blocksize_ = blocksize; }
+
+  using c_type = BinaryViewType::c_type;
+
+  template <bool Safe>
+  std::conditional_t<Safe, Result<c_type>, c_type> Append(const uint8_t* value,
+                                                          int64_t length) {
+    if (length <= BinaryViewType::kInlineSize) {
+      return util::ToInlineBinaryView(value, static_cast<int32_t>(length));
+    }
+
+    if constexpr (Safe) {
+      ARROW_RETURN_NOT_OK(Reserve(length));
+    }
+
+    auto v =
+        util::ToBinaryView(value, static_cast<int32_t>(length),
+                           static_cast<int32_t>(blocks_.size() - 1), current_offset_);
+
+    memcpy(current_out_buffer_, value, static_cast<size_t>(length));
+    current_out_buffer_ += length;
+    current_remaining_bytes_ -= length;
+    current_offset_ += static_cast<int32_t>(length);
+    return v;
+  }
+
+  static constexpr int64_t ValueSizeLimit() {
+    return std::numeric_limits<int32_t>::max();
+  }
+
+  /// \brief Ensure that the indicated number of bytes can be appended via
+  /// UnsafeAppend operations without the need to allocate more memory
+  Status Reserve(int64_t num_bytes) {
+    if (ARROW_PREDICT_FALSE(num_bytes > ValueSizeLimit())) {
+      return Status::CapacityError(
+          "BinaryView or StringView elements cannot reference "
+          "strings larger than 2GB");
+    }
+    if (num_bytes > current_remaining_bytes_) {
+      ARROW_RETURN_NOT_OK(FinishLastBlock());
+      current_remaining_bytes_ = num_bytes > blocksize_ ? num_bytes : blocksize_;
+      ARROW_ASSIGN_OR_RAISE(
+          std::shared_ptr<ResizableBuffer> new_block,
+          AllocateResizableBuffer(current_remaining_bytes_, alignment_, pool_));
+      current_offset_ = 0;
+      current_out_buffer_ = new_block->mutable_data();
+      blocks_.emplace_back(std::move(new_block));
+    }
+    return Status::OK();
+  }
+
+  void Reset() {
+    current_offset_ = 0;
+    current_out_buffer_ = NULLPTR;
+    current_remaining_bytes_ = 0;
+    blocks_.clear();
+  }
+
+  int64_t current_remaining_bytes() const { return current_remaining_bytes_; }
+
+  Result<std::vector<std::shared_ptr<ResizableBuffer>>> Finish() {
+    if (!blocks_.empty()) {
+      ARROW_RETURN_NOT_OK(FinishLastBlock());
+    }
+    current_offset_ = 0;
+    current_out_buffer_ = NULLPTR;
+    current_remaining_bytes_ = 0;
+    return std::move(blocks_);
+  }
+
+ private:
+  Status FinishLastBlock() {
+    if (current_remaining_bytes_ > 0) {
+      // Avoid leaking uninitialized bytes from the allocator
+      ARROW_RETURN_NOT_OK(
+          blocks_.back()->Resize(blocks_.back()->size() - current_remaining_bytes_,
+                                 /*shrink_to_fit=*/true));
+      blocks_.back()->ZeroPadding();
+    }
+    return Status::OK();
+  }
+
+  MemoryPool* pool_;
+  int64_t alignment_;
+  int64_t blocksize_ = kDefaultBlocksize;
+  std::vector<std::shared_ptr<ResizableBuffer>> blocks_;
+
+  int32_t current_offset_ = 0;
+  uint8_t* current_out_buffer_ = NULLPTR;
+  int64_t current_remaining_bytes_ = 0;
+};
+
+} // namespace internal
+
+class ARROW_EXPORT BinaryViewBuilder : public ArrayBuilder {
+ public:
+  using TypeClass = BinaryViewType;
+
+  // this constructor provided for MakeBuilder compatibility
+  BinaryViewBuilder(const std::shared_ptr<DataType>&, MemoryPool* pool);
+
+  explicit BinaryViewBuilder(MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment)
+      : ArrayBuilder(pool, alignment),
+        data_builder_(pool, alignment),
+        data_heap_builder_(pool, alignment) {}
+
+  /// Set the size for future preallocated data buffers.
+  ///
+  /// The default size is 32KB, so after each 32KB of string data appended to the builder
+  /// a new data buffer will be allocated.
Adjust this to a larger value to decrease the + /// frequency of allocation, or to a smaller value to lower the overhead of each + /// allocation. + void SetBlockSize(int64_t blocksize) { data_heap_builder_.SetBlockSize(blocksize); } + + /// The number of bytes which can be appended to this builder without allocating another + /// data buffer. + int64_t current_block_bytes_remaining() const { + return data_heap_builder_.current_remaining_bytes(); + } + + Status Append(const uint8_t* value, int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendToBitmap(true); + ARROW_ASSIGN_OR_RAISE(auto v, + data_heap_builder_.Append(value, length)); + data_builder_.UnsafeAppend(v); + return Status::OK(); + } + + Status Append(const char* value, int64_t length) { + return Append(reinterpret_cast(value), length); + } + + Status Append(std::string_view value) { + return Append(value.data(), static_cast(value.size())); + } + + /// \brief Append without checking capacity + /// + /// Builder should have been presized using Reserve() and ReserveData(), + /// respectively, and the value must not be larger than 2GB + void UnsafeAppend(const uint8_t* value, int64_t length) { + UnsafeAppendToBitmap(true); + auto v = data_heap_builder_.Append(value, length); + data_builder_.UnsafeAppend(v); + } + + void UnsafeAppend(const char* value, int64_t length) { + UnsafeAppend(reinterpret_cast(value), length); + } + + void UnsafeAppend(const std::string& value) { + UnsafeAppend(value.c_str(), static_cast(value.size())); + } + + void UnsafeAppend(std::string_view value) { + UnsafeAppend(value.data(), static_cast(value.size())); + } + + /// \brief Ensures there is enough allocated available capacity in the + /// out-of-line data heap to append the indicated number of bytes without + /// additional allocations + Status ReserveData(int64_t length); + + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, BinaryViewType::c_type{}); + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element (length-0 inline string) + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, BinaryViewType::c_type{}); + UnsafeSetNotNull(length); + return Status::OK(); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppendEmptyValue() { + data_builder_.UnsafeAppend(BinaryViewType::c_type{}); + UnsafeAppendToBitmap(true); + } + + /// \brief Append a slice of a BinaryViewArray passed as an ArraySpan. 
Copies + /// the underlying out-of-line string memory to avoid memory lifetime issues + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + void Reset() override; + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + Status FinishInternal(std::shared_ptr* out) override; + + std::shared_ptr type() const override { return binary_view(); } + + protected: + TypedBufferBuilder data_builder_; + + // Accumulates out-of-line data in fixed-size chunks which are then attached + // to the resulting ArrayData + internal::StringHeapBuilder data_heap_builder_; +}; + +class ARROW_EXPORT StringViewBuilder : public BinaryViewBuilder { + public: + using BinaryViewBuilder::BinaryViewBuilder; + std::shared_ptr type() const override { return utf8_view(); } +}; + +// ---------------------------------------------------------------------- +// FixedSizeBinaryBuilder + +class ARROW_EXPORT FixedSizeBinaryBuilder : public ArrayBuilder { + public: + using TypeClass = FixedSizeBinaryType; + + explicit FixedSizeBinaryBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + Status Append(const uint8_t* value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const char* value) { + return Append(reinterpret_cast(value)); + } + + Status Append(std::string_view view) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(view); + return Status::OK(); + } + + Status Append(const std::string& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const Buffer& s) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(s); + return Status::OK(); + } + + Status Append(const std::shared_ptr& s) { return Append(*s); } + + template + Status Append(const std::array& value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend( + std::string_view(reinterpret_cast(value.data()), value.size())); + return Status::OK(); + } + + Status AppendValues(const uint8_t* data, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + Status AppendValues(const uint8_t* data, int64_t length, const uint8_t* validity, + int64_t bitmap_offset); + + Status AppendNull() final; + Status AppendNulls(int64_t length) final; + + Status AppendEmptyValue() final; + Status AppendEmptyValues(int64_t length) final; + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues( + array.GetValues(1, 0) + ((array.offset + offset) * byte_width_), length, + array.GetValues(0, 0), array.offset + offset); + } + + void UnsafeAppend(const uint8_t* value) { + UnsafeAppendToBitmap(true); + if (ARROW_PREDICT_TRUE(byte_width_ > 0)) { + byte_builder_.UnsafeAppend(value, byte_width_); + } + } + + void UnsafeAppend(const char* value) { + UnsafeAppend(reinterpret_cast(value)); + } + + void UnsafeAppend(std::string_view value) { +#ifndef NDEBUG + CheckValueSize(static_cast(value.size())); +#endif + UnsafeAppend(reinterpret_cast(value.data())); + } + + void UnsafeAppend(const Buffer& s) { UnsafeAppend(std::string_view{s}); } + + void UnsafeAppend(const std::shared_ptr& s) { UnsafeAppend(*s); } + + void UnsafeAppendNull() { + UnsafeAppendToBitmap(false); + byte_builder_.UnsafeAppend(/*num_copies=*/byte_width_, 0); + 
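+    // The slot's value bytes are zero-filled above, so a null entry never
+    // exposes uninitialized memory to readers of the finished array.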
} + + Status ValidateOverflow(int64_t new_bytes) const { + auto new_size = byte_builder_.length() + new_bytes; + if (ARROW_PREDICT_FALSE(new_size > memory_limit())) { + return Status::CapacityError("array cannot contain more than ", memory_limit(), + " bytes, have ", new_size); + } else { + return Status::OK(); + } + } + + /// \brief Ensures there is enough allocated capacity to append the indicated + /// number of bytes to the value data buffer without additional allocations + Status ReserveData(int64_t elements) { + ARROW_RETURN_NOT_OK(ValidateOverflow(elements)); + return byte_builder_.Reserve(elements); + } + + void Reset() override; + Status Resize(int64_t capacity) override; + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \return size of values buffer so far + int64_t value_data_length() const { return byte_builder_.length(); } + + int32_t byte_width() const { return byte_width_; } + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + const uint8_t* GetValue(int64_t i) const; + + /// Temporary access to a value. + /// + /// This view becomes invalid on the next modifying operation. + std::string_view GetView(int64_t i) const; + + static constexpr int64_t memory_limit() { + return std::numeric_limits::max() - 1; + } + + std::shared_ptr type() const override { + return fixed_size_binary(byte_width_); + } + + protected: + int32_t byte_width_; + BufferBuilder byte_builder_; + + /// Temporary access to a value. + /// + /// This pointer becomes invalid on the next modifying operation. + uint8_t* GetMutableValue(int64_t i) { + uint8_t* data_ptr = byte_builder_.mutable_data(); + return data_ptr + i * byte_width_; + } + + void CheckValueSize(int64_t size); +}; + +/// @} + +// ---------------------------------------------------------------------- +// Chunked builders: build a sequence of BinaryArray or StringArray that are +// limited to a particular size (to the upper limit of 2GB) + +namespace internal { + +class ARROW_EXPORT ChunkedBinaryBuilder { + public: + explicit ChunkedBinaryBuilder(int32_t max_chunk_value_length, + MemoryPool* pool = default_memory_pool()); + + ChunkedBinaryBuilder(int32_t max_chunk_value_length, int32_t max_chunk_length, + MemoryPool* pool = default_memory_pool()); + + virtual ~ChunkedBinaryBuilder() = default; + + Status Append(const uint8_t* value, int32_t length) { + if (ARROW_PREDICT_FALSE(length + builder_->value_data_length() > + max_chunk_value_length_)) { + if (builder_->value_data_length() == 0) { + // The current item is larger than max_chunk_size_; + // this chunk will be oversize and hold *only* this item + ARROW_RETURN_NOT_OK(builder_->Append(value, length)); + return NextChunk(); + } + // The current item would cause builder_->value_data_length() to exceed + // max_chunk_size_, so finish this chunk and append the current item to the next + // chunk + ARROW_RETURN_NOT_OK(NextChunk()); + return Append(value, length); + } + + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + // The current item would cause builder_->length() to exceed max_chunk_length_, so + // finish this chunk and append the current item to the next chunk + ARROW_RETURN_NOT_OK(NextChunk()); + } + + return builder_->Append(value, length); + } + + Status Append(std::string_view value) { + return Append(reinterpret_cast(value.data()), + static_cast(value.size())); 
+ } + + Status AppendNull() { + if (ARROW_PREDICT_FALSE(builder_->length() == max_chunk_length_)) { + ARROW_RETURN_NOT_OK(NextChunk()); + } + return builder_->AppendNull(); + } + + Status Reserve(int64_t values); + + virtual Status Finish(ArrayVector* out); + + protected: + Status NextChunk(); + + // maximum total character data size per chunk + int64_t max_chunk_value_length_; + + // maximum elements allowed per chunk + int64_t max_chunk_length_ = kListMaximumElements; + + // when Reserve() would cause builder_ to exceed its max_chunk_length_, + // add to extra_capacity_ instead and wait to reserve until the next chunk + int64_t extra_capacity_ = 0; + + std::unique_ptr builder_; + std::vector> chunks_; +}; + +class ARROW_EXPORT ChunkedStringBuilder : public ChunkedBinaryBuilder { + public: + using ChunkedBinaryBuilder::ChunkedBinaryBuilder; + + Status Finish(ArrayVector* out) override; +}; + +} // namespace internal + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..8094250aef8d4b5068d8d8a3ec5ab6488221775c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_decimal.h @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/array_decimal.h"
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/array/data.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \addtogroup numeric-builders
+///
+/// @{
+
+class ARROW_EXPORT Decimal128Builder : public FixedSizeBinaryBuilder {
+ public:
+  using TypeClass = Decimal128Type;
+  using ValueType = Decimal128;
+
+  explicit Decimal128Builder(const std::shared_ptr<DataType>& type,
+                             MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment);
+
+  using FixedSizeBinaryBuilder::Append;
+  using FixedSizeBinaryBuilder::AppendValues;
+  using FixedSizeBinaryBuilder::Reset;
+
+  Status Append(Decimal128 val);
+  void UnsafeAppend(Decimal128 val);
+  void UnsafeAppend(std::string_view val);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<Decimal128Array>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return decimal_type_; }
+
+ protected:
+  std::shared_ptr<Decimal128Type> decimal_type_;
+};
+
+class ARROW_EXPORT Decimal256Builder : public FixedSizeBinaryBuilder {
+ public:
+  using TypeClass = Decimal256Type;
+  using ValueType = Decimal256;
+
+  explicit Decimal256Builder(const std::shared_ptr<DataType>& type,
+                             MemoryPool* pool = default_memory_pool(),
+                             int64_t alignment = kDefaultBufferAlignment);
+
+  using FixedSizeBinaryBuilder::Append;
+  using FixedSizeBinaryBuilder::AppendValues;
+  using FixedSizeBinaryBuilder::Reset;
+
+  Status Append(const Decimal256& val);
+  void UnsafeAppend(const Decimal256& val);
+  void UnsafeAppend(std::string_view val);
+
+  Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
+
+  /// \cond FALSE
+  using ArrayBuilder::Finish;
+  /// \endcond
+
+  Status Finish(std::shared_ptr<Decimal256Array>* out) { return FinishTyped(out); }
+
+  std::shared_ptr<DataType> type() const override { return decimal_type_; }
+
+ protected:
+  std::shared_ptr<Decimal256Type> decimal_type_;
+};
+
+using DecimalBuilder = Decimal128Builder;
+
+/// @}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e01d55edeb110c6b93e12722521e81ca445436
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_primitive.h
@@ -0,0 +1,555 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +#pragma once + +#include +#include +#include + +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" + +namespace arrow { + +class ARROW_EXPORT NullBuilder : public ArrayBuilder { + public: + explicit NullBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool) {} + explicit NullBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : NullBuilder(pool, alignment) {} + + /// \brief Append the specified number of null elements + Status AppendNulls(int64_t length) final { + if (length < 0) return Status::Invalid("length must be positive"); + null_count_ += length; + length_ += length; + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { return AppendNulls(1); } + + Status AppendEmptyValues(int64_t length) final { return AppendNulls(length); } + + Status AppendEmptyValue() final { return AppendEmptyValues(1); } + + Status Append(std::nullptr_t) { return AppendNull(); } + + Status AppendArraySlice(const ArraySpan&, int64_t, int64_t length) override { + return AppendNulls(length); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + std::shared_ptr type() const override { return null(); } + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } +}; + +/// \addtogroup numeric-builders +/// +/// @{ + +/// Base class for all Builders that emit an Array of a scalar numerical type. +template +class NumericBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps, typename T::c_type> { + public: + using TypeClass = T; + using value_type = typename T::c_type; + using ArrayType = typename TypeTraits::ArrayType; + + template + explicit NumericBuilder( + enable_if_parameter_free pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), + type_(TypeTraits::type_singleton()), + data_builder_(pool, alignment) {} + + NumericBuilder(const std::shared_ptr& type, MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : ArrayBuilder(pool, alignment), type_(type), data_builder_(pool, alignment) {} + + /// Append a single scalar and increase the size if necessary. 
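+  ///
+  /// Usage sketch (illustrative, not upstream documentation):
+  ///
+  ///   Int64Builder builder;
+  ///   ARROW_RETURN_NOT_OK(builder.Append(42));
+  ///   ARROW_RETURN_NOT_OK(builder.AppendNull());
+  ///   std::shared_ptr<Array> array;
+  ///   ARROW_RETURN_NOT_OK(builder.Finish(&array));  // [42, null]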
+ Status Append(const value_type val) { + ARROW_RETURN_NOT_OK(ArrayBuilder::Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + /// The memory at the corresponding data slot is set to 0 to prevent + /// uninitialized memory access + Status AppendNulls(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNull(length); + return Status::OK(); + } + + /// \brief Append a single null element + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(false); + return Status::OK(); + } + + /// \brief Append a empty element + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(value_type{}); // zero + UnsafeAppendToBitmap(true); + return Status::OK(); + } + + /// \brief Append several empty elements + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, value_type{}); // zero + UnsafeSetNotNull(length); + return Status::OK(); + } + + value_type GetValue(int64_t index) const { return data_builder_.data()[index]; } + + void Reset() override { + data_builder_.Reset(); + ArrayBuilder::Reset(); + } + + Status Resize(int64_t capacity) override { + ARROW_RETURN_NOT_OK(CheckCapacity(capacity)); + capacity = std::max(capacity, kMinBuilderCapacity); + ARROW_RETURN_NOT_OK(data_builder_.Resize(capacity)); + return ArrayBuilder::Resize(capacity); + } + + value_type operator[](int64_t index) const { return GetValue(index); } + + value_type& operator[](int64_t index) { + return reinterpret_cast(data_builder_.mutable_data())[index]; + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(valid_bytes, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] bitmap a validity bitmap to copy (may be null) + /// \param[in] bitmap_offset an offset into the validity bitmap + /// \return Status + Status AppendValues(const value_type* values, int64_t length, const uint8_t* bitmap, + int64_t bitmap_offset) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(bitmap, bitmap_offset, length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const value_type* values, int64_t length, + const std::vector& is_valid) { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values, length); + // length_ is update by these + ArrayBuilder::UnsafeAppendToBitmap(is_valid); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid) { + return AppendValues(values.data(), static_cast(values.size()), is_valid); + } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of values + /// \return Status + Status AppendValues(const std::vector& values) { + return AppendValues(values.data(), static_cast(values.size())); + } + + Status FinishInternal(std::shared_ptr* out) override { + ARROW_ASSIGN_OR_RAISE(auto null_bitmap, + null_bitmap_builder_.FinishWithLength(length_)); + ARROW_ASSIGN_OR_RAISE(auto data, data_builder_.FinishWithLength(length_)); + *out = ArrayData::Make(type(), length_, {null_bitmap, data}, null_count_); + capacity_ = length_ = null_count_ = 0; + return Status::OK(); + } + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values. 
+ /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, with a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(values_begin, values_end); + // this updates the length_ + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + } + + return Status::OK(); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1) + offset, length, + array.GetValues(0, 0), array.offset + offset); + } + + /// Append a single scalar under the assumption that the underlying Buffer is + /// large enough. + /// + /// This method does not capacity-check; make sure to call Reserve + /// beforehand. + void UnsafeAppend(const value_type val) { + ArrayBuilder::UnsafeAppendToBitmap(true); + data_builder_.UnsafeAppend(val); + } + + void UnsafeAppendNull() { + ArrayBuilder::UnsafeAppendToBitmap(false); + data_builder_.UnsafeAppend(value_type{}); // zero + } + + std::shared_ptr type() const override { return type_; } + + protected: + std::shared_ptr type_; + TypedBufferBuilder data_builder_; +}; + +// Builders + +using UInt8Builder = NumericBuilder; +using UInt16Builder = NumericBuilder; +using UInt32Builder = NumericBuilder; +using UInt64Builder = NumericBuilder; + +using Int8Builder = NumericBuilder; +using Int16Builder = NumericBuilder; +using Int32Builder = NumericBuilder; +using Int64Builder = NumericBuilder; + +using HalfFloatBuilder = NumericBuilder; +using FloatBuilder = NumericBuilder; +using DoubleBuilder = NumericBuilder; + +/// @} + +/// \addtogroup temporal-builders +/// +/// @{ + +using Date32Builder = NumericBuilder; +using Date64Builder = NumericBuilder; +using Time32Builder = NumericBuilder; +using Time64Builder = NumericBuilder; +using TimestampBuilder = NumericBuilder; +using MonthIntervalBuilder = NumericBuilder; +using DurationBuilder = NumericBuilder; + +/// @} + +class ARROW_EXPORT BooleanBuilder + : public ArrayBuilder, + public internal::ArrayBuilderExtraOps { + public: + using TypeClass = BooleanType; + using value_type = bool; + + explicit BooleanBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + BooleanBuilder(const std::shared_ptr& type, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment); + + /// Write nulls as uint8_t* (0 value indicates null) into pre-allocated memory + Status AppendNulls(int64_t length) final { + 
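+    // Reserve the full length once; the unchecked appends below then cannot
+    // outgrow the data buffer or the validity bitmap.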
ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNull(length); + return Status::OK(); + } + + Status AppendNull() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppendNull(); + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(Reserve(1)); + data_builder_.UnsafeAppend(false); + UnsafeSetNotNull(1); + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend(length, false); + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// Scalar append + Status Append(const bool val) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(val); + return Status::OK(); + } + + Status Append(const uint8_t val) { return Append(val != 0); } + + /// Scalar append, without checking for capacity + void UnsafeAppend(const bool val) { + data_builder_.UnsafeAppend(val); + UnsafeAppendToBitmap(true); + } + + void UnsafeAppendNull() { + data_builder_.UnsafeAppend(false); + UnsafeAppendToBitmap(false); + } + + void UnsafeAppend(const uint8_t val) { UnsafeAppend(val != 0); } + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous array of bytes (non-zero is 1) + /// \param[in] length the number of values to append + /// \param[in] valid_bytes an optional sequence of bytes where non-zero + /// indicates a valid (non-null) value + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const uint8_t* valid_bytes = NULLPTR); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a bitmap of values + /// \param[in] length the number of values to append + /// \param[in] validity a validity bitmap to copy (may be null) + /// \param[in] offset an offset into the values and validity bitmaps + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, const uint8_t* validity, + int64_t offset); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a contiguous C array of values + /// \param[in] length the number of values to append + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const uint8_t* values, int64_t length, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, + const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values a std::vector of bytes + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \param[in] is_valid an std::vector indicating valid (1) or null + /// (0). 
Equal in length to values + /// \return Status + Status AppendValues(const std::vector& values, const std::vector& is_valid); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values an std::vector indicating true (1) or false + /// \return Status + Status AppendValues(const std::vector& values); + + /// \brief Append a sequence of elements in one shot + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// or null(0) values + /// \return Status + template + Status AppendValues(ValuesIter values_begin, ValuesIter values_end) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + // this updates length_ + UnsafeSetNotNull(length); + return Status::OK(); + } + + /// \brief Append a sequence of elements in one shot, with a specified nullmap + /// \param[in] values_begin InputIterator to the beginning of the values + /// \param[in] values_end InputIterator pointing to the end of the values + /// \param[in] valid_begin InputIterator with elements indication valid(1) + /// or null(0) values + /// \return Status + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + static_assert(!internal::is_null_pointer::value, + "Don't pass a NULLPTR directly as valid_begin, use the 2-argument " + "version instead"); + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + // Same as above, for a pointer type ValidIter + template + enable_if_t::value, Status> AppendValues( + ValuesIter values_begin, ValuesIter values_end, ValidIter valid_begin) { + int64_t length = static_cast(std::distance(values_begin, values_end)); + ARROW_RETURN_NOT_OK(Reserve(length)); + data_builder_.UnsafeAppend( + length, [&values_begin]() -> bool { return *values_begin++; }); + + if (valid_begin == NULLPTR) { + UnsafeSetNotNull(length); + } else { + null_bitmap_builder_.UnsafeAppend( + length, [&valid_begin]() -> bool { return *valid_begin++; }); + } + length_ = null_bitmap_builder_.length(); + null_count_ = null_bitmap_builder_.false_count(); + return Status::OK(); + } + + Status AppendValues(int64_t length, bool value); + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override { + return AppendValues(array.GetValues(1, 0), length, + array.GetValues(0, 0), array.offset + offset); + } + + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + void Reset() override; + Status Resize(int64_t capacity) override; + + std::shared_ptr type() const override { return boolean(); } + + protected: + TypedBufferBuilder data_builder_; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h new file mode 100644 
index 0000000000000000000000000000000000000000..da29ae3124b5d3da32605503b29edf6920cdf6d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_time.h
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Contains declarations of time related Arrow builder types.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_primitive.h"
+
+namespace arrow {
+
+/// \addtogroup temporal-builders
+///
+/// @{
+
+// TODO(ARROW-7938): this class is untested
+
+class ARROW_EXPORT DayTimeIntervalBuilder : public NumericBuilder<DayTimeIntervalType> {
+ public:
+  using DayMilliseconds = DayTimeIntervalType::DayMilliseconds;
+
+  explicit DayTimeIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : DayTimeIntervalBuilder(day_time_interval(), pool, alignment) {}
+
+  explicit DayTimeIntervalBuilder(std::shared_ptr<DataType> type,
+                                  MemoryPool* pool = default_memory_pool(),
+                                  int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<DayTimeIntervalType>(type, pool, alignment) {}
+};
+
+class ARROW_EXPORT MonthDayNanoIntervalBuilder
+    : public NumericBuilder<MonthDayNanoIntervalType> {
+ public:
+  using MonthDayNanos = MonthDayNanoIntervalType::MonthDayNanos;
+
+  explicit MonthDayNanoIntervalBuilder(MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : MonthDayNanoIntervalBuilder(month_day_nano_interval(), pool, alignment) {}
+
+  explicit MonthDayNanoIntervalBuilder(std::shared_ptr<DataType> type,
+                                       MemoryPool* pool = default_memory_pool(),
+                                       int64_t alignment = kDefaultBufferAlignment)
+      : NumericBuilder<MonthDayNanoIntervalType>(type, pool, alignment) {}
+};
+
+/// @}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
new file mode 100644
index 0000000000000000000000000000000000000000..718ef4c32cebef1d30e4f7c036a7ab8f4b333e4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_union.h
@@ -0,0 +1,254 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_nested.h" +#include "arrow/array/builder_base.h" +#include "arrow/array/data.h" +#include "arrow/buffer_builder.h" +#include "arrow/memory_pool.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \addtogroup nested-builders +/// +/// @{ + +/// \brief Base class for union array builds. +/// +/// Note that while we subclass ArrayBuilder, as union types do not have a +/// validity bitmap, the bitmap builder member of ArrayBuilder is not used. +class ARROW_EXPORT BasicUnionBuilder : public ArrayBuilder { + public: + Status FinishInternal(std::shared_ptr* out) override; + + /// \cond FALSE + using ArrayBuilder::Finish; + /// \endcond + + Status Finish(std::shared_ptr* out) { return FinishTyped(out); } + + /// \brief Make a new child builder available to the UnionArray + /// + /// \param[in] new_child the child builder + /// \param[in] field_name the name of the field in the union array type + /// if type inference is used + /// \return child index, which is the "type" argument that needs + /// to be passed to the "Append" method to add a new element to + /// the union array. + int8_t AppendChild(const std::shared_ptr& new_child, + const std::string& field_name = ""); + + std::shared_ptr type() const override; + + int64_t length() const override { return types_builder_.length(); } + + protected: + BasicUnionBuilder(MemoryPool* pool, int64_t alignment, + const std::vector>& children, + const std::shared_ptr& type); + + int8_t NextTypeId(); + + std::vector> child_fields_; + std::vector type_codes_; + UnionMode::type mode_; + + std::vector type_id_to_children_; + std::vector type_id_to_child_id_; + // for all type_id < dense_type_id_, type_id_to_children_[type_id] != nullptr + int8_t dense_type_id_ = 0; + TypedBufferBuilder types_builder_; +}; + +/// \class DenseUnionBuilder +/// +/// This API is EXPERIMENTAL. +class ARROW_EXPORT DenseUnionBuilder : public BasicUnionBuilder { + public: + /// Use this constructor to initialize the UnionBuilder with no child builders, + /// allowing type to be inferred. You will need to call AppendChild for each of the + /// children builders you want to use. + explicit DenseUnionBuilder(MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, {}, dense_union(FieldVector{})), + offsets_builder_(pool, alignment) {} + + /// Use this constructor to specify the type explicitly. 
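+  /// For example: dense_union({field("i", int32()), field("s", utf8())}).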
+ /// You can still add child builders to the union after using this constructor + DenseUnionBuilder(MemoryPool* pool, + const std::vector>& children, + const std::shared_ptr& type, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, children, type), + offsets_builder_(pool, alignment) {} + + Status AppendNull() final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(static_cast(child_builder->length()))); + // Append a null arbitrarily to the first child + return child_builder->AppendNull(); + } + + Status AppendNulls(int64_t length) final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(length, static_cast(child_builder->length()))); + // Append just a single null to the first child + return child_builder->AppendNull(); + } + + Status AppendEmptyValue() final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(static_cast(child_builder->length()))); + // Append an empty value arbitrarily to the first child + return child_builder->AppendEmptyValue(); + } + + Status AppendEmptyValues(int64_t length) final { + const int8_t first_child_code = type_codes_[0]; + ArrayBuilder* child_builder = type_id_to_children_[first_child_code]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK( + offsets_builder_.Append(length, static_cast(child_builder->length()))); + // Append just a single empty value to the first child + return child_builder->AppendEmptyValue(); + } + + /// \brief Append an element to the UnionArray. This must be followed + /// by an append to the appropriate child builder. + /// + /// \param[in] next_type type_id of the child to which the next value will be appended. + /// + /// The corresponding child builder must be appended to independently after this method + /// is called. + Status Append(int8_t next_type) { + ARROW_RETURN_NOT_OK(types_builder_.Append(next_type)); + if (type_id_to_children_[next_type]->length() == kListMaximumElements) { + return Status::CapacityError( + "a dense UnionArray cannot contain more than 2^31 - 1 elements from a single " + "child"); + } + auto offset = static_cast(type_id_to_children_[next_type]->length()); + return offsets_builder_.Append(offset); + } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; + + Status FinishInternal(std::shared_ptr* out) override; + + private: + TypedBufferBuilder offsets_builder_; +}; + +/// \class SparseUnionBuilder +/// +/// This API is EXPERIMENTAL. +class ARROW_EXPORT SparseUnionBuilder : public BasicUnionBuilder { + public: + /// Use this constructor to initialize the UnionBuilder with no child builders, + /// allowing type to be inferred. You will need to call AppendChild for each of the + /// children builders you want to use. 
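+  ///
+  /// Usage sketch (illustrative, not upstream documentation):
+  ///
+  ///   SparseUnionBuilder builder(default_memory_pool());
+  ///   auto ints = std::make_shared<Int32Builder>();
+  ///   auto strs = std::make_shared<StringBuilder>();
+  ///   int8_t int_code = builder.AppendChild(ints, "ints");
+  ///   builder.AppendChild(strs, "strs");
+  ///   ARROW_RETURN_NOT_OK(builder.Append(int_code));
+  ///   ARROW_RETURN_NOT_OK(ints->Append(7));
+  ///   // sparse unions keep every child the same length:
+  ///   ARROW_RETURN_NOT_OK(strs->AppendEmptyValue());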
+ explicit SparseUnionBuilder(MemoryPool* pool, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, {}, sparse_union(FieldVector{})) {} + + /// Use this constructor to specify the type explicitly. + /// You can still add child builders to the union after using this constructor + SparseUnionBuilder(MemoryPool* pool, + const std::vector>& children, + const std::shared_ptr& type, + int64_t alignment = kDefaultBufferAlignment) + : BasicUnionBuilder(pool, alignment, children, type) {} + + /// \brief Append a null value. + /// + /// A null is appended to the first child, empty values to the other children. + Status AppendNull() final { + const auto first_child_code = type_codes_[0]; + ARROW_RETURN_NOT_OK(types_builder_.Append(first_child_code)); + ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNull()); + for (int i = 1; i < static_cast(type_codes_.size()); ++i) { + ARROW_RETURN_NOT_OK(type_id_to_children_[type_codes_[i]]->AppendEmptyValue()); + } + return Status::OK(); + } + + /// \brief Append multiple null values. + /// + /// Nulls are appended to the first child, empty values to the other children. + Status AppendNulls(int64_t length) final { + const auto first_child_code = type_codes_[0]; + ARROW_RETURN_NOT_OK(types_builder_.Append(length, first_child_code)); + ARROW_RETURN_NOT_OK(type_id_to_children_[first_child_code]->AppendNulls(length)); + for (int i = 1; i < static_cast(type_codes_.size()); ++i) { + ARROW_RETURN_NOT_OK( + type_id_to_children_[type_codes_[i]]->AppendEmptyValues(length)); + } + return Status::OK(); + } + + Status AppendEmptyValue() final { + ARROW_RETURN_NOT_OK(types_builder_.Append(type_codes_[0])); + for (int8_t code : type_codes_) { + ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValue()); + } + return Status::OK(); + } + + Status AppendEmptyValues(int64_t length) final { + ARROW_RETURN_NOT_OK(types_builder_.Append(length, type_codes_[0])); + for (int8_t code : type_codes_) { + ARROW_RETURN_NOT_OK(type_id_to_children_[code]->AppendEmptyValues(length)); + } + return Status::OK(); + } + + /// \brief Append an element to the UnionArray. This must be followed + /// by an append to the appropriate child builder. + /// + /// \param[in] next_type type_id of the child to which the next value will be appended. + /// + /// The corresponding child builder must be appended to independently after this method + /// is called, and all other child builders must have null or empty value appended. + Status Append(int8_t next_type) { return types_builder_.Append(next_type); } + + Status AppendArraySlice(const ArraySpan& array, int64_t offset, + int64_t length) override; +}; + +/// @} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h new file mode 100644 index 0000000000000000000000000000000000000000..d8a6663cec5803eafdd7c3bce47d8f456ee89e3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/data.h @@ -0,0 +1,634 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include // IWYU pragma: export +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" +#include "arrow/util/span.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { +// ---------------------------------------------------------------------- +// Null handling for types without a validity bitmap and the dictionary type + +ARROW_EXPORT bool IsNullSparseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullDenseUnion(const ArrayData& data, int64_t i); +ARROW_EXPORT bool IsNullRunEndEncoded(const ArrayData& data, int64_t i); + +ARROW_EXPORT bool UnionMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool RunEndEncodedMayHaveLogicalNulls(const ArrayData& data); +ARROW_EXPORT bool DictionaryMayHaveLogicalNulls(const ArrayData& data); +} // namespace internal + +// When slicing, we do not know the null count of the sliced range without +// doing some computation. To avoid doing this eagerly, we set the null count +// to -1 (any negative number will do). When Array::null_count is called the +// first time, the null count will be computed. See ARROW-33 +constexpr int64_t kUnknownNullCount = -1; + +// ---------------------------------------------------------------------- +// Generic array data container + +/// \class ArrayData +/// \brief Mutable container for generic Arrow array data +/// +/// This data structure is a self-contained representation of the memory and +/// metadata inside an Arrow array data structure (called vectors in Java). The +/// classes arrow::Array and its subclasses provide strongly-typed accessors +/// with support for the visitor pattern and other affordances. +/// +/// This class is designed for easy internal data manipulation, analytical data +/// processing, and data transport to and from IPC messages. For example, we +/// could cast from int64 to float64 like so: +/// +/// Int64Array arr = GetMyData(); +/// auto new_data = arr.data()->Copy(); +/// new_data->type = arrow::float64(); +/// DoubleArray double_arr(new_data); +/// +/// This object is also useful in an analytics setting where memory may be +/// reused. For example, if we had a group of operations all returning doubles, +/// say: +/// +/// Log(Sqrt(Expr(arr))) +/// +/// Then the low-level implementations of each of these functions could have +/// the signatures +/// +/// void Log(const ArrayData& values, ArrayData* out); +/// +/// As another example a function may consume one or more memory buffers in an +/// input array and replace them with newly-allocated data, changing the output +/// data type as well. 
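+///
+/// Zero-copy slicing reuses buffers in the same way (an illustrative sketch,
+/// not upstream documentation; GetMyData() is the hypothetical accessor used
+/// above):
+///
+///   Int64Array arr = GetMyData();
+///   std::shared_ptr<ArrayData> head = arr.data()->Slice(0, 10);
+///   // head shares arr's buffers; only offset/length differ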
+struct ARROW_EXPORT ArrayData { + ArrayData() = default; + + ArrayData(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : type(std::move(type)), length(length), null_count(null_count), offset(offset) {} + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + } + + ArrayData(std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0) + : ArrayData(std::move(type), length, null_count, offset) { + this->buffers = std::move(buffers); + this->child_data = std::move(child_data); + } + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + std::vector> buffers, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + int64_t null_count = kUnknownNullCount, int64_t offset = 0); + + static std::shared_ptr Make( + std::shared_ptr type, int64_t length, + std::vector> buffers, + std::vector> child_data, + std::shared_ptr dictionary, int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + static std::shared_ptr Make(std::shared_ptr type, int64_t length, + int64_t null_count = kUnknownNullCount, + int64_t offset = 0); + + // Move constructor + ArrayData(ArrayData&& other) noexcept + : type(std::move(other.type)), + length(other.length), + offset(other.offset), + buffers(std::move(other.buffers)), + child_data(std::move(other.child_data)), + dictionary(std::move(other.dictionary)) { + SetNullCount(other.null_count); + } + + // Copy constructor + ArrayData(const ArrayData& other) noexcept + : type(other.type), + length(other.length), + offset(other.offset), + buffers(other.buffers), + child_data(other.child_data), + dictionary(other.dictionary) { + SetNullCount(other.null_count); + } + + // Move assignment + ArrayData& operator=(ArrayData&& other) { + type = std::move(other.type); + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = std::move(other.buffers); + child_data = std::move(other.child_data); + dictionary = std::move(other.dictionary); + return *this; + } + + // Copy assignment + ArrayData& operator=(const ArrayData& other) { + type = other.type; + length = other.length; + SetNullCount(other.null_count); + offset = other.offset; + buffers = other.buffers; + child_data = other.child_data; + dictionary = other.dictionary; + return *this; + } + + std::shared_ptr Copy() const { return std::make_shared(*this); } + + /// \brief Copy all buffers and children recursively to destination MemoryManager + /// + /// This utilizes MemoryManager::CopyBuffer to create a new ArrayData object + /// recursively copying the buffers and all child buffers to the destination + /// memory manager. This includes dictionaries if applicable. + Result> CopyTo( + const std::shared_ptr& to) const; + /// \brief View or Copy this ArrayData to destination memory manager. + /// + /// Tries to view the buffer contents on the given memory manager's device + /// if possible (to avoid a copy) but falls back to copying if a no-copy view + /// isn't supported. 
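+  ///
+  /// Sketch (illustrative, not upstream documentation; `data` is an assumed
+  /// std::shared_ptr<ArrayData>):
+  ///
+  ///   auto mm = arrow::default_cpu_memory_manager();
+  ///   ARROW_ASSIGN_OR_RAISE(auto local_data, data->ViewOrCopyTo(mm));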
+ Result> ViewOrCopyTo( + const std::shared_ptr& to) const; + + bool IsNull(int64_t i) const { return !IsValid(i); } + + bool IsValid(int64_t i) const { + if (buffers[0] != NULLPTR) { + return bit_util::GetBit(buffers[0]->data(), i + offset); + } + const auto type = this->type->id(); + if (type == Type::SPARSE_UNION) { + return !internal::IsNullSparseUnion(*this, i); + } + if (type == Type::DENSE_UNION) { + return !internal::IsNullDenseUnion(*this, i); + } + if (type == Type::RUN_END_ENCODED) { + return !internal::IsNullRunEndEncoded(*this, i); + } + return null_count.load() != length; + } + + // Access a buffer's data as a typed C pointer + template + inline const T* GetValues(int i, int64_t absolute_offset) const { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValues(int i) const { + return GetValues(i, offset); + } + + // Like GetValues, but returns NULLPTR instead of aborting if the underlying + // buffer is not a CPU buffer. + template + inline const T* GetValuesSafe(int i, int64_t absolute_offset) const { + if (buffers[i] && buffers[i]->is_cpu()) { + return reinterpret_cast(buffers[i]->data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline const T* GetValuesSafe(int i) const { + return GetValuesSafe(i, offset); + } + + // Access a buffer's data as a typed C pointer + template + inline T* GetMutableValues(int i, int64_t absolute_offset) { + if (buffers[i]) { + return reinterpret_cast(buffers[i]->mutable_data()) + absolute_offset; + } else { + return NULLPTR; + } + } + + template + inline T* GetMutableValues(int i) { + return GetMutableValues(i, offset); + } + + /// \brief Construct a zero-copy slice of the data with the given offset and length + std::shared_ptr Slice(int64_t offset, int64_t length) const; + + /// \brief Input-checking variant of Slice + /// + /// An Invalid Status is returned if the requested slice falls out of bounds. + /// Note that unlike Slice, `length` isn't clamped to the available buffer size. + Result> SliceSafe(int64_t offset, int64_t length) const; + + void SetNullCount(int64_t v) { null_count.store(v); } + + /// \brief Return physical null count, or compute and set it if it's not known + int64_t GetNullCount() const; + + /// \brief Return true if the data has a validity bitmap and the physical null + /// count is known to be non-zero or not yet known. + /// + /// Note that this is not the same as MayHaveLogicalNulls, which also checks + /// for the presence of nulls in child data for types like unions and run-end + /// encoded types. + /// + /// \see HasValidityBitmap + /// \see MayHaveLogicalNulls + bool MayHaveNulls() const { + // If an ArrayData is slightly malformed it may have kUnknownNullCount set + // but no buffer + return null_count.load() != 0 && buffers[0] != NULLPTR; + } + + /// \brief Return true if the data has a validity bitmap + bool HasValidityBitmap() const { return buffers[0] != NULLPTR; } + + /// \brief Return true if the validity bitmap may have 0's in it, or if the + /// child arrays (in the case of types without a validity bitmap) may have + /// nulls, or if the dictionary of dictionay array may have nulls. + /// + /// This is not a drop-in replacement for MayHaveNulls, as historically + /// MayHaveNulls() has been used to check for the presence of a validity + /// bitmap that needs to be checked. 
+  ///
+  /// Code that previously used MayHaveNulls() and then dealt with the validity
+  /// bitmap directly can be fixed to handle all types correctly without
+  /// performance degradation when handling most types by adopting
+  /// HasValidityBitmap and MayHaveLogicalNulls.
+  ///
+  /// Before:
+  ///
+  ///     uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
+  ///     for (int64_t i = 0; i < array.length; ++i) {
+  ///       if (validity && !bit_util::GetBit(validity, i)) {
+  ///         continue;  // skip a NULL
+  ///       }
+  ///       ...
+  ///     }
+  ///
+  /// After:
+  ///
+  ///     bool all_valid = !array.MayHaveLogicalNulls();
+  ///     uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
+  ///     for (int64_t i = 0; i < array.length; ++i) {
+  ///       bool is_valid = all_valid ||
+  ///                       (validity && bit_util::GetBit(validity, i)) ||
+  ///                       array.IsValid(i);
+  ///       if (!is_valid) {
+  ///         continue;  // skip a NULL
+  ///       }
+  ///       ...
+  ///     }
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0] != NULLPTR) {
+      return null_count.load() != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return internal::UnionMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return internal::RunEndEncodedMayHaveLogicalNulls(*this);
+    }
+    if (t == Type::DICTIONARY) {
+      return internal::DictionaryMayHaveLogicalNulls(*this);
+    }
+    return null_count.load() != 0;
+  }
+
+  /// \brief Computes the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  std::shared_ptr<DataType> type;
+  int64_t length = 0;
+  mutable std::atomic<int64_t> null_count{0};
+  // The logical start point into the physical buffers (in values, not bytes).
+  // Note that, for child data, this must be *added* to the child data's own offset.
+  int64_t offset = 0;
+  std::vector<std::shared_ptr<Buffer>> buffers;
+  std::vector<std::shared_ptr<ArrayData>> child_data;
+
+  // The dictionary for this Array, if any. Only used for dictionary type
+  std::shared_ptr<ArrayData> dictionary;
+};
+
+/// \brief A non-owning Buffer reference
+struct ARROW_EXPORT BufferSpan {
+  // It is the user of this class's responsibility to ensure that
+  // buffers that were const originally are not written to
+  // accidentally.
+  uint8_t* data = NULLPTR;
+  int64_t size = 0;
+  // Pointer back to buffer that owns this memory
+  const std::shared_ptr<Buffer>* owner = NULLPTR;
+
+  template <typename T>
+  const T* data_as() const {
+    return reinterpret_cast<const T*>(data);
+  }
+  template <typename T>
+  T* mutable_data_as() {
+    return reinterpret_cast<T*>(data);
+  }
+};
+
+/// \brief EXPERIMENTAL: A non-owning ArrayData reference that is cheaply
+/// copyable and does not contain any shared_ptr objects.
+/// Do not use in public APIs aside from compute kernels for now
+struct ARROW_EXPORT ArraySpan {
+  const DataType* type = NULLPTR;
+  int64_t length = 0;
+  mutable int64_t null_count = kUnknownNullCount;
+  int64_t offset = 0;
+  BufferSpan buffers[3];
+
+  ArraySpan() = default;
+
+  explicit ArraySpan(const DataType* type, int64_t length) : type(type), length(length) {}
+
+  ArraySpan(const ArrayData& data) {  // NOLINT implicit conversion
+    SetMembers(data);
+  }
+  explicit ArraySpan(const Scalar& data) { FillFromScalar(data); }
+
+  /// If dictionary-encoded, put dictionary in the first entry
+  std::vector<ArraySpan> child_data;
+
+  /// \brief Populate ArraySpan to look like an array of length 1 pointing at
+  /// the data members of a Scalar value
+  void FillFromScalar(const Scalar& value);
+
+  void SetMembers(const ArrayData& data);
+
+  void SetBuffer(int index, const std::shared_ptr<Buffer>& buffer) {
+    this->buffers[index].data = const_cast<uint8_t*>(buffer->data());
+    this->buffers[index].size = buffer->size();
+    this->buffers[index].owner = &buffer;
+  }
+
+  const ArraySpan& dictionary() const { return child_data[0]; }
+
+  /// \brief Return the number of buffers (out of 3) that are used to
+  /// constitute this array
+  int num_buffers() const;
+
+  // Access a buffer's data as a typed C pointer
+  template <typename T>
+  inline T* GetValues(int i, int64_t absolute_offset) {
+    return reinterpret_cast<T*>(buffers[i].data) + absolute_offset;
+  }
+
+  template <typename T>
+  inline T* GetValues(int i) {
+    return GetValues<T>(i, this->offset);
+  }
+
+  // Access a buffer's data as a typed C pointer
+  template <typename T>
+  inline const T* GetValues(int i, int64_t absolute_offset) const {
+    return reinterpret_cast<const T*>(buffers[i].data) + absolute_offset;
+  }
+
+  template <typename T>
+  inline const T* GetValues(int i) const {
+    return GetValues<T>(i, this->offset);
+  }
+
+  /// \brief Access a buffer's data as a span
+  ///
+  /// \param i The buffer index
+  /// \param length The required length (in number of typed values) of the requested span
+  /// \pre i > 0
+  /// \pre length <= the length of the buffer (in number of values) that's expected for
+  /// this array type
+  /// \return A span of the requested length
+  template <typename T>
+  util::span<const T> GetSpan(int i, int64_t length) const {
+    const int64_t buffer_length = buffers[i].size / static_cast<int64_t>(sizeof(T));
+    assert(i > 0 && length + offset <= buffer_length);
+    ARROW_UNUSED(buffer_length);
+    return util::span<const T>(buffers[i].data_as<T>() + this->offset, length);
+  }
+
+  /// \brief Access a buffer's data as a span
+  ///
+  /// \param i The buffer index
+  /// \param length The required length (in number of typed values) of the requested span
+  /// \pre i > 0
+  /// \pre length <= the length of the buffer (in number of values) that's expected for
+  /// this array type
+  /// \return A span of the requested length
+  template <typename T>
+  util::span<T> GetSpan(int i, int64_t length) {
+    const int64_t buffer_length = buffers[i].size / static_cast<int64_t>(sizeof(T));
+    assert(i > 0 && length + offset <= buffer_length);
+    ARROW_UNUSED(buffer_length);
+    return util::span<T>(buffers[i].mutable_data_as<T>() + this->offset, length);
+  }
+
+  inline bool IsNull(int64_t i) const { return !IsValid(i); }
+
+  inline bool IsValid(int64_t i) const {
+    if (this->buffers[0].data != NULLPTR) {
+      return bit_util::GetBit(this->buffers[0].data, i + this->offset);
+    } else {
+      const auto type = this->type->id();
+      if (type == Type::SPARSE_UNION) {
+        return !IsNullSparseUnion(i);
+      }
+      if (type == Type::DENSE_UNION) {
+        return !IsNullDenseUnion(i);
+      }
+      if (type == Type::RUN_END_ENCODED) {
+        return !IsNullRunEndEncoded(i);
+      }
+      return this->null_count != this->length;
+    }
+  }
+
+  std::shared_ptr<ArrayData> ToArrayData() const;
+
+  std::shared_ptr<Array> ToArray() const;
+
+  std::shared_ptr<Buffer> GetBuffer(int index) const {
+    const BufferSpan& buf = this->buffers[index];
+    if (buf.owner) {
+      return *buf.owner;
+    } else if (buf.data != NULLPTR) {
+      // Buffer points to some memory without an owning buffer
+      return std::make_shared<Buffer>(buf.data, buf.size);
+    } else {
+      return NULLPTR;
+    }
+  }
+
+  void SetSlice(int64_t offset, int64_t length) {
+    this->offset = offset;
+    this->length = length;
+    if (this->type->id() == Type::NA) {
+      this->null_count = this->length;
+    } else if (this->MayHaveNulls()) {
+      this->null_count = kUnknownNullCount;
+    } else {
+      this->null_count = 0;
+    }
+  }
+
+  /// \brief Return physical null count, or compute and set it if it's not known
+  int64_t GetNullCount() const;
+
+  /// \brief Return true if the array has a validity bitmap and the physical null
+  /// count is known to be non-zero or not yet known
+  ///
+  /// Note that this is not the same as MayHaveLogicalNulls, which also checks
+  /// for the presence of nulls in child data for types like unions and run-end
+  /// encoded types.
+  ///
+  /// \see HasValidityBitmap
+  /// \see MayHaveLogicalNulls
+  bool MayHaveNulls() const {
+    // If an ArrayData is slightly malformed it may have kUnknownNullCount set
+    // but no buffer
+    return null_count != 0 && buffers[0].data != NULLPTR;
+  }
+
+  /// \brief Return true if the array has a validity bitmap
+  bool HasValidityBitmap() const { return buffers[0].data != NULLPTR; }
+
+  /// \brief Return true if the validity bitmap may have 0's in it, or if the
+  /// child arrays (in the case of types without a validity bitmap) may have
+  /// nulls, or if the dictionary of a dictionary array may have nulls.
+  ///
+  /// \see ArrayData::MayHaveLogicalNulls
+  bool MayHaveLogicalNulls() const {
+    if (buffers[0].data != NULLPTR) {
+      return null_count != 0;
+    }
+    const auto t = type->id();
+    if (t == Type::SPARSE_UNION || t == Type::DENSE_UNION) {
+      return UnionMayHaveLogicalNulls();
+    }
+    if (t == Type::RUN_END_ENCODED) {
+      return RunEndEncodedMayHaveLogicalNulls();
+    }
+    if (t == Type::DICTIONARY) {
+      return DictionaryMayHaveLogicalNulls();
+    }
+    return null_count != 0;
+  }
+
+  /// \brief Compute the logical null count for arrays of all types including
+  /// those that do not have a validity bitmap like union and run-end encoded
+  /// arrays
+  ///
+  /// If the array has a validity bitmap, this function behaves the same as
+  /// GetNullCount. For types that have no validity bitmap, this function will
+  /// recompute the logical null count every time it is called.
+  ///
+  /// \see GetNullCount
+  int64_t ComputeLogicalNullCount() const;
+
+  /// Some DataTypes (StringView, BinaryView) may have an arbitrary number of variadic
+  /// buffers. Since ArraySpan only has 3 buffers, we pack the variadic buffers into
+  /// buffers[2]; i.e. buffers[2].data points to the first shared_ptr<Buffer> of the
+  /// variadic set and buffers[2].size is the number of variadic buffers times
+  /// sizeof(shared_ptr<Buffer>).
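+  ///
+  /// A hedged sketch (not in the upstream header) of walking these packed
+  /// buffers via the accessor declared below, given some ArraySpan `span`:
+  ///
+  ///     if (span.HasVariadicBuffers()) {
+  ///       for (const std::shared_ptr<Buffer>& b : span.GetVariadicBuffers()) {
+  ///         // inspect each variadic data buffer of the view array
+  ///       }
+  ///     }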
+  ///
+  /// \see HasVariadicBuffers
+  util::span<const std::shared_ptr<Buffer>> GetVariadicBuffers() const;
+  bool HasVariadicBuffers() const;
+
+ private:
+  ARROW_FRIEND_EXPORT friend bool internal::IsNullRunEndEncoded(const ArrayData& span,
+                                                                int64_t i);
+
+  bool IsNullSparseUnion(int64_t i) const;
+  bool IsNullDenseUnion(int64_t i) const;
+
+  /// \brief Return true if the value at logical index i is null
+  ///
+  /// This function uses binary-search, so it has a O(log N) cost.
+  /// Iterating over the whole array and calling IsNull is O(N log N), so
+  /// for better performance it is recommended to use a
+  /// ree_util::RunEndEncodedArraySpan to iterate run by run instead.
+  bool IsNullRunEndEncoded(int64_t i) const;
+
+  bool UnionMayHaveLogicalNulls() const;
+  bool RunEndEncodedMayHaveLogicalNulls() const;
+  bool DictionaryMayHaveLogicalNulls() const;
+};
+
+namespace internal {
+
+void FillZeroLengthArray(const DataType* type, ArraySpan* span);
+
+/// Construct a zero-copy view of this ArrayData with the given type.
+///
+/// This method checks if the types are layout-compatible.
+/// Nested types are traversed in depth-first order. Data buffers must have
+/// the same item sizes, even though the logical types may be different.
+/// An error is returned if the types are not layout-compatible.
+ARROW_EXPORT
+Result<std::shared_ptr<ArrayData>> GetArrayView(const std::shared_ptr<ArrayData>& data,
+                                                const std::shared_ptr<DataType>& type);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h
new file mode 100644
index 0000000000000000000000000000000000000000..a405164b333f3b21a17e8414ef59a8a628c28579
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/diff.h
@@ -0,0 +1,76 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <iosfwd>
+#include <memory>
+#include <utility>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/array_nested.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief Compare two arrays, returning an edit script which expresses the difference
+/// between them
+///
+/// An edit script is an array of struct(insert: bool, run_length: int64_t).
+/// Each element of "insert" determines whether an element was inserted into (true)
+/// or deleted from (false) base. Each insertion or deletion is followed by a run of
+/// elements which are unchanged from base to target; the length of this run is stored
+/// in "run_length". (Note that the edit script begins and ends with a run of shared
+/// elements but both fields of the struct must have the same length.
+/// To accommodate this, the first element of "insert" should be ignored.)
+///
+/// For example for base "hlloo" and target "hello", the edit script would be
+/// [
+///   {"insert": false, "run_length": 1}, // leading run of length 1 ("h")
+///   {"insert": true, "run_length": 3},  // insert("e") then a run of length 3 ("llo")
+///   {"insert": false, "run_length": 0}  // delete("o") then an empty run
+/// ]
+///
+/// Diffing arrays containing nulls is not currently supported.
+///
+/// \param[in] base baseline for comparison
+/// \param[in] target an array of identical type to base whose elements differ from base's
+/// \param[in] pool memory to store the result will be allocated from this memory pool
+/// \return an edit script array which can be applied to base to produce target
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Diff(const Array& base, const Array& target,
+                                    MemoryPool* pool = default_memory_pool());
+
+/// \brief visitor interface for easy traversal of an edit script
+///
+/// visitor will be called for each hunk of insertions and deletions.
+ARROW_EXPORT Status VisitEditScript(
+    const Array& edits,
+    const std::function<Status(int64_t delete_begin, int64_t delete_end,
+                               int64_t insert_begin, int64_t insert_end)>& visitor);
+
+/// \brief return a function which will format an edit script in unified
+/// diff format to os, given base and target arrays of type
+ARROW_EXPORT Result<
+    std::function<Status(const Array& edits, const Array& base, const Array& target)>>
+MakeUnifiedDiffFormatter(const DataType& type, std::ostream* os);
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ebfa0a51edce21ca585862b1dbb074b6cf8d9c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/array/validate.h
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+// Internal functions implementing Array::Validate() and friends.
+
+// O(1) array metadata validation
+
+ARROW_EXPORT
+Status ValidateArray(const Array& array);
+
+ARROW_EXPORT
+Status ValidateArray(const ArrayData& data);
+
+// O(N) array data validation.
+// Note that, starting from 7.0.0, "full" routines also validate metadata.
+// Before, ValidateArray() needed to be called before ValidateArrayFull()
+// to ensure metadata correctness, otherwise invalid memory accesses
+// may occur.
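+//
+// Illustrative call-site sketch (not part of the upstream header): cheap
+// metadata checks first, then the O(N) data checks. On versions >= 7.0.0 the
+// first call is redundant since the "full" routine validates metadata itself.
+//
+//   ARROW_RETURN_NOT_OK(internal::ValidateArray(*array));      // O(1)
+//   ARROW_RETURN_NOT_OK(internal::ValidateArrayFull(*array));  // O(N)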
+ +ARROW_EXPORT +Status ValidateArrayFull(const Array& array); + +ARROW_EXPORT +Status ValidateArrayFull(const ArrayData& data); + +ARROW_EXPORT +Status ValidateUTF8(const Array& array); + +ARROW_EXPORT +Status ValidateUTF8(const ArrayData& data); + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h new file mode 100644 index 0000000000000000000000000000000000000000..b701d9928691f42b70a201569feb27d5ea86f8cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +/// \defgroup compute-functions Abstract compute function API +/// @{ +/// @} + +/// \defgroup compute-concrete-options Concrete option classes for compute functions +/// @{ +/// @} + +#include "arrow/compute/api_aggregate.h" // IWYU pragma: export +#include "arrow/compute/api_scalar.h" // IWYU pragma: export +#include "arrow/compute/api_vector.h" // IWYU pragma: export +#include "arrow/compute/cast.h" // IWYU pragma: export +#include "arrow/compute/function.h" // IWYU pragma: export +#include "arrow/compute/function_options.h" // IWYU pragma: export +#include "arrow/compute/kernel.h" // IWYU pragma: export +#include "arrow/compute/registry.h" // IWYU pragma: export +#include "arrow/datum.h" // IWYU pragma: export + +#include "arrow/compute/expression.h" // IWYU pragma: export + +/// \defgroup execnode-row Utilities for working with data in a row-major format +/// @{ +/// @} + +#include "arrow/compute/row/grouper.h" // IWYU pragma: export + +/// \defgroup acero-internals Acero internals, useful for those extending Acero +/// @{ +/// @} + +#include "arrow/compute/exec.h" // IWYU pragma: export diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..18e56092dda2a5f8f997de5b5cd1c81262e77a8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "arrow/compute/function.h"
+#include "arrow/compute/function_options.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+
+namespace compute {
+
+class ExecContext;
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+class ARROW_EXPORT CastOptions : public FunctionOptions {
+ public:
+  explicit CastOptions(bool safe = true);
+
+  static constexpr char const kTypeName[] = "CastOptions";
+  static CastOptions Safe(TypeHolder to_type = {}) {
+    CastOptions safe(true);
+    safe.to_type = std::move(to_type);
+    return safe;
+  }
+
+  static CastOptions Unsafe(TypeHolder to_type = {}) {
+    CastOptions unsafe(false);
+    unsafe.to_type = std::move(to_type);
+    return unsafe;
+  }
+
+  // Type being cast to. May be passed separately to the eager function
+  // compute::Cast
+  TypeHolder to_type;
+
+  bool allow_int_overflow;
+  bool allow_time_truncate;
+  bool allow_time_overflow;
+  bool allow_decimal_truncate;
+  bool allow_float_truncate;
+  // Indicate if conversions from Binary/FixedSizeBinary to string must
+  // validate the utf8 payload.
+  bool allow_invalid_utf8;
+
+  /// true if the safety options all match CastOptions::Safe
+  ///
+  /// Note: if this returns false, it does not mean is_unsafe will return true
+  bool is_safe() const;
+  /// true if the safety options all match CastOptions::Unsafe
+  ///
+  /// Note: if this returns false, it does not mean is_safe will return true
+  bool is_unsafe() const;
+};
+
+/// @}
+
+/// \brief Return true if a cast function is defined
+ARROW_EXPORT
+bool CanCast(const DataType& from_type, const DataType& to_type);
+
+// ----------------------------------------------------------------------
+// Convenience invocation APIs for a number of kernels
+
+/// \brief Cast from one array type to another
+/// \param[in] value array to cast
+/// \param[in] to_type type to cast to
+/// \param[in] options casting options
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting array
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Cast(const Array& value, const TypeHolder& to_type,
+                                    const CastOptions& options = CastOptions::Safe(),
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Cast from one array type to another
+/// \param[in] value array to cast
+/// \param[in] options casting options.
The "to_type" field must be populated +/// \param[in] ctx the function execution context, optional +/// \return the resulting array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const CastOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Cast from one value to another +/// \param[in] value datum to cast +/// \param[in] to_type type to cast to +/// \param[in] options casting options +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const TypeHolder& to_type, + const CastOptions& options = CastOptions::Safe(), + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h new file mode 100644 index 0000000000000000000000000000000000000000..2b86f642166e2ccb8a49e3842d98120d59cb25e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h @@ -0,0 +1,409 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle. + +#pragma once + +#include +#include +#include + +#include "arrow/compute/kernel.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-functions +/// @{ + +/// \brief Contains the number of required arguments for the function. +/// +/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity. 
+struct ARROW_EXPORT Arity {
+  /// \brief A function taking no arguments
+  static Arity Nullary() { return Arity(0, false); }
+
+  /// \brief A function taking 1 argument
+  static Arity Unary() { return Arity(1, false); }
+
+  /// \brief A function taking 2 arguments
+  static Arity Binary() { return Arity(2, false); }
+
+  /// \brief A function taking 3 arguments
+  static Arity Ternary() { return Arity(3, false); }
+
+  /// \brief A function taking a variable number of arguments
+  ///
+  /// \param[in] min_args the minimum number of arguments required when
+  /// invoking the function
+  static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); }
+
+  // NOTE: the 0-argument form (default constructor) is required for Cython
+  explicit Arity(int num_args = 0, bool is_varargs = false)
+      : num_args(num_args), is_varargs(is_varargs) {}
+
+  /// The number of required arguments (or the minimum number for varargs
+  /// functions).
+  int num_args;
+
+  /// If true, then num_args is the minimum number of required arguments.
+  bool is_varargs = false;
+};
+
+struct ARROW_EXPORT FunctionDoc {
+  /// \brief A one-line summary of the function, using a verb.
+  ///
+  /// For example, "Add two numeric arrays or scalars".
+  std::string summary;
+
+  /// \brief A detailed description of the function, meant to follow the summary.
+  std::string description;
+
+  /// \brief Symbolic names (identifiers) for the function arguments.
+  ///
+  /// Some bindings may use this to generate nicer function signatures.
+  std::vector<std::string> arg_names;
+
+  // TODO add argument descriptions?
+
+  /// \brief Name of the options class, if any.
+  std::string options_class;
+
+  /// \brief Whether options are required for function execution
+  ///
+  /// If false, then either the function does not have an options class
+  /// or there is a usable default options value.
+  bool options_required;
+
+  FunctionDoc() = default;
+
+  FunctionDoc(std::string summary, std::string description,
+              std::vector<std::string> arg_names, std::string options_class = "",
+              bool options_required = false)
+      : summary(std::move(summary)),
+        description(std::move(description)),
+        arg_names(std::move(arg_names)),
+        options_class(std::move(options_class)),
+        options_required(options_required) {}
+
+  static const FunctionDoc& Empty();
+};
+
+/// \brief An executor of a function with a preconfigured kernel
+class ARROW_EXPORT FunctionExecutor {
+ public:
+  virtual ~FunctionExecutor() = default;
+  /// \brief Initialize or re-initialize the preconfigured kernel
+  ///
+  /// This method may be called zero or more times. Depending on how
+  /// the FunctionExecutor was obtained, it may already have been initialized.
+  virtual Status Init(const FunctionOptions* options = NULLPTR,
+                      ExecContext* exec_ctx = NULLPTR) = 0;
+  /// \brief Execute the preconfigured kernel with arguments that must fit it
+  ///
+  /// The method requires the arguments be castable to the preconfigured types.
+  ///
+  /// \param[in] args Arguments to execute the function on
+  /// \param[in] length Length of arguments batch or -1 to default it. If the
+  /// function has no parameters, this determines the batch length, defaulting
+  /// to 0. Otherwise, if the function is scalar, this must equal the argument
+  /// batch's inferred length or be -1 to default to it. This is ignored for
+  /// vector functions.
+  virtual Result<Datum> Execute(const std::vector<Datum>& args, int64_t length = -1) = 0;
+};
+
+/// \brief Base class for compute functions.
+/// Function implementations contain a collection of "kernels" which are
+/// implementations of the function for specific argument types. Selecting a
+/// viable kernel for executing a function is referred to as "dispatching".
+class ARROW_EXPORT Function {
+ public:
+  /// \brief The kind of function, which indicates in what contexts it is
+  /// valid for use.
+  enum Kind {
+    /// A function that performs scalar data operations on whole arrays of
+    /// data. Can generally process Array or Scalar values. The size of the
+    /// output will be the same as the size (or broadcasted size, in the case
+    /// of mixing Array and Scalar inputs) of the input.
+    SCALAR,
+
+    /// A function with array input and output whose behavior depends on the
+    /// values of the entire arrays passed, rather than the value of each scalar
+    /// value.
+    VECTOR,
+
+    /// A function that computes scalar summary statistics from array input.
+    SCALAR_AGGREGATE,
+
+    /// A function that computes grouped summary statistics from array input
+    /// and an array of group identifiers.
+    HASH_AGGREGATE,
+
+    /// A function that dispatches to other functions and does not contain its
+    /// own kernels.
+    META
+  };
+
+  virtual ~Function() = default;
+
+  /// \brief The name of the function. The registry enforces uniqueness of names.
+  const std::string& name() const { return name_; }
+
+  /// \brief The kind of function, which indicates in what contexts it is valid
+  /// for use.
+  Function::Kind kind() const { return kind_; }
+
+  /// \brief Contains the number of arguments the function requires, or if the
+  /// function accepts variable numbers of arguments.
+  const Arity& arity() const { return arity_; }
+
+  /// \brief Return the function documentation
+  const FunctionDoc& doc() const { return doc_; }
+
+  /// \brief Returns the number of registered kernels for this function.
+  virtual int num_kernels() const = 0;
+
+  /// \brief Return a kernel that can execute the function given the exact
+  /// argument types (without implicit type casts).
+  ///
+  /// NB: This function is overridden in CastFunction.
+  virtual Result<const Kernel*> DispatchExact(
+      const std::vector<TypeHolder>& types) const;
+
+  /// \brief Return a best-match kernel that can execute the function given the argument
+  /// types, after implicit casts are applied.
+  ///
+  /// \param[in,out] values Argument types. An element may be modified to
+  /// indicate that the returned kernel only approximately matches the input
+  /// value descriptors; callers are responsible for casting inputs to the type
+  /// required by the kernel.
+  virtual Result<const Kernel*> DispatchBest(std::vector<TypeHolder>* values) const;
+
+  /// \brief Get a function executor with a best-matching kernel
+  ///
+  /// The returned executor will by default work with the default FunctionOptions
+  /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`.
+  virtual Result<std::shared_ptr<FunctionExecutor>> GetBestExecutor(
+      std::vector<TypeHolder> inputs) const;
+
+  /// \brief Execute the function eagerly with the passed input arguments with
+  /// kernel dispatch, batch iteration, and memory allocation details taken
+  /// care of.
+  ///
+  /// If the `options` pointer is null, then `default_options()` will be used.
+  ///
+  /// This function can be overridden in subclasses.
+  virtual Result<Datum> Execute(const std::vector<Datum>& args,
+                                const FunctionOptions* options, ExecContext* ctx) const;
+
+  virtual Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
+                                ExecContext* ctx) const;
+
+  /// \brief Returns the default options for this function.
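+  ///
+  /// Illustrative eager-call sketch (an assumption, not upstream text), given
+  /// a resolved `func`, argument vector `args`, and ExecContext* `ctx`:
+  ///
+  ///     ARROW_ASSIGN_OR_RAISE(
+  ///         Datum out, func->Execute(args, func->default_options(), ctx));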
+ /// + /// Whatever option semantics a Function has, implementations must guarantee + /// that default_options() is valid to pass to Execute as options. + const FunctionOptions* default_options() const { return default_options_; } + + virtual Status Validate() const; + + /// \brief Returns the pure property for this function. + /// + /// Impure functions are those that may return different results for the same + /// input arguments. For example, a function that returns a random number is + /// not pure. An expression containing only pure functions can be simplified by + /// pre-evaluating any sub-expressions that have constant arguments. + virtual bool is_pure() const { return true; } + + protected: + Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : name_(std::move(name)), + kind_(kind), + arity_(arity), + doc_(std::move(doc)), + default_options_(default_options) {} + + Status CheckArity(size_t num_args) const; + + std::string name_; + Function::Kind kind_; + Arity arity_; + const FunctionDoc doc_; + const FunctionOptions* default_options_ = NULLPTR; +}; + +namespace detail { + +template +class FunctionImpl : public Function { + public: + /// \brief Return pointers to current-available kernels for inspection + std::vector kernels() const { + std::vector result; + for (const auto& kernel : kernels_) { + result.push_back(&kernel); + } + return result; + } + + int num_kernels() const override { return static_cast(kernels_.size()); } + + protected: + FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : Function(std::move(name), kind, arity, std::move(doc), default_options) {} + + std::vector kernels_; +}; + +/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned. +ARROW_EXPORT +const Kernel* DispatchExactImpl(const Function* func, const std::vector&); + +/// \brief Return an error message if no Kernel is found. +ARROW_EXPORT +Status NoMatchingKernel(const Function* func, const std::vector&); + +} // namespace detail + +/// \brief A function that executes elementwise operations on arrays or +/// scalars, and therefore whose results generally do not depend on the order +/// of the values in the arguments. Accepts and returns arrays that are all of +/// the same size. These functions roughly correspond to the functions used in +/// SQL expressions. +class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl { + public: + using KernelType = ScalarKernel; + + ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR, bool is_pure = true) + : detail::FunctionImpl(std::move(name), Function::SCALAR, arity, + std::move(doc), default_options), + is_pure_(is_pure) {} + + /// \brief Add a kernel with given input/output types, no required state + /// initialization, preallocation for fixed-width types, and default null + /// handling (intersect validity bitmaps of inputs). + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarKernel kernel); + + /// \brief Returns the pure property for this function. 
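+  ///
+  /// For example (illustrative), a kernel wrapping a random-number generator
+  /// would be constructed with is_pure = false so that expression
+  /// simplification does not constant-fold its calls.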
+ bool is_pure() const override { return is_pure_; } + + private: + const bool is_pure_; +}; + +/// \brief A function that executes general array operations that may yield +/// outputs of different sizes or have results that depend on the whole array +/// contents. These functions roughly correspond to the functions found in +/// non-SQL array languages like APL and its derivatives. +class ARROW_EXPORT VectorFunction : public detail::FunctionImpl { + public: + using KernelType = VectorKernel; + + VectorFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), Function::VECTOR, arity, + std::move(doc), default_options) {} + + /// \brief Add a simple kernel with given input/output types, no required + /// state initialization, no data preallocation, and no preallocation of the + /// validity bitmap. + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(VectorKernel kernel); +}; + +class ARROW_EXPORT ScalarAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = ScalarAggregateKernel; + + ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::SCALAR_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarAggregateKernel kernel); +}; + +class ARROW_EXPORT HashAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = HashAggregateKernel; + + HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::HASH_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(HashAggregateKernel kernel); +}; + +/// \brief A function that dispatches to other functions. Must implement +/// MetaFunction::ExecuteImpl. +/// +/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution +/// of concrete Function types, but must handle other Datum kinds on its own. 
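+///
+/// A minimal subclass sketch (illustrative; the class name is hypothetical):
+///
+///     class MyDispatchFunction : public MetaFunction {
+///       // (constructor forwarding name/arity/doc omitted for brevity)
+///       Result<Datum> ExecuteImpl(const std::vector<Datum>& args,
+///                                 const FunctionOptions* options,
+///                                 ExecContext* ctx) const override {
+///         // inspect args, then delegate to a concrete registered function
+///       }
+///     };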
+class ARROW_EXPORT MetaFunction : public Function { + public: + int num_kernels() const override { return 0; } + + Result Execute(const std::vector& args, const FunctionOptions* options, + ExecContext* ctx) const override; + + Result Execute(const ExecBatch& batch, const FunctionOptions* options, + ExecContext* ctx) const override; + + protected: + virtual Result ExecuteImpl(const std::vector& args, + const FunctionOptions* options, + ExecContext* ctx) const = 0; + + MetaFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : Function(std::move(name), Function::META, arity, std::move(doc), + default_options) {} +}; + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h new file mode 100644 index 0000000000000000000000000000000000000000..88ec2fd2d0679b5c849549179aa652bec9b37b56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle. + +#pragma once + +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-functions +/// @{ + +/// \brief Extension point for defining options outside libarrow (but +/// still within this project). +class ARROW_EXPORT FunctionOptionsType { + public: + virtual ~FunctionOptionsType() = default; + + virtual const char* type_name() const = 0; + virtual std::string Stringify(const FunctionOptions&) const = 0; + virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0; + virtual Result> Serialize(const FunctionOptions&) const; + virtual Result> Deserialize( + const Buffer& buffer) const; + virtual std::unique_ptr Copy(const FunctionOptions&) const = 0; +}; + +/// \brief Base class for specifying options configuring a function's behavior, +/// such as error handling. +class ARROW_EXPORT FunctionOptions : public util::EqualityComparable { + public: + virtual ~FunctionOptions() = default; + + const FunctionOptionsType* options_type() const { return options_type_; } + const char* type_name() const { return options_type()->type_name(); } + + bool Equals(const FunctionOptions& other) const; + std::string ToString() const; + std::unique_ptr Copy() const; + /// \brief Serialize an options struct to a buffer. + Result> Serialize() const; + /// \brief Deserialize an options struct from a buffer. 
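+  ///
+  /// Illustrative round trip (a sketch, not normative):
+  ///
+  ///     ARROW_ASSIGN_OR_RAISE(auto buf, options.Serialize());
+  ///     ARROW_ASSIGN_OR_RAISE(
+  ///         auto copy, FunctionOptions::Deserialize(options.type_name(), *buf));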
+ /// Note: this will only look for `type_name` in the default FunctionRegistry; + /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then + /// call FunctionOptionsType::Deserialize(). + static Result> Deserialize( + const std::string& type_name, const Buffer& buffer); + + protected: + explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {} + const FunctionOptionsType* options_type_; +}; + +ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h new file mode 100644 index 0000000000000000000000000000000000000000..61caa2b570dd31dc988d34406f9b05c3573333e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/compare.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +enum class SortOrder { + /// Arrange values in increasing order + Ascending, + /// Arrange values in decreasing order + Descending, +}; + +enum class NullPlacement { + /// Place nulls and NaNs before any non-null values. + /// NaNs will come after nulls. + AtStart, + /// Place nulls and NaNs after any non-null values. + /// NaNs will come before nulls. + AtEnd, +}; + +/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices +class ARROW_EXPORT SortKey : public util::EqualityComparable { + public: + explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending) + : target(std::move(target)), order(order) {} + + bool Equals(const SortKey& other) const; + std::string ToString() const; + + /// A FieldRef targeting the sort column. + FieldRef target; + /// How to order by this sort key. + SortOrder order; +}; + +class ARROW_EXPORT Ordering : public util::EqualityComparable { + public: + Ordering(std::vector sort_keys, + NullPlacement null_placement = NullPlacement::AtStart) + : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {} + /// true if data ordered by other is also ordered by this + /// + /// For example, if data is ordered by [a, b, c] then it is also ordered + /// by [a, b] but not by [b, c] or [a, b, c, d]. + /// + /// [a, b].IsSuborderOf([a, b, c]) - true + /// [a, b, c].IsSuborderOf([a, b, c]) - true + /// [b, c].IsSuborderOf([a, b, c]) - false + /// [a, b, c, d].IsSuborderOf([a, b, c]) - false + /// + /// The implicit ordering is not a suborder of any other ordering and + /// no other ordering is a suborder of it. 
The implicit ordering is not a + /// suborder of itself. + /// + /// The unordered ordering is a suborder of all other orderings but no + /// other ordering is a suborder of it. The unordered ordering is a suborder + /// of itself. + /// + /// The unordered ordering is a suborder of the implicit ordering. + bool IsSuborderOf(const Ordering& other) const; + + bool Equals(const Ordering& other) const; + std::string ToString() const; + + bool is_implicit() const { return is_implicit_; } + bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); } + + const std::vector& sort_keys() const { return sort_keys_; } + NullPlacement null_placement() const { return null_placement_; } + + static const Ordering& Implicit() { + static const Ordering kImplicit(true); + return kImplicit; + } + + static const Ordering& Unordered() { + static const Ordering kUnordered(false); + // It is also possible to get an unordered ordering by passing in an empty vector + // using the normal constructor. This is ok and useful when ordering comes from user + // input. + return kUnordered; + } + + private: + explicit Ordering(bool is_implicit) + : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {} + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys_; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement_; + bool is_implicit_ = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h new file mode 100644 index 0000000000000000000000000000000000000000..f31c4c1ba5920626578a4e4170e3cd2d28288545 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +class Function; +class FunctionOptionsType; + +/// \brief A mutable central function registry for built-in functions as well +/// as user-defined functions. Functions are implementations of +/// arrow::compute::Function. +/// +/// Generally, each function contains kernels which are implementations of a +/// function for a specific argument signature. After looking up a function in +/// the registry, one can either execute it eagerly with Function::Execute or +/// use one of the function's dispatch methods to pick a suitable kernel for +/// lower-level function execution. 
+class ARROW_EXPORT FunctionRegistry {
+ public:
+  ~FunctionRegistry();
+
+  /// \brief Construct a new registry.
+  ///
+  /// Most users only need to use the global registry.
+  static std::unique_ptr<FunctionRegistry> Make();
+
+  /// \brief Construct a new nested registry with the given parent.
+  ///
+  /// Most users only need to use the global registry. The returned registry never changes
+  /// its parent, even when an operation allows overwriting.
+  static std::unique_ptr<FunctionRegistry> Make(FunctionRegistry* parent);
+
+  /// \brief Check whether a new function can be added to the registry.
+  ///
+  /// \returns Status::KeyError if a function with the same name is already registered.
+  Status CanAddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
+
+  /// \brief Add a new function to the registry.
+  ///
+  /// \returns Status::KeyError if a function with the same name is already registered.
+  Status AddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
+
+  /// \brief Check whether an alias can be added for the given function name.
+  ///
+  /// \returns Status::KeyError if the function with the given name is not registered.
+  Status CanAddAlias(const std::string& target_name, const std::string& source_name);
+
+  /// \brief Add an alias for the given function name.
+  ///
+  /// \returns Status::KeyError if the function with the given name is not registered.
+  Status AddAlias(const std::string& target_name, const std::string& source_name);
+
+  /// \brief Check whether a new function options type can be added to the registry.
+  ///
+  /// \returns Status::KeyError if a function options type with the same name is already
+  /// registered.
+  Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type,
+                                   bool allow_overwrite = false);
+
+  /// \brief Add a new function options type to the registry.
+  ///
+  /// \returns Status::KeyError if a function options type with the same name is already
+  /// registered.
+  Status AddFunctionOptionsType(const FunctionOptionsType* options_type,
+                                bool allow_overwrite = false);
+
+  /// \brief Retrieve a function by name from the registry.
+  Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const;
+
+  /// \brief Return a vector of all entry names in the registry.
+  ///
+  /// Helpful for displaying a manifest of available functions.
+  std::vector<std::string> GetFunctionNames() const;
+
+  /// \brief Retrieve a function options type by name from the registry.
+  Result<const FunctionOptionsType*> GetFunctionOptionsType(
+      const std::string& name) const;
+
+  /// \brief The number of currently registered functions.
+  int num_functions() const;
+
+  /// \brief The cast function object registered in AddFunction.
+  ///
+  /// Helpful for retrieving the cast function as needed.
+  const Function* cast_function() const;
+
+ private:
+  FunctionRegistry();
+
+  // Use PIMPL pattern to not have std::unordered_map here
+  class FunctionRegistryImpl;
+  std::unique_ptr<FunctionRegistryImpl> impl_;
+
+  explicit FunctionRegistry(FunctionRegistryImpl* impl);
+};
+
+}  // namespace compute
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..88dce160ce9362895e6b57f1d12afb6023f3eb48
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h
@@ -0,0 +1,294 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/mutex.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/type_fwd.h" + +#if defined(__clang__) || defined(__GNUC__) +#define BYTESWAP(x) __builtin_bswap64(x) +#define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31))) +#define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63))) +#define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +#elif defined(_MSC_VER) +#include +#define BYTESWAP(x) _byteswap_uint64(x) +#define ROTL(x, n) _rotl((x), (n)) +#define ROTL64(x, n) _rotl64((x), (n)) +#if defined(_M_X64) || defined(_M_I86) +#include // https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx +#define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +#else +#define PREFETCH(ptr) (void)(ptr) /* disabled */ +#endif +#endif + +namespace arrow { +namespace util { + +// Some platforms typedef int64_t as long int instead of long long int, +// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics +// which need long long. +// We use the cast to the type below in these intrinsics to make the code +// compile in all cases. +// +using int64_for_gather_t = const long long int; // NOLINT runtime-int + +// All MiniBatch... classes use TempVectorStack for vector allocations and can +// only work with vectors up to 1024 elements. +// +// They should only be allocated on the stack to guarantee the right sequence +// of allocation and deallocation of vectors from TempVectorStack. +// +class MiniBatch { + public: + static constexpr int kLogMiniBatchLength = 10; + static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength; +}; + +/// Storage used to allocate temporary vectors of a batch size. +/// Temporary vectors should resemble allocating temporary variables on the stack +/// but in the context of vectorized processing where we need to store a vector of +/// temporaries instead of a single value. +class ARROW_EXPORT TempVectorStack { + template + friend class TempVectorHolder; + + public: + Status Init(MemoryPool* pool, int64_t size) { + num_vectors_ = 0; + top_ = 0; + buffer_size_ = EstimatedAllocationSize(size); + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateResizableBuffer(size, pool)); + // Ensure later operations don't accidentally read uninitialized memory. 
+ std::memset(buffer->mutable_data(), 0xFF, size); + buffer_ = std::move(buffer); + return Status::OK(); + } + + private: + static int64_t EstimatedAllocationSize(int64_t size) { + return PaddedAllocationSize(size) + 2 * sizeof(uint64_t); + } + + static int64_t PaddedAllocationSize(int64_t num_bytes) { + // Round up allocation size to multiple of 8 bytes + // to avoid returning temp vectors with unaligned address. + // + // Also add padding at the end to facilitate loads and stores + // using SIMD when number of vector elements is not divisible + // by the number of SIMD lanes. + // + return ::arrow::bit_util::RoundUp(num_bytes, sizeof(int64_t)) + kPadding; + } + void alloc(uint32_t num_bytes, uint8_t** data, int* id); + void release(int id, uint32_t num_bytes); + static constexpr uint64_t kGuard1 = 0x3141592653589793ULL; + static constexpr uint64_t kGuard2 = 0x0577215664901532ULL; + static constexpr int64_t kPadding = 64; + int num_vectors_; + int64_t top_; + std::unique_ptr buffer_; + int64_t buffer_size_; +}; + +template +class TempVectorHolder { + friend class TempVectorStack; + + public: + ~TempVectorHolder() { stack_->release(id_, num_elements_ * sizeof(T)); } + T* mutable_data() { return reinterpret_cast(data_); } + TempVectorHolder(TempVectorStack* stack, uint32_t num_elements) { + stack_ = stack; + num_elements_ = num_elements; + stack_->alloc(num_elements * sizeof(T), &data_, &id_); + } + + private: + TempVectorStack* stack_; + uint8_t* data_; + int id_; + uint32_t num_elements_; +}; + +namespace bit_util { + +ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags, + const int num_bits, const uint8_t* bits, + int* num_indexes, uint16_t* indexes, + int bit_offset = 0); + +ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags, + const int num_bits, const uint8_t* bits, + const uint16_t* input_indexes, int* num_indexes, + uint16_t* indexes, int bit_offset = 0); + +// Input and output indexes may be pointing to the same data (in-place filtering). +ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits, + const uint8_t* bits, int* num_indexes_bit0, + uint16_t* indexes_bit0, uint16_t* indexes_bit1, + int bit_offset = 0); + +// Bit 1 is replaced with byte 0xFF. +ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits, + const uint8_t* bits, uint8_t* bytes, int bit_offset = 0); + +// Return highest bit of each byte. +ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits, + const uint8_t* bytes, uint8_t* bits, int bit_offset = 0); + +ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes, + uint32_t num_bytes); + +#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2) +// The functions below use BMI2 instructions, be careful before calling! 
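+//
+// Illustrative dispatch sketch (an assumption about call sites, not part of
+// the upstream header); CpuInfo::AVX2 is assumed from arrow/util/cpu_info.h:
+//
+//   if (hardware_flags & ::arrow::internal::CpuInfo::AVX2) {
+//     avx2::bits_to_bytes_avx2(num_bits, bits, bytes);
+//   } else {
+//     bits_to_bytes(/*hardware_flags=*/0, num_bits, bits, bytes);
+//   }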
+ +namespace avx2 { +ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, + const uint16_t* input_indexes, + int* num_indexes, uint16_t* indexes); +ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, int* num_indexes, + uint16_t* indexes, uint16_t base_index = 0); +ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits, + uint8_t* bytes); +ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes, + uint8_t* bits); +ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes); +} // namespace avx2 + +#endif + +} // namespace bit_util +} // namespace util + +namespace compute { + +/// Modify an Expression with pre-order and post-order visitation. +/// `pre` will be invoked on each Expression. `pre` will visit Calls before their +/// arguments, `post_call` will visit Calls (and no other Expressions) after their +/// arguments. Visitors should return the Identical expression to indicate no change; this +/// will prevent unnecessary construction in the common case where a modification is not +/// possible/necessary/... +/// +/// If an argument was modified, `post_call` visits a reconstructed Call with the modified +/// arguments but also receives a pointer to the unmodified Expression as a second +/// argument. If no arguments were modified the unmodified Expression* will be nullptr. +template +Result ModifyExpression(Expression expr, const PreVisit& pre, + const PostVisitCall& post_call) { + ARROW_ASSIGN_OR_RAISE(expr, Result(pre(std::move(expr)))); + + auto call = expr.call(); + if (!call) return expr; + + bool at_least_one_modified = false; + std::vector modified_arguments; + + for (size_t i = 0; i < call->arguments.size(); ++i) { + ARROW_ASSIGN_OR_RAISE(auto modified_argument, + ModifyExpression(call->arguments[i], pre, post_call)); + + if (Identical(modified_argument, call->arguments[i])) { + continue; + } + + if (!at_least_one_modified) { + modified_arguments = call->arguments; + at_least_one_modified = true; + } + + modified_arguments[i] = std::move(modified_argument); + } + + if (at_least_one_modified) { + // reconstruct the call expression with the modified arguments + auto modified_call = *call; + modified_call.arguments = std::move(modified_arguments); + return post_call(Expression(std::move(modified_call)), &expr); + } + + return post_call(std::move(expr), NULLPTR); +} + +// Helper class to calculate the modified number of rows to process using SIMD. +// +// Some array elements at the end will be skipped in order to avoid buffer +// overrun, when doing memory loads and stores using larger word size than a +// single array element. 
+// +class TailSkipForSIMD { + public: + static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows, + int bit_offset) { + int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset); + int64_t num_bytes_safe = + std::max(static_cast(0LL), num_bytes - num_bytes_accessed_together + 1); + int64_t num_rows_safe = + std::max(static_cast(0LL), 8 * num_bytes_safe - bit_offset); + return std::min(num_rows_safe, num_rows); + } + static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + int64_t length) { + int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together); + int64_t num_rows_safe = + std::max(static_cast(0LL), num_rows - num_rows_to_skip); + return num_rows_safe; + } + static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + const uint32_t* offsets) { + // Do not process rows that could read past the end of the buffer using N + // byte loads/stores. + // + int64_t num_rows_safe = num_rows; + while (num_rows_safe > 0 && + offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) { + --num_rows_safe; + } + return num_rows_safe; + } + static int FixSelection(int64_t num_rows_safe, int num_selected, + const uint16_t* selection) { + int num_selected_safe = num_selected; + while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) { + --num_selected_safe; + } + return num_selected_safe; + } +}; + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..93b327bf3ae2b63bc4439d77440b54d10e45810a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { + +class PromotionGraph; + +class ARROW_EXPORT ChunkedArrayBuilder { + public: + virtual ~ChunkedArrayBuilder() = default; + + /// Spawn a task that will try to convert and insert the given JSON block + virtual void Insert(int64_t block_index, + const std::shared_ptr& unconverted_field, + const std::shared_ptr& unconverted) = 0; + + /// Return the final chunked array. + /// Every chunk must be inserted before this is called! 
+ virtual Status Finish(std::shared_ptr* out) = 0; + + /// Finish current task group and substitute a new one + virtual Status ReplaceTaskGroup( + const std::shared_ptr& task_group) = 0; + + protected: + explicit ChunkedArrayBuilder( + const std::shared_ptr& task_group) + : task_group_(task_group) {} + + std::shared_ptr task_group_; +}; + +/// create a chunked builder +/// +/// if unexpected fields and promotion need to be handled, promotion_graph must be +/// non-null +ARROW_EXPORT Status MakeChunkedArrayBuilder( + const std::shared_ptr& task_group, MemoryPool* pool, + const PromotionGraph* promotion_graph, const std::shared_ptr& type, + std::shared_ptr* out); + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h new file mode 100644 index 0000000000000000000000000000000000000000..9ed85126da1412774bc216737b7f4abc3795815c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/delimiting.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { + +struct ParseOptions; + +ARROW_EXPORT +std::unique_ptr MakeChunker(const ParseOptions& options); + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..9a812dd3c3afaec0ccc36f3bb72fa2d1a459f4e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
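The ChunkedArrayBuilder contract above (insert every block, then finish) can be exercised directly. A minimal sketch, not taken from the Arrow sources: it assumes a serial TaskGroup from arrow/util/task_group.h, and the placeholder variables `unconverted_field` and `unconverted` stand for output of a BlockParser:

    // Hedged sketch of the ChunkedArrayBuilder flow declared above.
    // `unconverted_field` / `unconverted` are hypothetical parser outputs.
    auto task_group = arrow::internal::TaskGroup::MakeSerial();
    std::shared_ptr<arrow::json::ChunkedArrayBuilder> builder;
    ARROW_RETURN_NOT_OK(arrow::json::MakeChunkedArrayBuilder(
        task_group, arrow::default_memory_pool(),
        /*promotion_graph=*/arrow::json::GetPromotionGraph(), arrow::utf8(), &builder));
    builder->Insert(/*block_index=*/0, unconverted_field, unconverted);  // one task per block
    std::shared_ptr<arrow::ChunkedArray> chunked;
    ARROW_RETURN_NOT_OK(builder->Finish(&chunked));  // only after every block is inserted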
+ +#pragma once + +#include <memory> +#include <string> + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class Field; +class MemoryPool; + +namespace json { + +/// \brief interface for conversion of Arrays +/// +/// Converters are not required to be correct for arbitrary input, only +/// for unconverted arrays emitted by a corresponding parser. +class ARROW_EXPORT Converter { + public: + virtual ~Converter() = default; + + /// convert an array + /// on failure, this converter may be promoted to another converter which + /// *can* convert the given input. + virtual Status Convert(const std::shared_ptr<Array>& in, + std::shared_ptr<Array>* out) = 0; + + std::shared_ptr<DataType> out_type() const { return out_type_; } + + MemoryPool* pool() { return pool_; } + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Converter); + + Converter(MemoryPool* pool, const std::shared_ptr<DataType>& out_type) + : pool_(pool), out_type_(out_type) {} + + MemoryPool* pool_; + std::shared_ptr<DataType> out_type_; +}; + +/// \brief produce a single converter to the specified out_type +ARROW_EXPORT Status MakeConverter(const std::shared_ptr<DataType>& out_type, + MemoryPool* pool, std::shared_ptr<Converter>* out); + +class ARROW_EXPORT PromotionGraph { + public: + virtual ~PromotionGraph() = default; + + /// \brief produce a valid field which will be inferred as null + virtual std::shared_ptr<Field> Null(const std::string& name) const = 0; + + /// \brief given an unexpected field encountered during parsing, return a type to which + /// it may be convertible (may return null if none is available) + virtual std::shared_ptr<DataType> Infer( + const std::shared_ptr<Field>& unexpected_field) const = 0; + + /// \brief given a type to which conversion failed, return a promoted type to which + /// conversion may succeed (may return null if none is available) + virtual std::shared_ptr<DataType> Promote( + const std::shared_ptr<DataType>& failed, + const std::shared_ptr<Field>& unexpected_field) const = 0; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(PromotionGraph); + PromotionGraph() = default; +}; + +ARROW_EXPORT const PromotionGraph* GetPromotionGraph(); + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_parser.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..8035695e537cb9a022cd694993185f687ccdab04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_parser.h @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
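As a concrete illustration of the Converter interface declared above, a short sketch (again not from the Arrow sources; `unconverted` is a placeholder for a parser-emitted array) that requests a converter to float64 and applies it:

    std::shared_ptr<arrow::json::Converter> converter;
    ARROW_RETURN_NOT_OK(arrow::json::MakeConverter(
        arrow::float64(), arrow::default_memory_pool(), &converter));
    std::shared_ptr<arrow::Array> converted;
    // On failure a caller could consult PromotionGraph::Promote for a wider type.
    ARROW_RETURN_NOT_OK(converter->Convert(unconverted, &converted));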
+ +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { +namespace internal { + +/// This class is a helper to parse a json object from a string. +/// It uses rapidjson::Document in implementation. +class ARROW_EXPORT ObjectParser { + public: + ObjectParser(); + ~ObjectParser(); + + Status Parse(std::string_view json); + + Result GetString(const char* key) const; + + Result GetBool(const char* key) const; + + // Get all members of the object as a map from string keys to string values + Result> GetStringMap() const; + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..b15b09dbdacfcb56cd4b37e0eb144d9ebe46c915 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { +namespace internal { + +/// This class is a helper to serialize a json object to a string. +/// It uses rapidjson in implementation. +class ARROW_EXPORT ObjectWriter { + public: + ObjectWriter(); + ~ObjectWriter(); + + void SetString(std::string_view key, std::string_view value); + void SetBool(std::string_view key, bool value); + + std::string Serialize(); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h new file mode 100644 index 0000000000000000000000000000000000000000..d7edab9ceddb4d4e2d5c79b8652d7d47d0557b55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/json/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +class Schema; + +namespace json { + +enum class UnexpectedFieldBehavior : char { + /// Unexpected JSON fields are ignored + Ignore, + /// Unexpected JSON fields error out + Error, + /// Unexpected JSON fields are type-inferred and included in the output + InferType +}; + +struct ARROW_EXPORT ParseOptions { + // Parsing options + + /// Optional explicit schema (disables type inference on those fields) + std::shared_ptr explicit_schema; + + /// Whether objects may be printed across multiple lines (for example pretty-printed) + /// + /// If true, parsing may be slower. + bool newlines_in_values = false; + + /// How JSON fields outside of explicit_schema (if given) are treated + UnexpectedFieldBehavior unexpected_field_behavior = UnexpectedFieldBehavior::InferType; + + /// Create parsing options with default values + static ParseOptions Defaults(); +}; + +struct ARROW_EXPORT ReadOptions { + // Reader options + + /// Whether to use the global CPU thread pool + bool use_threads = true; + /// Block size we request from the IO layer; also determines the size of + /// chunks when use_threads is true + int32_t block_size = 1 << 20; // 1 MB + + /// Create read options with default values + static ReadOptions Defaults(); +}; + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..aca416dbb7b5b4915cb8d1f74d932989cde286dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h @@ -0,0 +1,107 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
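To make the options above concrete, a minimal configuration sketch; the schema, field name, and block size are arbitrary example values rather than defaults:

    auto parse_options = arrow::json::ParseOptions::Defaults();
    parse_options.explicit_schema =
        arrow::schema({arrow::field("hello", arrow::float64())});
    parse_options.unexpected_field_behavior =
        arrow::json::UnexpectedFieldBehavior::Ignore;  // drop unknown fields

    auto read_options = arrow::json::ReadOptions::Defaults();
    read_options.use_threads = false;   // parse on the calling thread
    read_options.block_size = 4 << 20;  // request 4 MiB blocks from the IO layer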
+ +#pragma once + +#include +#include + +#include "arrow/json/options.h" +#include "arrow/status.h" +#include "arrow/util/key_value_metadata.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class Buffer; +class MemoryPool; +class KeyValueMetadata; +class ResizableBuffer; + +namespace json { + +struct Kind { + enum type : uint8_t { + kNull, + kBoolean, + kNumber, + kString, + kArray, + kObject, + kNumberOrString + }; + + static const std::string& Name(Kind::type); + + static const std::shared_ptr& Tag(Kind::type); + + static Kind::type FromTag(const std::shared_ptr& tag); + + static Status ForType(const DataType& type, Kind::type* kind); +}; + +/// \class BlockParser +/// \brief A reusable block-based parser for JSON data +/// +/// The parser takes a block of newline delimited JSON data and extracts Arrays +/// of unconverted strings which can be fed to a Converter to obtain a usable Array. +/// +/// Note that in addition to parse errors (such as malformed JSON) some conversion +/// errors are caught at parse time: +/// - A null value in non-nullable column +/// - Change in the JSON kind of a column. For example, if an explicit schema is provided +/// which stipulates that field "a" is integral, a row of {"a": "not a number"} will +/// result in an error. This also applies to fields outside an explicit schema. +class ARROW_EXPORT BlockParser { + public: + virtual ~BlockParser() = default; + + /// \brief Reserve storage for scalars parsed from a block of json + virtual Status ReserveScalarStorage(int64_t nbytes) = 0; + + /// \brief Parse a block of data + virtual Status Parse(const std::shared_ptr& json) = 0; + + /// \brief Extract parsed data + virtual Status Finish(std::shared_ptr* parsed) = 0; + + /// \brief Return the number of parsed rows + int32_t num_rows() const { return num_rows_; } + + /// \brief Construct a BlockParser + /// + /// \param[in] pool MemoryPool to use when constructing parsed array + /// \param[in] options ParseOptions to use when parsing JSON + /// \param[out] out constructed BlockParser + static Status Make(MemoryPool* pool, const ParseOptions& options, + std::unique_ptr* out); + + static Status Make(const ParseOptions& options, std::unique_ptr* out); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(BlockParser); + + explicit BlockParser(MemoryPool* pool) : pool_(pool) {} + + MemoryPool* pool_; + int32_t num_rows_ = 0; +}; + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..9ed81d000c555c0118acc2b71a1439c5db0abf3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Include this file before including any RapidJSON headers. + +#pragma once + +#define RAPIDJSON_HAS_STDSTRING 1 +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 + +// rapidjson will be defined in namespace arrow::rapidjson +#define RAPIDJSON_NAMESPACE arrow::rapidjson +#define RAPIDJSON_NAMESPACE_BEGIN \ + namespace arrow { \ + namespace rapidjson { +#define RAPIDJSON_NAMESPACE_END \ + } \ + } + +// enable SIMD whitespace skipping, if available +#if defined(ARROW_HAVE_SSE4_2) +#define RAPIDJSON_SSE2 1 +#define RAPIDJSON_SSE42 1 +#endif + +#if defined(ARROW_HAVE_NEON) +#define RAPIDJSON_NEON 1 +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/reader.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..b7849a83ba1f88e54961df5a1e9739afe24ba026 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/reader.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <memory> + +#include "arrow/io/type_fwd.h" +#include "arrow/json/options.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { + +/// A class that reads an entire JSON file into an Arrow Table +/// +/// The file is expected to consist of individual line-separated JSON objects +class ARROW_EXPORT TableReader { + public: + virtual ~TableReader() = default; + + /// Read the entire JSON file and convert it to an Arrow Table + virtual Result<std::shared_ptr<Table>> Read() = 0; + + /// Create a TableReader instance + static Result<std::shared_ptr<TableReader>> Make(MemoryPool* pool, + std::shared_ptr<io::InputStream> input, + const ReadOptions&, + const ParseOptions&); +}; + +ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> ParseOne(ParseOptions options, + std::shared_ptr<Buffer> json); + +/// \brief A class that reads a JSON file incrementally +/// +/// JSON data is read from a stream in fixed-size blocks (configurable with +/// `ReadOptions::block_size`). Each block is converted to a `RecordBatch`. Yielded +/// batches have a consistent schema but may differ in row count.
+/// +/// The supplied `ParseOptions` are used to determine a schema, based either on a +/// provided explicit schema or inferred from the first non-empty block. +/// Afterwards, the target schema is frozen. If `UnexpectedFieldBehavior::InferType` is +/// specified, unexpected fields will only be inferred for the first block. Afterwards +/// they'll be treated as errors. +/// +/// If `ReadOptions::use_threads` is `true`, each block's parsing/decoding task will be +/// parallelized on the given `cpu_executor` (with readahead corresponding to the +/// executor's capacity). If an executor isn't provided, the global thread pool will be +/// used. +/// +/// If `ReadOptions::use_threads` is `false`, computations will be run on the calling +/// thread and `cpu_executor` will be ignored. +class ARROW_EXPORT StreamingReader : public RecordBatchReader { + public: + virtual ~StreamingReader() = default; + + /// \brief Read the next `RecordBatch` asynchronously + /// This function is async-reentrant (but not synchronously reentrant). However, if + /// threading is disabled, this will block until completion. + virtual Future> ReadNextAsync() = 0; + + /// Get the number of bytes which have been successfully converted to record batches + /// and consumed + [[nodiscard]] virtual int64_t bytes_processed() const = 0; + + /// \brief Create a `StreamingReader` from an `InputStream` + /// Blocks until the initial batch is loaded + /// + /// \param[in] stream JSON source stream + /// \param[in] read_options Options for reading + /// \param[in] parse_options Options for chunking, parsing, and conversion + /// \param[in] io_context Context for IO operations (optional) + /// \param[in] cpu_executor Executor for computation tasks (optional) + /// \return The initialized reader + static Result> Make( + std::shared_ptr stream, const ReadOptions& read_options, + const ParseOptions& parse_options, + const io::IOContext& io_context = io::default_io_context(), + ::arrow::internal::Executor* cpu_executor = NULLPTR); + + /// \brief Create a `StreamingReader` from an `InputStream` asynchronously + /// Returned future completes after loading the first batch + /// + /// \param[in] stream JSON source stream + /// \param[in] read_options Options for reading + /// \param[in] parse_options Options for chunking, parsing, and conversion + /// \param[in] io_context Context for IO operations (optional) + /// \param[in] cpu_executor Executor for computation tasks (optional) + /// \return Future for the initialized reader + static Future> MakeAsync( + std::shared_ptr stream, const ReadOptions& read_options, + const ParseOptions& parse_options, + const io::IOContext& io_context = io::default_io_context(), + ::arrow::internal::Executor* cpu_executor = NULLPTR); +}; + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..2f819779bdb5940b081a2a41756d3a6510260476 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h @@ -0,0 +1,330 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/io/memory.h" +#include "arrow/json/converter.h" +#include "arrow/json/options.h" +#include "arrow/json/parser.h" +#include "arrow/json/rapidjson_defs.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/type.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +#include "rapidjson/document.h" +#include "rapidjson/prettywriter.h" +#include "rapidjson/reader.h" +#include "rapidjson/writer.h" + +namespace arrow { + +using internal::checked_cast; + +namespace json { + +namespace rj = arrow::rapidjson; + +using rj::StringBuffer; +using std::string_view; +using Writer = rj::Writer; + +struct GenerateOptions { + // Probability of a field being written + double field_probability = 1.0; + // Probability of a value being null + double null_probability = 0.2; + // Whether to randomize the order of written fields + bool randomize_field_order = false; + + static constexpr GenerateOptions Defaults() { return GenerateOptions{}; } +}; + +inline static Status OK(bool ok) { return ok ? 
Status::OK() : Status::Invalid(""); } + +template +inline static Status Generate( + const std::shared_ptr& type, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()); + +template +inline static Status Generate( + const std::vector>& fields, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()); + +template +inline static Status Generate( + const std::shared_ptr& schm, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()) { + return Generate(schm->fields(), e, writer, options); +} + +template +struct GenerateImpl { + Status Visit(const NullType&) { return OK(writer.Null()); } + + Status Visit(const BooleanType&) { + return OK(writer.Bool(std::uniform_int_distribution{}(e)&1)); + } + + template + enable_if_physical_unsigned_integer Visit(const T&) { + auto val = std::uniform_int_distribution<>{}(e); + return OK(writer.Uint64(static_cast(val))); + } + + template + enable_if_physical_signed_integer Visit(const T&) { + auto val = std::uniform_int_distribution<>{}(e); + return OK(writer.Int64(static_cast(val))); + } + + template + enable_if_physical_floating_point Visit(const T&) { + auto val = std::normal_distribution{0, 1 << 10}(e); + return OK(writer.Double(val)); + } + + Status GenerateAscii(const DataType&) { + auto size = std::poisson_distribution<>{4}(e); + std::uniform_int_distribution gen_char(32, 126); // FIXME generate UTF8 + std::string s(size, '\0'); + for (char& ch : s) ch = static_cast(gen_char(e)); + return OK(writer.String(s.c_str())); + } + + template + enable_if_base_binary Visit(const T& t) { + return GenerateAscii(t); + } + + Status Visit(const BinaryViewType& t) { return GenerateAscii(t); } + + template + enable_if_list_like Visit(const T& t) { + auto size = std::poisson_distribution<>{4}(e); + writer.StartArray(); + for (int i = 0; i < size; ++i) { + RETURN_NOT_OK(Generate(t.value_type(), e, &writer, options)); + } + return OK(writer.EndArray(size)); + } + + Status Visit(const ListViewType& t) { return NotImplemented(t); } + + Status Visit(const LargeListViewType& t) { return NotImplemented(t); } + + Status Visit(const StructType& t) { return Generate(t.fields(), e, &writer, options); } + + Status Visit(const DayTimeIntervalType& t) { return NotImplemented(t); } + + Status Visit(const MonthDayNanoIntervalType& t) { return NotImplemented(t); } + + Status Visit(const DictionaryType& t) { return NotImplemented(t); } + + Status Visit(const ExtensionType& t) { return NotImplemented(t); } + + Status Visit(const Decimal128Type& t) { return NotImplemented(t); } + + Status Visit(const FixedSizeBinaryType& t) { return NotImplemented(t); } + + Status Visit(const UnionType& t) { return NotImplemented(t); } + + Status Visit(const RunEndEncodedType& t) { return NotImplemented(t); } + + Status NotImplemented(const DataType& t) { + return Status::NotImplemented("random generation of arrays of type ", t); + } + + Engine& e; + rj::Writer& writer; + const GenerateOptions& options; +}; + +template +inline static Status Generate(const std::shared_ptr& type, Engine& e, + Writer* writer, const GenerateOptions& options) { + if (std::bernoulli_distribution(options.null_probability)(e)) { + writer->Null(); + return Status::OK(); + } + GenerateImpl visitor = {e, *writer, options}; + return VisitTypeInline(*type, &visitor); +} + +template +inline static Status Generate(const std::vector>& fields, + Engine& e, Writer* writer, const GenerateOptions& options) { + 
RETURN_NOT_OK(OK(writer->StartObject())); + + int num_fields = 0; + auto write_field = [&](const Field& f) { + ++num_fields; + writer->Key(f.name().c_str()); + return Generate(f.type(), e, writer, options); + }; + + std::bernoulli_distribution bool_dist(options.field_probability); + if (options.randomize_field_order) { + std::vector indices; + indices.reserve(static_cast(fields.size() * options.field_probability)); + for (size_t i = 0; i < fields.size(); ++i) { + if (bool_dist(e)) { + indices.push_back(i); + } + } + std::shuffle(indices.begin(), indices.end(), e); + for (auto i : indices) { + RETURN_NOT_OK(write_field(*fields[i])); + } + } else { + for (const auto& f : fields) { + if (bool_dist(e)) { + RETURN_NOT_OK(write_field(*f)); + } + } + } + + return OK(writer->EndObject(num_fields)); +} + +inline static Status MakeStream(string_view src_str, + std::shared_ptr* out) { + auto src = std::make_shared(src_str); + *out = std::make_shared(src); + return Status::OK(); +} + +// scalar values (numbers and strings) are parsed into a +// dictionary. This can be decoded for ease of comparison +inline static Status DecodeStringDictionary(const DictionaryArray& dict_array, + std::shared_ptr* decoded) { + const StringArray& dict = checked_cast(*dict_array.dictionary()); + const Int32Array& indices = checked_cast(*dict_array.indices()); + StringBuilder builder; + RETURN_NOT_OK(builder.Resize(indices.length())); + for (int64_t i = 0; i < indices.length(); ++i) { + if (indices.IsNull(i)) { + builder.UnsafeAppendNull(); + continue; + } + auto value = dict.GetView(indices.GetView(i)); + RETURN_NOT_OK(builder.ReserveData(value.size())); + builder.UnsafeAppend(value); + } + return builder.Finish(decoded); +} + +inline static Status ParseFromString(ParseOptions options, string_view src_str, + std::shared_ptr* parsed) { + auto src = std::make_shared(src_str); + std::unique_ptr parser; + RETURN_NOT_OK(BlockParser::Make(options, &parser)); + RETURN_NOT_OK(parser->Parse(src)); + return parser->Finish(parsed); +} + +inline static Status ParseFromString(ParseOptions options, string_view src_str, + std::shared_ptr* parsed) { + std::shared_ptr parsed_non_struct; + RETURN_NOT_OK(ParseFromString(options, src_str, &parsed_non_struct)); + *parsed = internal::checked_pointer_cast(parsed_non_struct); + return Status::OK(); +} + +static inline std::string PrettyPrint(string_view one_line) { + rj::Document document; + + // Must pass size to avoid ASAN issues. 
+ document.Parse(one_line.data(), one_line.size()); + rj::StringBuffer sb; + rj::PrettyWriter writer(sb); + document.Accept(writer); + return sb.GetString(); +} + +template +std::string RowsOfOneColumn(std::string_view name, std::initializer_list values, + decltype(std::to_string(*values.begin()))* = nullptr) { + std::stringstream ss; + for (auto value : values) { + ss << R"({")" << name << R"(":)" << std::to_string(value) << "}\n"; + } + return ss.str(); +} + +inline std::string RowsOfOneColumn(std::string_view name, + std::initializer_list values) { + std::stringstream ss; + for (auto value : values) { + ss << R"({")" << name << R"(":)" << value << "}\n"; + } + return ss.str(); +} + +inline static std::string scalars_only_src() { + return R"( + { "hello": 3.5, "world": false, "yo": "thing" } + { "hello": 3.25, "world": null } + { "hello": 3.125, "world": null, "yo": "\u5fcd" } + { "hello": 0.0, "world": true, "yo": null } + )"; +} + +inline static std::string nested_src() { + return R"( + { "hello": 3.5, "world": false, "yo": "thing", "arr": [1, 2, 3], "nuf": {} } + { "hello": 3.25, "world": null, "arr": [2], "nuf": null } + { "hello": 3.125, "world": null, "yo": "\u5fcd", "arr": [], "nuf": { "ps": 78 } } + { "hello": 0.0, "world": true, "yo": null, "arr": null, "nuf": { "ps": 90 } } + )"; +} + +inline static std::string null_src() { + return R"( + { "plain": null, "list1": [], "list2": [], "struct": { "plain": null } } + { "plain": null, "list1": [], "list2": [null], "struct": {} } + )"; +} + +inline static std::string unquoted_decimal_src() { + return R"( + { "price": 30.04, "cost":30.001 } + { "price": 1.23, "cost":1.229 } + )"; +} + +inline static std::string mixed_decimal_src() { + return R"( + { "price": 30.04, "cost": 30.001 } + { "price": "1.23", "cost": "1.229" } + )"; +} + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..67e2e1bb4065d0bc238d04073f673a699c5da4ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/json/type_fwd.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
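Tying the helpers above together: a typical test parses one of the canned sources through ParseFromString. A sketch assuming it runs inside a gtest body where ASSERT_OK is available:

    std::shared_ptr<arrow::StructArray> parsed;
    ASSERT_OK(arrow::json::ParseFromString(
        arrow::json::ParseOptions::Defaults(), arrow::json::scalars_only_src(), &parsed));
    // Scalar columns come back as dictionaries of their unconverted text;
    // DecodeStringDictionary above recovers plain strings for comparison.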
+ +namespace arrow { +namespace json { + +class TableReader; +struct ReadOptions; +struct ParseOptions; + +} // namespace json +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..7066bbb63d2a5775454d5cffc82df7faf0056db8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/future.h" + +namespace arrow { +namespace util { + +template +AsyncGenerator AsyncVectorIt(std::vector v) { + return MakeVectorGenerator(std::move(v)); +} + +template +AsyncGenerator FailAt(AsyncGenerator src, int failing_index) { + auto index = std::make_shared>(0); + return [src, index, failing_index]() { + auto idx = index->fetch_add(1); + if (idx >= failing_index) { + return Future::MakeFinished(Status::Invalid("XYZ")); + } + return src(); + }; +} + +template +AsyncGenerator SlowdownABit(AsyncGenerator source) { + return MakeMappedGenerator(std::move(source), [](const T& res) { + return SleepABitAsync().Then([res]() { return res; }); + }); +} + +template +class TrackingGenerator { + public: + explicit TrackingGenerator(AsyncGenerator source) + : state_(std::make_shared(std::move(source))) {} + + Future operator()() { + state_->num_read++; + return state_->source(); + } + + int num_read() { return state_->num_read.load(); } + + private: + struct State { + explicit State(AsyncGenerator source) : source(std::move(source)), num_read(0) {} + + AsyncGenerator source; + std::atomic num_read; + }; + + std::shared_ptr state_; +}; + +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09e8f49dea9eb1023ce7db9813b240eed1cc0563 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h @@ -0,0 +1,231 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/array/builder_time.h" +#include "arrow/buffer.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { + +// ArrayFromVector: construct an Array from vectors of C values + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + if (is_valid[i]) { + ASSERT_OK(builder.Append(values[i])); + } else { + ASSERT_OK(builder.AppendNull()); + } + } + ASSERT_OK(builder.Finish(out)); +} + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& values, std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + ASSERT_OK(builder.Append(values[i])); + } + ASSERT_OK(builder.Finish(out)); +} + +// Overloads without a DataType argument, for parameterless types + +template +void ArrayFromVector(const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, is_valid, values, out); +} + +template +void ArrayFromVector(const std::vector& values, std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, values, out); +} + +// ChunkedArrayFromVector: construct a ChunkedArray from vectors of C values + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + ASSERT_EQ(is_valid.size(), values.size()); + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, is_valid[i], values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +// Overloads without a DataType 
argument, for parameterless types + +template +void ChunkedArrayFromVector(const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ChunkedArrayFromVector(type, is_valid, values, out); +} + +template +void ChunkedArrayFromVector(const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ChunkedArrayFromVector(type, values, out); +} + +template +void FinishAndCheckPadding(BuilderType* builder, std::shared_ptr* out) { + ASSERT_OK_AND_ASSIGN(*out, builder->Finish()); + AssertZeroPadded(**out); + TestInitialized(**out); +} + +template +Status MakeArray(const std::vector& valid_bytes, const std::vector& values, + int64_t size, Builder* builder, std::shared_ptr* out) { + // Append the first 1000 + for (int64_t i = 0; i < size; ++i) { + if (valid_bytes[i] > 0) { + RETURN_NOT_OK(builder->Append(values[i])); + } else { + RETURN_NOT_OK(builder->AppendNull()); + } + } + return builder->Finish(out); +} + +template +struct VisitBuilder { + template ::BuilderType, + // need to let SFINAE drop this Visit when it would result in + // [](NullBuilder*){}(double_builder) + typename = decltype(std::declval()(std::declval()))> + Status Visit(const T&, ArrayBuilder* builder, Fn&& fn) { + fn(internal::checked_cast(builder)); + return Status::OK(); + } + + Status Visit(const DataType& t, ArrayBuilder* builder, Fn&& fn) { + return Status::NotImplemented("visiting builders of type ", t); + } +}; + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t initial_capacity, + int64_t visitor_repetitions, Fn&& fn) { + std::unique_ptr builder; + RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder)); + + if (initial_capacity != 0) { + RETURN_NOT_OK(builder->Resize(initial_capacity)); + } + + VisitBuilder visitor; + for (int64_t i = 0; i < visitor_repetitions; ++i) { + RETURN_NOT_OK( + VisitTypeInline(*builder->type(), &visitor, builder.get(), std::forward(fn))); + } + + std::shared_ptr out; + RETURN_NOT_OK(builder->Finish(&out)); + return std::move(out); +} + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t length, Fn&& fn) { + return ArrayFromBuilderVisitor(type, length, length, std::forward(fn)); +} + +template +static inline Status GetBitmapFromVector(const std::vector& is_valid, + std::shared_ptr* result) { + size_t length = is_valid.size(); + + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateEmptyBitmap(length)); + + uint8_t* bitmap = buffer->mutable_data(); + for (size_t i = 0; i < static_cast(length); ++i) { + if (is_valid[i]) { + bit_util::SetBit(bitmap, i); + } + } + + *result = buffer; + return Status::OK(); +} + +template +inline void BitmapFromVector(const std::vector& is_valid, + std::shared_ptr* out) { + ASSERT_OK(GetBitmapFromVector(is_valid, out)); +} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e34fc858d07f60ac31b73d1e84b5dc1cf4189b3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/thread_pool.h" + +namespace arrow { + +/// An executor which synchronously runs the task as part of the SpawnReal call. +class MockExecutor : public internal::Executor { + public: + int GetCapacity() override { return 0; } + + Status SpawnReal(internal::TaskHints hints, internal::FnOnce task, StopToken, + StopCallback&&) override { + spawn_count++; + std::move(task)(); + return Status::OK(); + } + + int spawn_count = 0; +}; + +/// An executor which does not actually run the task. Can be used to simulate situations +/// where the executor schedules a task in a long queue and doesn't get around to running +/// it for a while +class DelayedExecutor : public internal::Executor { + public: + int GetCapacity() override { return 0; } + + Status SpawnReal(internal::TaskHints hints, internal::FnOnce task, StopToken, + StopCallback&&) override { + captured_tasks.push_back(std::move(task)); + return Status::OK(); + } + + std::vector> captured_tasks; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h new file mode 100644 index 0000000000000000000000000000000000000000..4ec8845864b72807d7e68230b25256ba53127f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h @@ -0,0 +1,321 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
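The two mock executors above make task scheduling observable in tests. A sketch (assuming Executor::Spawn from arrow/util/thread_pool.h, which forwards to SpawnReal):

    arrow::DelayedExecutor executor;
    bool ran = false;
    ARROW_RETURN_NOT_OK(executor.Spawn([&ran] { ran = true; }));
    // Nothing has run yet; the task was merely captured.
    std::move(executor.captured_tasks[0])();  // FnOnce is invoked on an rvalue
    // ran == true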
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/array/array_base.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/visibility.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+
+class ARROW_TESTING_EXPORT ConstantArrayGenerator {
+ public:
+  /// \brief Generates a constant BooleanArray
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Boolean(int64_t size, bool value = false);
+
+  /// \brief Generates a constant UInt8Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> UInt8(int64_t size, uint8_t value = 0);
+
+  /// \brief Generates a constant Int8Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Int8(int64_t size, int8_t value = 0);
+
+  /// \brief Generates a constant UInt16Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> UInt16(int64_t size, uint16_t value = 0);
+
+  /// \brief Generates a constant Int16Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Int16(int64_t size, int16_t value = 0);
+
+  /// \brief Generates a constant UInt32Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> UInt32(int64_t size, uint32_t value = 0);
+
+  /// \brief Generates a constant Int32Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Int32(int64_t size, int32_t value = 0);
+
+  /// \brief Generates a constant UInt64Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> UInt64(int64_t size, uint64_t value = 0);
+
+  /// \brief Generates a constant Int64Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Int64(int64_t size, int64_t value = 0);
+
+  /// \brief Generates a constant Float32Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Float32(int64_t size, float value = 0);
+
+  /// \brief Generates a constant Float64Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Float64(int64_t size, double value = 0);
+
+  /// \brief Generates a constant StringArray
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] value to repeat
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> String(int64_t size, std::string value = "");
+
+  template <typename ArrowType, typename CType = typename ArrowType::c_type>
+  static std::shared_ptr<Array> Numeric(int64_t size, CType value = 0) {
+    switch (ArrowType::type_id) {
+      case Type::BOOL:
+        return Boolean(size, static_cast<bool>(value));
+      case Type::UINT8:
+        return UInt8(size, static_cast<uint8_t>(value));
+      case Type::INT8:
+        return Int8(size, static_cast<int8_t>(value));
+      case Type::UINT16:
+        return UInt16(size, static_cast<uint16_t>(value));
+      case Type::INT16:
+        return Int16(size, static_cast<int16_t>(value));
+      case Type::UINT32:
+        return UInt32(size, static_cast<uint32_t>(value));
+      case Type::INT32:
+        return Int32(size, static_cast<int32_t>(value));
+      case Type::UINT64:
+        return UInt64(size, static_cast<uint64_t>(value));
+      case Type::INT64:
+        return Int64(size, static_cast<int64_t>(value));
+      case Type::FLOAT:
+        return Float32(size, static_cast<float>(value));
+      case Type::DOUBLE:
+        return Float64(size, static_cast<double>(value));
+      case Type::INTERVAL_DAY_TIME:
+      case Type::DATE32: {
+        EXPECT_OK_AND_ASSIGN(auto viewed,
+                             Int32(size, static_cast<int32_t>(value))->View(date32()));
+        return viewed;
+      }
+      case Type::INTERVAL_MONTHS: {
+        EXPECT_OK_AND_ASSIGN(auto viewed,
+                             Int32(size, static_cast<int32_t>(value))
+                                 ->View(std::make_shared<MonthIntervalType>()));
+        return viewed;
+      }
+      case Type::TIME32: {
+        EXPECT_OK_AND_ASSIGN(auto viewed,
+                             Int32(size, static_cast<int32_t>(value))
+                                 ->View(std::make_shared<Time32Type>(TimeUnit::SECOND)));
+        return viewed;
+      }
+      case Type::TIME64: {
+        EXPECT_OK_AND_ASSIGN(auto viewed, Int64(size, static_cast<int64_t>(value))
+                                              ->View(std::make_shared<Time64Type>()));
+        return viewed;
+      }
+      case Type::DATE64: {
+        EXPECT_OK_AND_ASSIGN(auto viewed,
+                             Int64(size, static_cast<int64_t>(value))->View(date64()));
+        return viewed;
+      }
+      case Type::TIMESTAMP: {
+        EXPECT_OK_AND_ASSIGN(
+            auto viewed, Int64(size, static_cast<int64_t>(value))
+                             ->View(std::make_shared<TimestampType>(TimeUnit::SECOND)));
+        return viewed;
+      }
+      default:
+        return nullptr;
+    }
+  }
+
+  /// \brief Generates a constant Array of zeroes
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] type the type of the Array
+  ///
+  /// \return a generated Array
+  static std::shared_ptr<Array> Zeroes(int64_t size,
+                                       const std::shared_ptr<DataType>& type);
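+
+  // Example (illustrative sketch, not part of the upstream API surface): these
+  // helpers are typically used to build small test fixtures, e.g.
+  //
+  //   std::shared_ptr<Array> ones = ConstantArrayGenerator::Int32(/*size=*/8, 1);
+  //   std::shared_ptr<Array> dates =
+  //       ConstantArrayGenerator::Numeric<Date32Type>(/*size=*/8, 19000);
+  //
+  // Numeric<T>() dispatches on T's type_id, so date/time types are produced by
+  // generating the underlying integer array and reinterpreting it via View().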
+
+  /// \brief Generates a RecordBatch of zeroes
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] schema to conform to
+  ///
+  /// This function is handy for returning a RecordBatch of a desired shape.
+  ///
+  /// \return a generated RecordBatch
+  static std::shared_ptr<RecordBatch> Zeroes(int64_t size,
+                                             const std::shared_ptr<Schema>& schema);
+
+  /// \brief Generates a RecordBatchReader by repeating a RecordBatch
+  ///
+  /// \param[in] n_batch the number of times it repeats batch
+  /// \param[in] batch the RecordBatch to repeat
+  ///
+  /// \return a generated RecordBatchReader
+  static std::shared_ptr<RecordBatchReader> Repeat(
+      int64_t n_batch, const std::shared_ptr<RecordBatch> batch);
+
+  /// \brief Generates a RecordBatchReader of zeroes batches
+  ///
+  /// \param[in] n_batch the number of RecordBatch
+  /// \param[in] batch_size the size of each RecordBatch
+  /// \param[in] schema to conform to
+  ///
+  /// \return a generated RecordBatchReader
+  static std::shared_ptr<RecordBatchReader> Zeroes(
+      int64_t n_batch, int64_t batch_size, const std::shared_ptr<Schema>& schema);
+};
+
+ARROW_TESTING_EXPORT
+Result<std::shared_ptr<Array>> ScalarVectorToArray(const ScalarVector& scalars);
+
+namespace gen {
+
+class ARROW_TESTING_EXPORT ArrayGenerator {
+ public:
+  virtual ~ArrayGenerator() = default;
+  virtual Result<std::shared_ptr<Array>> Generate(int64_t num_rows) = 0;
+  virtual std::shared_ptr<DataType> type() const = 0;
+};
+
+// Same as DataGenerator below, except that instead of returning Result an OK
+// status is EXPECT'd (failures surface as googletest assertion failures).
+class ARROW_TESTING_EXPORT GTestDataGenerator {
+ public:
+  virtual ~GTestDataGenerator() = default;
+  virtual std::shared_ptr<::arrow::RecordBatch> RecordBatch(int64_t num_rows) = 0;
+  virtual std::vector<std::shared_ptr<::arrow::RecordBatch>> RecordBatches(
+      int64_t rows_per_batch, int num_batches) = 0;
+
+  virtual ::arrow::compute::ExecBatch ExecBatch(int64_t num_rows) = 0;
+  virtual std::vector<::arrow::compute::ExecBatch> ExecBatches(int64_t rows_per_batch,
+                                                               int num_batches) = 0;
+
+  virtual std::shared_ptr<::arrow::Table> Table(int64_t rows_per_chunk,
+                                                int num_chunks = 1) = 0;
+  virtual std::shared_ptr<::arrow::Schema> Schema() = 0;
+};
+
+class ARROW_TESTING_EXPORT DataGenerator {
+ public:
+  virtual ~DataGenerator() = default;
+  virtual Result<std::shared_ptr<RecordBatch>> RecordBatch(int64_t num_rows) = 0;
+  virtual Result<std::vector<std::shared_ptr<RecordBatch>>> RecordBatches(
+      int64_t rows_per_batch, int num_batches) = 0;
+
+  virtual Result<::arrow::compute::ExecBatch> ExecBatch(int64_t num_rows) = 0;
+  virtual Result<std::vector<::arrow::compute::ExecBatch>> ExecBatches(
+      int64_t rows_per_batch, int num_batches) = 0;
+
+  virtual Result<std::shared_ptr<Table>> Table(int64_t rows_per_chunk,
+                                               int num_chunks = 1) = 0;
+  virtual std::shared_ptr<::arrow::Schema> Schema() = 0;
+  /// @brief Converts this generator to a variant that fails (in a googletest sense)
+  /// if any error is encountered.
+  virtual std::unique_ptr<GTestDataGenerator> FailOnError() = 0;
+};
+
+/// @brief A potentially named field
+///
+/// If name is not specified then a name will be generated automatically (e.g. f0, f1)
+struct ARROW_TESTING_EXPORT GeneratorField {
+ public:
+  GeneratorField(std::shared_ptr<ArrayGenerator> gen)  // NOLINT implicit conversion
+      : name(), gen(std::move(gen)) {}
+  GeneratorField(std::string name, std::shared_ptr<ArrayGenerator> gen)
+      : name(std::move(name)), gen(std::move(gen)) {}
+
+  std::optional<std::string> name;
+  std::shared_ptr<ArrayGenerator> gen;
+};
+
+/// Create a table generator with the given fields
+ARROW_TESTING_EXPORT std::shared_ptr<DataGenerator> Gen(
+    std::vector<GeneratorField> column_gens);
+
+/// make a generator that returns a constant value
+ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Constant(
+    std::shared_ptr<Scalar> value);
+/// make a generator that returns an incrementing value
+///
+/// Note: overflow is not prevented; standard unsigned integer overflow applies
+ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Step(uint32_t start = 0,
+                                                          uint32_t step = 1,
+                                                          bool signed_int = false);
+/// make a generator that returns a random value
+ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Random(
+    std::shared_ptr<DataType> type);
+/// TODO(if-needed) could add a repeat-scalars generator, e.g. Repeat({1, 2, 3}) for
+/// 1,2,3,1,2,3,1
+///
+/// TODO(if-needed) could add a repeat-from-json generator e.g. Repeat(int32(), "[1, 2,
+/// 3]")), same behavior as repeat-scalars
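+
+// Example (illustrative sketch): the generators above compose into a batch/table
+// generator; the column names "id" and "flag" below are arbitrary.
+//
+//   auto g = Gen({{"id", Step()}, {"flag", Random(boolean())}})->FailOnError();
+//   std::shared_ptr<::arrow::RecordBatch> batch = g->RecordBatch(/*num_rows=*/100);
+//   auto batches = g->RecordBatches(/*rows_per_batch=*/10, /*num_batches=*/10);
+//
+// FailOnError() converts the Result-returning DataGenerator into a
+// GTestDataGenerator, keeping test bodies free of explicit status handling.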
+
+}  // namespace gen
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..c934dd279389068d5ebf60695284d403b2e2dcd7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+// GTest < 1.11
+#ifndef GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST
+#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(A)
+#endif
+// GTest < 1.10
+#ifndef TYPED_TEST_SUITE
+#define TYPED_TEST_SUITE TYPED_TEST_CASE
+#define TYPED_TEST_SUITE_P TYPED_TEST_CASE_P
+#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P
+#define REGISTER_TYPED_TEST_SUITE_P REGISTER_TYPED_TEST_CASE_P
+#define INSTANTIATE_TYPED_TEST_SUITE_P INSTANTIATE_TYPED_TEST_CASE_P
+#endif
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d97a3ada724aec082180aea39a37386adca273c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h
@@ -0,0 +1,698 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/testing/uniform_real.h" +#include "arrow/testing/visibility.h" +#include "arrow/type.h" + +namespace arrow { + +class Array; + +namespace random { + +using SeedType = int32_t; +constexpr SeedType kSeedMax = std::numeric_limits::max(); + +class ARROW_TESTING_EXPORT RandomArrayGenerator { + public: + explicit RandomArrayGenerator(SeedType seed) + : seed_distribution_(static_cast(1), kSeedMax), seed_rng_(seed) {} + + /// \brief Generate a null bitmap + /// + /// \param[in] size the size of the bitmap to generate + /// \param[in] null_probability the probability of a bit being zero + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Buffer + std::shared_ptr NullBitmap(int64_t size, double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random BooleanArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] true_probability the probability of a value being 1 / bit-set + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Boolean(int64_t size, double true_probability, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + /// \brief Generate a random UInt8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt8(int64_t size, uint8_t min, uint8_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int8(int64_t size, int8_t min, int8_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + 
MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt16(int64_t size, uint16_t min, uint16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt32(int64_t size, uint32_t min, uint32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int32(int64_t size, int32_t min, int32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt64(int64_t size, uint64_t min, uint64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int64Array + /// + /// \param[in] size the size of the array to generate + /// 
\param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random HalfFloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the distribution + /// \param[in] max the upper bound of the distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float32(int64_t size, float min, float max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DoubleArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float64(int64_t size, double min, double max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Date64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Date64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + template + std::shared_ptr Numeric(int64_t size, CType min, CType max, + double 
null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()) { + switch (ArrowType::type_id) { + case Type::UINT8: + return UInt8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT8: + return Int8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT16: + return UInt16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT16: + return Int16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT32: + return UInt32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT32: + return Int32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT64: + return UInt64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT64: + return Int64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::HALF_FLOAT: + return Float16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::FLOAT: + return Float32(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DOUBLE: + return Float64(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DATE64: + return Date64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + default: + return nullptr; + } + } + + /// \brief Generate a random Decimal128Array + /// + /// \param[in] type the type of the array to generate + /// (must be an instance of Decimal128Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Decimal128(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Decimal256Array + /// + /// \param[in] type the type of the array to generate + /// (must be an instance of Decimal256Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Decimal256(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate an array of offsets (for use in e.g. 
ListArray::FromArrays) + /// + /// \param[in] size the size of the array to generate + /// \param[in] first_offset the first offset value (usually 0) + /// \param[in] last_offset the last offset value (usually the size of the child array) + /// \param[in] null_probability the probability of an offset being null + /// \param[in] force_empty_nulls if true, null offsets must have 0 "length" + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Offsets(int64_t size, int32_t first_offset, int32_t last_offset, + double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + std::shared_ptr LargeOffsets(int64_t size, int64_t first_offset, + int64_t last_offset, double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr String(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringViewArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] max_data_buffer_length the data buffer size at which + /// a new chunk will be generated + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringView(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + std::optional max_data_buffer_length = {}, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeStringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr LargeString(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool 
= default_memory_pool()); + + /// \brief Generate a random StringArray with repeated values + /// + /// \param[in] size the size of the array to generate + /// \param[in] unique the number of unique string values used + /// to populate the array + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Like StringWithRepeats but return BinaryArray + std::shared_ptr BinaryWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FixedSizeBinaryArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] byte_width the byte width of fixed-size binary items + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr FixedSizeBinary(int64_t size, int32_t byte_width, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr List(const Array& values, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListViewArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr ListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeListViewArray + /// + /// \param[in] values The underlying values array + /// 
\param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr LargeListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, + double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random MapArray + /// + /// \param[in] keys The underlying keys array + /// \param[in] items The underlying items array + /// \param[in] size The size of the generated map array + /// \param[in] null_probability the probability of a map value being null + /// \param[in] force_empty_nulls if true, null map entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Map(const std::shared_ptr& keys, + const std::shared_ptr& items, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random RunEndEncodedArray + /// + /// \param[in] value_type The DataType of the encoded values + /// \param[in] logical_size The logical length of the generated array + /// \param[in] null_probability the probability of a value being null + /// + /// \return a generated Array + std::shared_ptr RunEndEncoded(std::shared_ptr value_type, + int64_t logical_size, + double null_probability = 0.0); + + /// \brief Generate a random SparseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. + /// + /// \param[in] fields Vector of Arrays containing the data for each union field + /// \param[in] size The size of the generated sparse union array + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + std::shared_ptr SparseUnion(const ArrayVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DenseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. The offsets are incremented along + /// each child field. + /// + /// \param[in] fields Vector of Arrays containing the data for each union field + /// \param[in] size The size of the generated sparse union array + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + std::shared_ptr DenseUnion(const ArrayVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Array of the specified type, size, and null_probability. + /// + /// Generation parameters other than size and null_probability are determined based on + /// the type of Array to be generated. 
+  /// If boolean the probabilities of true, false values are 0.25, 0.75 respectively.
+  /// If numeric min, max will be the least and greatest representable values.
+  /// If string min_length, max_length will be 0, sqrt(size) respectively.
+  ///
+  /// \param[in] type the type of Array to generate
+  /// \param[in] size the size of the Array to generate
+  /// \param[in] null_probability the probability of a slot being null
+  /// \param[in] alignment alignment for memory allocations (in bytes)
+  /// \param[in] memory_pool memory pool to allocate memory from
+  /// \return a generated Array
+  std::shared_ptr<Array> ArrayOf(std::shared_ptr<DataType> type, int64_t size,
+                                 double null_probability = 0,
+                                 int64_t alignment = kDefaultBufferAlignment,
+                                 MemoryPool* memory_pool = default_memory_pool());
+
+  /// \brief Generate an array with random data based on the given field. See BatchOf
+  /// for usage info.
+  std::shared_ptr<Array> ArrayOf(const Field& field, int64_t size,
+                                 int64_t alignment = kDefaultBufferAlignment,
+                                 MemoryPool* memory_pool = default_memory_pool());
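+
+  // Example (illustrative sketch): generation options are attached as key-value
+  // metadata on the field, so a nullable int32 column with bounded values can be
+  // drawn as:
+  //
+  //   arrow::random::RandomArrayGenerator rng(/*seed=*/42);
+  //   auto f = arrow::field("x", arrow::int32(), /*nullable=*/true,
+  //                         arrow::key_value_metadata({"min", "max"}, {"0", "9"}));
+  //   std::shared_ptr<arrow::Array> arr = rng.ArrayOf(*f, /*size=*/1024);
+  //
+  // The recognized metadata keys are documented on BatchOf() below.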
+
+  /// \brief Generate a record batch with random data of the specified length.
+  ///
+  /// Generation options are read from key-value metadata for each field, and may be
+  /// specified at any nesting level. For example, generation options for the child
+  /// values of a list array can be specified by constructing the list type with
+  /// list(field("item", int8(), options_metadata))
+  ///
+  /// The following options are supported:
+  ///
+  /// For all types except NullType:
+  /// - null_probability (double): range [0.0, 1.0] the probability of a null value.
+  ///   Default is 0.0 if the field is marked non-nullable, else it is 0.01.
+  ///
+  /// For all numeric types T:
+  /// - min (T::c_type): the minimum value to generate (inclusive), default
+  ///   std::numeric_limits<T::c_type>::min()
+  /// - max (T::c_type): the maximum value to generate (inclusive), default
+  ///   std::numeric_limits<T::c_type>::max()
+  /// Note this means that, for example, min/max are int16_t values for HalfFloatType.
+  ///
+  /// For floating point types T for which is_physical_floating_type<T>:
+  /// - nan_probability (double): range [0.0, 1.0] the probability of a NaN value.
+  ///
+  /// For BooleanType:
+  /// - true_probability (double): range [0.0, 1.0] the probability of a true.
+  ///
+  /// For DictionaryType:
+  /// - values (int32_t): the size of the dictionary.
+  /// Other properties are passed to the generator for the dictionary indices. However,
+  /// min and max cannot be specified. Note it is not possible to otherwise customize
+  /// the generation of dictionary values.
+  ///
+  /// For list, string, and binary types T, including their large variants:
+  /// - min_length (T::offset_type): the minimum length of the child to generate,
+  ///   default 0
+  /// - max_length (T::offset_type): the maximum length of the child to generate,
+  ///   default 1024
+  ///
+  /// For string and binary types T (not including their large or view variants):
+  /// - unique (int32_t): if positive, this many distinct values will be generated
+  ///   and all array values will be one of these values, default -1
+  ///
+  /// For string and binary view types T:
+  /// - max_data_buffer_length (int64_t): the data buffer size at which a new chunk
+  ///   will be generated, default 32KB
+  ///
+  /// For MapType:
+  /// - values (int32_t): the number of key-value pairs to generate, which will be
+  ///   partitioned among the array values.
+  ///
+  /// For extension types:
+  /// - extension_allow_random_storage (bool): in general an extension array may have
+  ///   invariants on its storage beyond those already imposed by the arrow format,
+  ///   which may result in an invalid array if we just wrap randomly generated
+  ///   storage. Set this flag to explicitly allow wrapping of randomly generated
+  ///   storage.
+  std::shared_ptr<RecordBatch> BatchOf(
+      const FieldVector& fields, int64_t size,
+      int64_t alignment = kDefaultBufferAlignment,
+      MemoryPool* memory_pool = default_memory_pool());
+
+  SeedType seed() { return seed_distribution_(seed_rng_); }
+
+ private:
+  std::uniform_int_distribution<SeedType> seed_distribution_;
+  std::default_random_engine seed_rng_;
+};
+
+/// Generate a batch with random data. See RandomArrayGenerator::BatchOf.
+ARROW_TESTING_EXPORT
+std::shared_ptr<RecordBatch> GenerateBatch(
+    const FieldVector& fields, int64_t size, SeedType seed,
+    int64_t alignment = kDefaultBufferAlignment,
+    MemoryPool* memory_pool = default_memory_pool());
+
+/// Generate an array with random data. See RandomArrayGenerator::BatchOf.
+ARROW_TESTING_EXPORT
+std::shared_ptr<Array> GenerateArray(
+    const Field& field, int64_t size, SeedType seed,
+    int64_t alignment = kDefaultBufferAlignment,
+    MemoryPool* memory_pool = default_memory_pool());
+
+}  // namespace random
+
+//
+// Assorted functions
+//
+
+ARROW_TESTING_EXPORT
+void rand_day_millis(int64_t N, std::vector<DayTimeIntervalType::DayMilliseconds>* out);
+ARROW_TESTING_EXPORT
+void rand_month_day_nanos(int64_t N,
+                          std::vector<MonthDayNanoIntervalType::MonthDayNanos>* out);
+
+template <typename T, typename U>
+void randint(int64_t N, T lower, T upper, std::vector<U>* out) {
+  const int random_seed = 0;
+  std::default_random_engine gen(random_seed);
+  std::uniform_int_distribution<T> d(lower, upper);
+  out->resize(N, static_cast<U>(0));
+  std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast<U>(d(gen)); });
+}
+
+template <typename T>
+void random_real(int64_t n, uint32_t seed, T min_value, T max_value,
+                 std::vector<T>* out) {
+  std::default_random_engine gen(seed);
+  ::arrow::random::uniform_real_distribution<T> d(min_value, max_value);
+  out->resize(n, static_cast<T>(0));
+  std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast<T>(d(gen)); });
+}
+
+template <typename T, typename U>
+void rand_uniform_int(int64_t n, uint32_t seed, T min_value, T max_value, U* out) {
+  assert(out || (n == 0));
+  std::default_random_engine gen(seed);
+  std::uniform_int_distribution<T> d(min_value, max_value);
+  std::generate(out, out + n, [&d, &gen] { return static_cast<U>(d(gen)); });
+}
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h
new file mode 100644
index 0000000000000000000000000000000000000000..8aa04a83288d9f8ce39a2d7c92b528ac9742bf98
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Random real generation is very slow on Arm if built with clang + libstdc++ +// due to software emulated long double arithmetic. +// This file ports some random real libs from llvm libc++ library, which are +// free from long double calculation. +// It improves performance significantly on both Arm (~100x) and x86 (~8x) in +// generating random reals when built with clang + gnu libstdc++. +// Based on: https://github.com/llvm/llvm-project/tree/main/libcxx + +#pragma once + +#include + +#include + +namespace arrow { +namespace random { + +namespace detail { + +// std::generate_canonical, simplified +// https://en.cppreference.com/w/cpp/numeric/random/generate_canonical +template +RealType generate_canonical(Rng& rng) { + const size_t b = std::numeric_limits::digits; + const size_t log2R = 63 - ::arrow::bit_util::CountLeadingZeros( + static_cast(Rng::max() - Rng::min()) + 1); + const size_t k = b / log2R + (b % log2R != 0) + (b == 0); + const RealType r = static_cast(Rng::max() - Rng::min()) + 1; + RealType base = r; + RealType sp = static_cast(rng() - Rng::min()); + for (size_t i = 1; i < k; ++i, base *= r) { + sp += (rng() - Rng::min()) * base; + } + return sp / base; +} + +} // namespace detail + +// std::uniform_real_distribution, simplified +// https://en.cppreference.com/w/cpp/numeric/random/uniform_real_distribution +template +struct uniform_real_distribution { + const RealType a, b; + + explicit uniform_real_distribution(RealType a = 0, RealType b = 1) : a(a), b(b) {} + + template + RealType operator()(Rng& rng) { + return (b - a) * detail::generate_canonical(rng) + a; + } +}; + +// std::bernoulli_distribution, simplified +// https://en.cppreference.com/w/cpp/numeric/random/bernoulli_distribution +struct bernoulli_distribution { + const double p; + + explicit bernoulli_distribution(double p = 0.5) : p(p) {} + + template + bool operator()(Rng& rng) { + return detail::generate_canonical(rng) < p; + } +}; + +} // namespace random +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h new file mode 100644 index 0000000000000000000000000000000000000000..b4b2785a36292df93777c2c4e76eea27f3c4a0c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h @@ -0,0 +1,140 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/buffer.h"
+#include "arrow/record_batch.h"
+#include "arrow/status.h"
+#include "arrow/testing/visibility.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+
+template <typename T>
+Status CopyBufferFromVector(const std::vector<T>& values, MemoryPool* pool,
+                            std::shared_ptr<Buffer>* result) {
+  int64_t nbytes = static_cast<int64_t>(values.size()) * sizeof(T);
+
+  ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(nbytes, pool));
+  auto immutable_data = reinterpret_cast<const uint8_t*>(values.data());
+  std::copy(immutable_data, immutable_data + nbytes, buffer->mutable_data());
+  memset(buffer->mutable_data() + nbytes, 0,
+         static_cast<size_t>(buffer->capacity() - nbytes));
+
+  *result = std::move(buffer);
+  return Status::OK();
+}
+
+// Sets approximately pct_null of the first n bytes in null_bytes to zero
+// and the rest to non-zero (true) values.
+ARROW_TESTING_EXPORT void random_null_bytes(int64_t n, double pct_null,
+                                            uint8_t* null_bytes);
+ARROW_TESTING_EXPORT void random_is_valid(int64_t n, double pct_null,
+                                          std::vector<bool>* is_valid,
+                                          int random_seed = 0);
+ARROW_TESTING_EXPORT void random_bytes(int64_t n, uint32_t seed, uint8_t* out);
+ARROW_TESTING_EXPORT std::string random_string(int64_t n, uint32_t seed);
+ARROW_TESTING_EXPORT int32_t DecimalSize(int32_t precision);
+ARROW_TESTING_EXPORT void random_ascii(int64_t n, uint32_t seed, uint8_t* out);
+ARROW_TESTING_EXPORT int64_t CountNulls(const std::vector<uint8_t>& valid_bytes);
+
+ARROW_TESTING_EXPORT Status MakeRandomByteBuffer(int64_t length, MemoryPool* pool,
+                                                 std::shared_ptr<Buffer>* out,
+                                                 uint32_t seed = 0);
+
+ARROW_TESTING_EXPORT uint64_t random_seed();
+
+#define DECL_T() typedef typename TestFixture::T T;
+
+#define DECL_TYPE() typedef typename TestFixture::Type Type;
+
+// ----------------------------------------------------------------------
+// A RecordBatchReader for serving a sequence of in-memory record batches
+
+class BatchIterator : public RecordBatchReader {
+ public:
+  BatchIterator(const std::shared_ptr<Schema>& schema,
+                const std::vector<std::shared_ptr<RecordBatch>>& batches)
+      : schema_(schema), batches_(batches), position_(0) {}
+
+  std::shared_ptr<Schema> schema() const override { return schema_; }
+
+  Status ReadNext(std::shared_ptr<RecordBatch>* out) override {
+    if (position_ >= batches_.size()) {
+      *out = nullptr;
+    } else {
+      *out = batches_[position_++];
+    }
+    return Status::OK();
+  }
+
+ private:
+  std::shared_ptr<Schema> schema_;
+  std::vector<std::shared_ptr<RecordBatch>> batches_;
+  size_t position_;
+};
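+
+// Example (illustrative sketch): BatchIterator adapts a fixed set of in-memory
+// batches to the RecordBatchReader interface, e.g. for feeding an API that
+// consumes a reader. The schema/batch1/batch2 names below are placeholders.
+//
+//   auto reader = std::make_shared<BatchIterator>(
+//       schema, std::vector<std::shared_ptr<RecordBatch>>{batch1, batch2});
+//   std::shared_ptr<RecordBatch> next;
+//   ARROW_RETURN_NOT_OK(reader->ReadNext(&next));  // next == batch1
+//
+// ReadNext() sets the output to nullptr once the sequence is exhausted.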
+
+static inline std::vector<std::shared_ptr<DataType> (*)(FieldVector, std::vector<int8_t>)>
+UnionTypeFactories() {
+  return {sparse_union, dense_union};
+}
+
+// Return the value of the ARROW_TEST_DATA environment variable or return error
+// Status
+ARROW_TESTING_EXPORT Status GetTestResourceRoot(std::string*);
+
+// Return the value of the ARROW_TIMEZONE_DATABASE environment variable
+ARROW_TESTING_EXPORT std::optional<std::string> GetTestTimezoneDatabaseRoot();
+
+// Set the Timezone database based on the ARROW_TIMEZONE_DATABASE env variable
+// This is only relevant on Windows, since other OSs have compatible databases built-in
+ARROW_TESTING_EXPORT Status InitTestTimezoneDatabase();
+
+// Get a TCP port number to listen on.  This is a different number every time,
+// as reusing the same port across tests can produce spurious bind errors on
+// Windows.
+ARROW_TESTING_EXPORT int GetListenPort();
+
+// Get an IPv4 "address:port" to listen on.  The address will be a loopback
+// address.  Compared to GetListenPort(), this will minimize the risk of
+// port conflicts.
+ARROW_TESTING_EXPORT std::string GetListenAddress();
+
+ARROW_TESTING_EXPORT
+const std::vector<std::shared_ptr<DataType>>& all_dictionary_index_types();
+
+// Get a list of supported hardware flags from the given candidates.
+// The result will always contain 0, meaning no optional CPU feature enabled at all.
+ARROW_TESTING_EXPORT
+std::vector<int64_t> GetSupportedHardwareFlags(
+    const std::vector<int64_t>& candidate_flags);
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h
new file mode 100644
index 0000000000000000000000000000000000000000..4750e697fc7972e8ad57766ffd1134cf3e99fd14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h
@@ -0,0 +1,466 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/compare.h" +#include "arrow/util/endian.h" +#include "arrow/util/functional.h" +#include "arrow/util/span.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class BooleanArray; + +namespace internal { + +class ARROW_EXPORT Bitmap : public util::ToStringOstreamable, + public util::EqualityComparable { + public: + Bitmap() = default; + + Bitmap(const std::shared_ptr& buffer, int64_t offset, int64_t length) + : data_(buffer->data()), offset_(offset), length_(length) { + if (buffer->is_mutable()) { + mutable_data_ = buffer->mutable_data(); + } + } + + Bitmap(const void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), offset_(offset), length_(length) {} + + Bitmap(void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), + mutable_data_(reinterpret_cast(data)), + offset_(offset), + length_(length) {} + + Bitmap Slice(int64_t offset) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length_ - offset}; + } else { + return {data_, offset_ + offset, length_ - offset}; + } + } + + Bitmap Slice(int64_t offset, int64_t length) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length}; + } else { + return {data_, offset_ + offset, length}; + } + } + + std::string ToString() const; + + bool Equals(const Bitmap& other) const; + + std::string Diff(const Bitmap& other) const; + + bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); } + + bool operator[](int64_t i) const { return GetBit(i); } + + void SetBitTo(int64_t i, bool v) const { + bit_util::SetBitTo(mutable_data_, i + offset_, v); + } + + void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); } + + void CopyFrom(const Bitmap& other); + void CopyFromInverted(const Bitmap& other); + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps, N); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const std::array& bitmaps, Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit words of bits from each bitmap as array + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// + /// TODO(bkietz) allow for early termination + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. 
+ // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. + template >::type::value_type> + static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + // local, mutable variables which will be sliced/decremented to represent consumption: + Bitmap bitmaps[N]; + int64_t offsets[N]; + int64_t bit_length = BitLength(bitmaps_arg, N); + util::span words[N]; + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps_arg[i]; + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + + auto consume = [&](int64_t consumed_bits) { + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits); + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + bit_length -= consumed_bits; + }; + + std::array visited_words; + visited_words.fill(0); + + if (bit_length <= kBitWidth * 2) { + // bitmaps fit into one or two words so don't bother with optimization + while (bit_length > 0) { + auto leading_bits = std::min(bit_length, kBitWidth); + SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + return 0; + } + + int64_t max_offset = *std::max_element(offsets, offsets + N); + int64_t min_offset = *std::min_element(offsets, offsets + N); + if (max_offset > 0) { + // consume leading bits + auto leading_bits = kBitWidth - min_offset; + SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + assert(*std::min_element(offsets, offsets + N) == 0); + + int64_t whole_word_count = bit_length / kBitWidth; + assert(whole_word_count >= 1); + + if (min_offset == max_offset) { + // all offsets were identical, all leading bits have been consumed + assert( + std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; })); + + for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) { + for (size_t i = 0; i < N; ++i) { + visited_words[i] = words[i][word_i]; + } + visitor(visited_words); + } + consume(whole_word_count * kBitWidth); + } else { + // leading bits from potentially incomplete words have been consumed + + // word_i such that words[i][word_i] and words[i][word_i + 1] are lie entirely + // within the bitmap for all i + for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) { + for (size_t i = 0; i < N; ++i) { + if (offsets[i] == 0) { + visited_words[i] = words[i][word_i]; + } else { + auto words0 = bit_util::ToLittleEndian(words[i][word_i]); + auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]); + visited_words[i] = bit_util::FromLittleEndian( + (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i]))); + } + } + visitor(visited_words); + } + consume((whole_word_count - 1) * kBitWidth); + + SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words); + + visitor(visited_words); + consume(kBitWidth); + } + + // load remaining bits + if (bit_length > 0) { + SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words); + visitor(visited_words); + } + + return min_offset; + } + + template >::type::value_type> + static void RunVisitWordsAndWriteLoop(int64_t bit_length, + std::array& readers, + std::array& 
writers, + Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + std::array visited_words; + std::array output_words; + + // every reader will have same number of words, since they are same length'ed + // TODO($JIRA) this will be inefficient in some cases. When there are offsets beyond + // Word boundary, every Word would have to be created from 2 adjoining Words + auto n_words = readers[0].words(); + bit_length -= n_words * kBitWidth; + while (n_words--) { + // first collect all words to visited_words array + for (size_t i = 0; i < N; i++) { + visited_words[i] = readers[i].NextWord(); + } + visitor(visited_words, &output_words); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextWord(output_words[i]); + } + } + + // every reader will have same number of trailing bytes, because of the above reason + // tailing portion could be more than one word! (ref: BitmapWordReader constructor) + // remaining full/ partial words to write + + if (bit_length) { + // convert the word visitor lambda to a byte_visitor + auto byte_visitor = [&](const std::array& in, + std::array* out) { + std::array in_words; + std::array out_words; + std::copy(in.begin(), in.end(), in_words.begin()); + visitor(in_words, &out_words); + for (size_t i = 0; i < M; i++) { + out->at(i) = static_cast(out_words[i]); + } + }; + + std::array visited_bytes; + std::array output_bytes; + int n_bytes = readers[0].trailing_bytes(); + while (n_bytes--) { + visited_bytes.fill(0); + output_bytes.fill(0); + int valid_bits; + for (size_t i = 0; i < N; i++) { + visited_bytes[i] = readers[i].NextTrailingByte(valid_bits); + } + byte_visitor(visited_bytes, &output_bytes); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextTrailingByte(output_bytes[i], valid_bits); + } + } + } + } + + /// \brief Visit words of bits from each input bitmap as array and collects + /// outputs to an array, to be written into the output bitmaps accordingly. + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// Visitor is expected to have the following signature + /// [](const std::array& in_words, std::array* out_words){...} + /// + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. 
+  template <size_t N, size_t M, typename Visitor,
+            typename Word = typename std::decay<
+                internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
+  static void VisitWordsAndWrite(const std::array<Bitmap, N>& bitmaps_arg,
+                                 std::array<Bitmap, M>* out_bitmaps_arg,
+                                 Visitor&& visitor) {
+    int64_t bit_length = BitLength(bitmaps_arg);
+    assert(bit_length == BitLength(*out_bitmaps_arg));
+
+    // if both input and output bitmaps have no byte offset, then use special template
+    if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(),
+                    [](const Bitmap& b) { return b.offset_ % 8 == 0; }) &&
+        std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(),
+                    [](const Bitmap& b) { return b.offset_ % 8 == 0; })) {
+      std::array<BitmapWordReader<Word, /*may_have_byte_offset=*/false>, N> readers;
+      for (size_t i = 0; i < N; ++i) {
+        const Bitmap& in_bitmap = bitmaps_arg[i];
+        readers[i] = BitmapWordReader<Word, /*may_have_byte_offset=*/false>(
+            in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
+      }
+
+      std::array<BitmapWordWriter<Word, /*may_have_byte_offset=*/false>, M> writers;
+      for (size_t i = 0; i < M; ++i) {
+        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
+        writers[i] = BitmapWordWriter<Word, /*may_have_byte_offset=*/false>(
+            out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_);
+      }
+
+      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
+    } else {
+      std::array<BitmapWordReader<Word>, N> readers;
+      for (size_t i = 0; i < N; ++i) {
+        const Bitmap& in_bitmap = bitmaps_arg[i];
+        readers[i] = BitmapWordReader<Word>(in_bitmap.data_, in_bitmap.offset_,
+                                            in_bitmap.length_);
+      }
+
+      std::array<BitmapWordWriter<Word>, M> writers;
+      for (size_t i = 0; i < M; ++i) {
+        const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
+        writers[i] = BitmapWordWriter<Word>(out_bitmap.mutable_data_, out_bitmap.offset_,
+                                            out_bitmap.length_);
+      }
+
+      RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
+    }
+  }
+
+  const uint8_t* data() const { return data_; }
+  uint8_t* mutable_data() { return mutable_data_; }
+
+  /// offset of first bit relative to buffer().data()
+  int64_t offset() const { return offset_; }
+
+  /// number of bits in this Bitmap
+  int64_t length() const { return length_; }
+
+  /// span of all bytes which contain any bit in this Bitmap
+  util::span<const uint8_t> bytes() const {
+    auto byte_offset = offset_ / 8;
+    auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset;
+    return {data_ + byte_offset, static_cast<size_t>(byte_count)};
+  }
+
+ private:
+  /// span of all Words which contain any bit in this Bitmap
+  ///
+  /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36)
+  /// words() would span bits [16, 48).
+  ///
+  /// 0       16      32     48     64
+  /// |-------|-------|------|------| (buffer)
+  ///          [       ]              (bitmap)
+  ///         |-------|------|        (returned words)
+  ///
+  /// \warning The words may contain bytes which lie outside the buffer or are
+  /// uninitialized.
+  template <typename Word>
+  util::span<const Word> words() const {
+    auto bytes_addr = reinterpret_cast<intptr_t>(bytes().data());
+    auto words_addr = bytes_addr - bytes_addr % sizeof(Word);
+    auto word_byte_count =
+        bit_util::RoundUpToPowerOf2(static_cast<int64_t>(bytes_addr + bytes().size()),
+                                    static_cast<int64_t>(sizeof(Word))) -
+        words_addr;
+    return {reinterpret_cast<const Word*>(words_addr),
+            static_cast<size_t>(word_byte_count / sizeof(Word))};
+  }
+
+  /// offset of first bit relative to words<Word>().data()
+  template <typename Word>
+  int64_t word_offset() const {
+    return offset_ + 8 * (reinterpret_cast<intptr_t>(data_) -
+                          reinterpret_cast<intptr_t>(words<Word>().data()));
+  }
+
+  /// load words from bitmaps bitwise
+  template <size_t N, typename Word>
+  static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset,
+                            int64_t out_length, bool set_trailing_bits,
+                            std::array<Word, N>* out) {
+    out->fill(0);
+
+    int64_t out_offset = set_trailing_bits ? sizeof(Word) * 8 - out_length : 0;
+
+    Bitmap slices[N], out_bitmaps[N];
+    for (size_t i = 0; i < N; ++i) {
+      slices[i] = bitmaps[i].Slice(offset, out_length);
+      out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length);
+    }
+
+    int64_t bit_i = 0;
+    Bitmap::VisitBits(slices, [&](std::bitset<N> bits) {
+      for (size_t i = 0; i < N; ++i) {
+        out_bitmaps[i].SetBitTo(bit_i, bits[i]);
+      }
+      ++bit_i;
+    });
+  }
+
+  /// assert bitmaps have identical length and return that length
+  static int64_t BitLength(const Bitmap* bitmaps, size_t N);
+
+  template <size_t N>
+  static int64_t BitLength(const std::array<Bitmap, N>& bitmaps) {
+    for (size_t i = 1; i < N; ++i) {
+      assert(bitmaps[i].length() == bitmaps[0].length());
+    }
+    return bitmaps[0].length();
+  }
+
+  const uint8_t* data_ = NULLPTR;
+  uint8_t* mutable_data_ = NULLPTR;
+  int64_t offset_ = 0, length_ = 0;
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9d900b2588d9d556fd1995de1d60d8583edfca7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h
@@ -0,0 +1,244 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
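+
+// A hedged usage sketch of the declarations below (illustrative only; `pool`,
+// `left`, `right` and `length` are assumed to be supplied by the caller):
+// AND two bitmaps into a freshly allocated buffer, then count the result.
+//
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> out,
+//                         arrow::internal::BitmapAnd(pool, left, /*left_offset=*/0,
+//                                                    right, /*right_offset=*/0,
+//                                                    length, /*out_offset=*/0));
+//   int64_t n_set = arrow::internal::CountSetBits(out->data(), /*bit_offset=*/0,
+//                                                 length);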
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/result.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Buffer;
+class MemoryPool;
+
+namespace internal {
+
+// ----------------------------------------------------------------------
+// Bitmap utilities
+
+/// Copy a bit range of an existing bitmap
+///
+/// \param[in] pool memory pool to allocate memory from
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to copy
+///
+/// \return a new buffer containing the copied bits
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> CopyBitmap(MemoryPool* pool, const uint8_t* bitmap,
+                                           int64_t offset, int64_t length);
+
+/// Copy a bit range of an existing bitmap into an existing bitmap
+///
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to copy
+/// \param[in] dest_offset bit offset into the destination
+/// \param[out] dest the destination buffer, must have at least space for
+/// (dest_offset + length) bits
+ARROW_EXPORT
+void CopyBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
+                int64_t dest_offset);
+
+/// Invert a bit range of an existing bitmap into an existing bitmap
+///
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to invert
+/// \param[in] dest_offset bit offset into the destination
+/// \param[out] dest the destination buffer, must have at least space for
+/// (dest_offset + length) bits
+ARROW_EXPORT
+void InvertBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
+                  int64_t dest_offset);
+
+/// Invert a bit range of an existing bitmap
+///
+/// \param[in] pool memory pool to allocate memory from
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to invert
+///
+/// \return a new buffer containing the inverted bits
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> InvertBitmap(MemoryPool* pool, const uint8_t* bitmap,
+                                             int64_t offset, int64_t length);
+
+/// Reverse a bit range of an existing bitmap into an existing bitmap
+///
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to reverse
+/// \param[in] dest_offset bit offset into the destination
+/// \param[out] dest the destination buffer, must have at least space for
+/// (dest_offset + length) bits
+ARROW_EXPORT
+void ReverseBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
+                   int64_t dest_offset);
+
+/// Reverse a bit range of an existing bitmap
+///
+/// \param[in] pool memory pool to allocate memory from
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to reverse
+///
+/// \return a new buffer containing the reversed bits
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> ReverseBitmap(MemoryPool* pool, const uint8_t* bitmap,
+                                              int64_t offset, int64_t length);
+
+/// Compute the number of 1's in the given data array
+///
+/// \param[in] data a packed LSB-ordered bitmap as a byte array
+/// \param[in] bit_offset a bitwise offset into the bitmap
+/// \param[in] length the number of bits to inspect in the bitmap relative to
+/// the offset
+///
+/// \return The number of set (1) bits in the range
+ARROW_EXPORT
+int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length);
+
+/// Compute the number of 1's in the result of an "and" (&) of two bitmaps
+///
+/// \param[in] left_bitmap a packed LSB-ordered bitmap as a byte array
+/// \param[in] left_offset a bitwise offset into the left bitmap
+/// \param[in] right_bitmap a packed LSB-ordered bitmap as a byte array
+/// \param[in] right_offset a bitwise offset into the right bitmap
+/// \param[in] length the length of the bitmaps (must be the same)
+///
+/// \return The number of set (1) bits in the "and" of the two bitmaps
+ARROW_EXPORT
+int64_t CountAndSetBits(const uint8_t* left_bitmap, int64_t left_offset,
+                        const uint8_t* right_bitmap, int64_t right_offset,
+                        int64_t length);
+
+ARROW_EXPORT
+bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+                  int64_t right_offset, int64_t length);
+
+// Same as BitmapEquals, but considers a NULL bitmap pointer the same as an
+// all-ones bitmap.
+ARROW_EXPORT
+bool OptionalBitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+                          int64_t right_offset, int64_t length);
+
+ARROW_EXPORT
+bool OptionalBitmapEquals(const std::shared_ptr<Buffer>& left, int64_t left_offset,
+                          const std::shared_ptr<Buffer>& right, int64_t right_offset,
+                          int64_t length);
+
+/// \brief Do a "bitmap and" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out_buffer starting at the given bit-offset.
+///
+/// out_buffer will be allocated and initialized to zeros using pool before
+/// the operation.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapAnd(MemoryPool* pool, const uint8_t* left,
+                                          int64_t left_offset, const uint8_t* right,
+                                          int64_t right_offset, int64_t length,
+                                          int64_t out_offset);
+
+/// \brief Do a "bitmap and" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out starting at the given bit-offset.
+ARROW_EXPORT
+void BitmapAnd(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+               int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
+
+/// \brief Do a "bitmap or" for the given bit length on right and left buffers
+/// starting at their respective bit-offsets and put the results in out_buffer
+/// starting at the given bit-offset.
+///
+/// out_buffer will be allocated and initialized to zeros using pool before
+/// the operation.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapOr(MemoryPool* pool, const uint8_t* left,
+                                         int64_t left_offset, const uint8_t* right,
+                                         int64_t right_offset, int64_t length,
+                                         int64_t out_offset);
+
+/// \brief Do a "bitmap or" for the given bit length on right and left buffers
+/// starting at their respective bit-offsets and put the results in out
+/// starting at the given bit-offset.
+ARROW_EXPORT
+void BitmapOr(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+              int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
+
+/// \brief Do a "bitmap xor" for the given bit-length on right and left
+/// buffers starting at their respective bit-offsets and put the results in
+/// out_buffer starting at the given bit offset.
+///
+/// out_buffer will be allocated and initialized to zeros using pool before
+/// the operation.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapXor(MemoryPool* pool, const uint8_t* left,
+                                          int64_t left_offset, const uint8_t* right,
+                                          int64_t right_offset, int64_t length,
+                                          int64_t out_offset);
+
+/// \brief Do a "bitmap xor" for the given bit-length on right and left
+/// buffers starting at their respective bit-offsets and put the results in
+/// out starting at the given bit offset.
+ARROW_EXPORT
+void BitmapXor(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+               int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
+
+/// \brief Do a "bitmap and not" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out_buffer starting at the given bit-offset.
+///
+/// out_buffer will be allocated and initialized to zeros using pool before
+/// the operation.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapAndNot(MemoryPool* pool, const uint8_t* left,
+                                             int64_t left_offset, const uint8_t* right,
+                                             int64_t right_offset, int64_t length,
+                                             int64_t out_offset);
+
+/// \brief Do a "bitmap and not" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out starting at the given bit-offset.
+ARROW_EXPORT
+void BitmapAndNot(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+                  int64_t right_offset, int64_t length, int64_t out_offset,
+                  uint8_t* out);
+
+/// \brief Do a "bitmap or not" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out_buffer starting at the given bit-offset.
+///
+/// out_buffer will be allocated and initialized to zeros using pool before
+/// the operation.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapOrNot(MemoryPool* pool, const uint8_t* left,
+                                            int64_t left_offset, const uint8_t* right,
+                                            int64_t right_offset, int64_t length,
+                                            int64_t out_offset);
+
+/// \brief Do a "bitmap or not" on right and left buffers starting at
+/// their respective bit-offsets for the given bit-length and put
+/// the results in out starting at the given bit-offset.
+ARROW_EXPORT
+void BitmapOrNot(const uint8_t* left, int64_t left_offset, const uint8_t* right,
+                 int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h
new file mode 100644
index 0000000000000000000000000000000000000000..97f6b61a1f8cebd297a5f4a8fe4401b6073de45f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
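+
+// A brief usage sketch (illustrative; `array` is assumed to be an Arrow array
+// obtained elsewhere): checked_cast compiles to a static_cast in release
+// builds (NDEBUG) and a dynamic_cast in debug builds, so a bad downcast trips
+// under debug assertions without costing anything in release.
+//
+//   const arrow::DataType& type = *array->type();
+//   const auto& int_type =
+//       arrow::internal::checked_cast<const arrow::Int32Type&>(type);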
+ +#pragma once + +#include +#include +#include + +namespace arrow { +namespace internal { + +template +inline OutputType checked_cast(InputType&& value) { + static_assert(std::is_class::type>::type>::value, + "checked_cast input type must be a class"); + static_assert(std::is_class::type>::type>::value, + "checked_cast output type must be a class"); +#ifdef NDEBUG + return static_cast(value); +#else + return dynamic_cast(value); +#endif +} + +template +std::shared_ptr checked_pointer_cast(std::shared_ptr r) noexcept { +#ifdef NDEBUG + return std::static_pointer_cast(std::move(r)); +#else + return std::dynamic_pointer_cast(std::move(r)); +#endif +} + +template +std::unique_ptr checked_pointer_cast(std::unique_ptr r) noexcept { +#ifdef NDEBUG + return std::unique_ptr(static_cast(r.release())); +#else + return std::unique_ptr(dynamic_cast(r.release())); +#endif +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h new file mode 100644 index 0000000000000000000000000000000000000000..161ad0bfddfc5a52040256a9cb39b5af96b876db --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; + +class ARROW_EXPORT BoundaryFinder { + public: + BoundaryFinder() = default; + + virtual ~BoundaryFinder(); + + /// \brief Find the position of the first delimiter inside block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindFirst(std::string_view partial, std::string_view block, + int64_t* out_pos) = 0; + + /// \brief Find the position of the last delimiter inside block + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the last delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0; + + /// \brief Find the position of the Nth delimiter inside the block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. 
+ /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + /// + /// The returned `num_found` is the number of delimiters actually found + virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count, + int64_t* out_pos, int64_t* num_found) = 0; + + static constexpr int64_t kNoDelimiterFound = -1; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder); +}; + +ARROW_EXPORT +std::shared_ptr MakeNewlineBoundaryFinder(); + +/// \brief A reusable block-based chunker for delimited data +/// +/// The chunker takes a block of delimited data and helps carve a sub-block +/// which begins and ends on delimiters (suitable for consumption by parsers +/// which can only parse whole objects). +class ARROW_EXPORT Chunker { + public: + explicit Chunker(std::shared_ptr delimiter); + ~Chunker(); + + /// \brief Carve up a chunk in a block of data to contain only whole objects + /// + /// Pre-conditions: + /// - `block` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// + /// Post-conditions: + /// - block == whole + partial + /// - `whole` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `partial` doesn't contain an entire delimited object + /// (IOW: `partial` is generally small) + /// + /// This method will look for the last delimiter in `block` and may + /// therefore be costly. + /// + /// \param[in] block data to be chunked + /// \param[out] whole subrange of block containing whole delimited objects + /// \param[out] partial subrange of block starting with a partial delimited object + Status Process(std::shared_ptr block, std::shared_ptr* whole, + std::shared_ptr* partial); + + /// \brief Carve the completion of a partial object out of a block + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + /// This method will look for the first delimiter in `block` and should + /// therefore be reasonably cheap. + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[out] completion subrange of block containing the completion of partial + /// \param[out] rest subrange of block containing what completion does not cover + Status ProcessWithPartial(std::shared_ptr partial, + std::shared_ptr block, + std::shared_ptr* completion, + std::shared_ptr* rest); + + /// \brief Like ProcessWithPartial, but for the last block of a file + /// + /// This method allows for a final delimited object without a trailing delimiter + /// (ProcessWithPartial would return an error in that case). 
+ /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// - `block` follows `partial` in file order and is the last data block + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + Status ProcessFinal(std::shared_ptr partial, std::shared_ptr block, + std::shared_ptr* completion, std::shared_ptr* rest); + + /// \brief Skip count number of rows + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - `count` is updated to indicate the number of rows that still need to be skipped + /// - If `count` is > 0 then `rest` is an incomplete block that should be a future + /// `partial` + /// - Else `rest` could be one or more valid blocks of delimited data which need to be + /// parsed + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[in] final whether this is the final chunk + /// \param[in,out] count number of rows that need to be skipped + /// \param[out] rest subrange of block containing what was not skipped + Status ProcessSkip(std::shared_ptr partial, std::shared_ptr block, + bool final, int64_t* count, std::shared_ptr* rest); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker); + + std::shared_ptr boundary_finder_; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h new file mode 100644 index 0000000000000000000000000000000000000000..0a432fee2cd315d23bd35e0907e327efc7f419ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/float16.h @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
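+
+// A small usage sketch (illustrative only): round-trip a value through the
+// half-precision representation and inspect it.
+//
+//   arrow::util::Float16 h = arrow::util::Float16::FromFloat(1.5f);
+//   uint16_t bits = h.bits();    // 0x3e00 encodes 1.5
+//   float f = h.ToFloat();       // exactly 1.5f again
+//   bool nan = h.is_nan();       // false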
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +/// \brief Class representing an IEEE half-precision float, encoded as a `uint16_t` +/// +/// The exact format is as follows (from LSB to MSB): +/// - bits 0-10: mantissa +/// - bits 10-15: exponent +/// - bit 15: sign +/// +class ARROW_EXPORT Float16 { + public: + Float16() = default; + explicit Float16(float f) : Float16(FromFloat(f)) {} + explicit Float16(double d) : Float16(FromDouble(d)) {} + template >* = NULLPTR> + explicit Float16(T v) : Float16(static_cast(v)) {} + + /// \brief Create a `Float16` from its exact binary representation + constexpr static Float16 FromBits(uint16_t bits) { return Float16{bits, bool{}}; } + /// \brief Create a `Float16` from a 32-bit float (may lose precision) + static Float16 FromFloat(float f); + /// \brief Create a `Float16` from a 64-bit float (may lose precision) + static Float16 FromDouble(double d); + + /// \brief Read a `Float16` from memory in native-endian byte order + static Float16 FromBytes(const uint8_t* src) { + return FromBits(SafeLoadAs(src)); + } + + /// \brief Read a `Float16` from memory in little-endian byte order + static Float16 FromLittleEndian(const uint8_t* src) { + return FromBits(::arrow::bit_util::FromLittleEndian(SafeLoadAs(src))); + } + + /// \brief Read a `Float16` from memory in big-endian byte order + static Float16 FromBigEndian(const uint8_t* src) { + return FromBits(::arrow::bit_util::FromBigEndian(SafeLoadAs(src))); + } + + /// \brief Return the value's binary representation as a `uint16_t` + constexpr uint16_t bits() const { return bits_; } + + /// \brief Return true if the value is negative (sign bit is set) + constexpr bool signbit() const { return (bits_ & 0x8000) != 0; } + + /// \brief Return true if the value is NaN + constexpr bool is_nan() const { return (bits_ & 0x7fff) > 0x7c00; } + /// \brief Return true if the value is positive/negative infinity + constexpr bool is_infinity() const { return (bits_ & 0x7fff) == 0x7c00; } + /// \brief Return true if the value is finite and not NaN + constexpr bool is_finite() const { return (bits_ & 0x7c00) != 0x7c00; } + /// \brief Return true if the value is positive/negative zero + constexpr bool is_zero() const { return (bits_ & 0x7fff) == 0; } + + /// \brief Convert to a 32-bit float + float ToFloat() const; + /// \brief Convert to a 64-bit float + double ToDouble() const; + + explicit operator float() const { return ToFloat(); } + explicit operator double() const { return ToDouble(); } + + /// \brief Copy the value's bytes in native-endian byte order + void ToBytes(uint8_t* dest) const { std::memcpy(dest, &bits_, sizeof(bits_)); } + /// \brief Return the value's bytes in native-endian byte order + constexpr std::array ToBytes() const { +#if ARROW_LITTLE_ENDIAN + return ToLittleEndian(); +#else + return ToBigEndian(); +#endif + } + + /// \brief Copy the value's bytes in little-endian byte order + void ToLittleEndian(uint8_t* dest) const { + const auto bytes = ToLittleEndian(); + std::memcpy(dest, bytes.data(), bytes.size()); + } + /// \brief Return the value's bytes in little-endian byte order + constexpr std::array ToLittleEndian() const { +#if ARROW_LITTLE_ENDIAN + return {uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)}; +#else + return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)}; +#endif + } + + /// \brief Copy the 
value's bytes in big-endian byte order + void ToBigEndian(uint8_t* dest) const { + const auto bytes = ToBigEndian(); + std::memcpy(dest, bytes.data(), bytes.size()); + } + /// \brief Return the value's bytes in big-endian byte order + constexpr std::array ToBigEndian() const { +#if ARROW_LITTLE_ENDIAN + return {uint8_t(bits_ >> 8), uint8_t(bits_ & 0xff)}; +#else + return {uint8_t(bits_ & 0xff), uint8_t(bits_ >> 8)}; +#endif + } + + constexpr Float16 operator-() const { return FromBits(bits_ ^ 0x8000); } + constexpr Float16 operator+() const { return FromBits(bits_); } + + friend constexpr bool operator==(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return Float16::CompareEq(lhs, rhs); + } + friend constexpr bool operator!=(Float16 lhs, Float16 rhs) { return !(lhs == rhs); } + + friend constexpr bool operator<(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return Float16::CompareLt(lhs, rhs); + } + friend constexpr bool operator>(Float16 lhs, Float16 rhs) { return rhs < lhs; } + + friend constexpr bool operator<=(Float16 lhs, Float16 rhs) { + if (lhs.is_nan() || rhs.is_nan()) return false; + return !Float16::CompareLt(rhs, lhs); + } + friend constexpr bool operator>=(Float16 lhs, Float16 rhs) { return rhs <= lhs; } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, Float16 arg); + + protected: + uint16_t bits_; + + private: + constexpr Float16(uint16_t bits, bool) : bits_(bits) {} + + // Comparison helpers that assume neither operand is NaN + static constexpr bool CompareEq(Float16 lhs, Float16 rhs) { + return (lhs.bits() == rhs.bits()) || (lhs.is_zero() && rhs.is_zero()); + } + static constexpr bool CompareLt(Float16 lhs, Float16 rhs) { + if (lhs.signbit()) { + if (rhs.signbit()) { + // Both are negative + return lhs.bits() > rhs.bits(); + } else { + // Handle +/-0 + return !lhs.is_zero() || rhs.bits() != 0; + } + } else if (rhs.signbit()) { + return false; + } else { + // Both are positive + return lhs.bits() < rhs.bits(); + } + } +}; + +static_assert(std::is_trivial_v); + +} // namespace util +} // namespace arrow + +// TODO: Not complete +template <> +class std::numeric_limits { + using T = arrow::util::Float16; + + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + + static constexpr T min() { return T::FromBits(0b0000010000000000); } + static constexpr T max() { return T::FromBits(0b0111101111111111); } + static constexpr T lowest() { return -max(); } + + static constexpr T infinity() { return T::FromBits(0b0111110000000000); } + + static constexpr T quiet_NaN() { return T::FromBits(0b0111111111111111); } +}; diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h new file mode 100644 index 0000000000000000000000000000000000000000..dd1c38a78216e3f3d0d8c3f42c63a7d43f4bab44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace arrow { +namespace internal { + +// ---------------------------------------------------------------------- +// BEGIN Hash utilities from Boost + +namespace detail { + +#if defined(_MSC_VER) +#define ARROW_HASH_ROTL32(x, r) _rotl(x, r) +#else +#define ARROW_HASH_ROTL32(x, r) (x << r) | (x >> (32 - r)) +#endif + +template +inline void hash_combine_impl(SizeT& seed, SizeT value) { + seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2); +} + +inline void hash_combine_impl(uint32_t& h1, uint32_t k1) { + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + k1 *= c1; + k1 = ARROW_HASH_ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ARROW_HASH_ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; +} + +#undef ARROW_HASH_ROTL32 + +} // namespace detail + +template +inline void hash_combine(std::size_t& seed, T const& v) { + std::hash hasher; + return ::arrow::internal::detail::hash_combine_impl(seed, hasher(v)); +} + +// END Hash utilities from Boost +// ---------------------------------------------------------------------- + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h new file mode 100644 index 0000000000000000000000000000000000000000..58deb8019d94155e4488af7e3047e599abb7197b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/list_util.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/array/data.h" +#include "arrow/result.h" + +namespace arrow { +namespace list_util { +namespace internal { + +/// \brief Calculate the smallest continuous range of values used by the +/// var-length list-like input (list, map and list-view types). 
+/// +/// \param input The input array such that is_var_length_list_like(input.type) +/// is true +/// \return A pair of (offset, length) describing the range +ARROW_EXPORT Result> RangeOfValuesUsed( + const ArraySpan& input); + +/// \brief Calculate the sum of the sizes of all valid lists or list-views +/// +/// This is usually the same as the length of the RangeOfValuesUsed() range, but +/// it can be: +/// - Smaller: when the child array contains many values that are not +/// referenced by the lists or list-views in the parent array +/// - Greater: when the list-views share child array ranges +/// +/// \param input The input array such that is_var_length_list_like(input.type) +/// is true +/// \return The sum of all list or list-view sizes +ARROW_EXPORT Result SumOfLogicalListSizes(const ArraySpan& input); + +} // namespace internal + +} // namespace list_util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e377773f62f810d330c40e565d5acda0aabd4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
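+
+// A short usage sketch (illustrative only) for the helpers declared below:
+//
+//   using namespace arrow::internal;
+//   auto parts = SplitString("a,b,c", ',');   // {"a", "b", "c"}
+//   auto joined = JoinStrings(parts, "|");    // "a|b|c"
+//   auto lower = AsciiToLower("MiXeD");       // "mixed"
+//   auto n = ToChars(42);                     // "42", locale-independent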
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +#include +#endif + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +ARROW_EXPORT std::string HexEncode(const uint8_t* data, size_t length); + +ARROW_EXPORT std::string Escape(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(std::string_view str); + +ARROW_EXPORT std::string Escape(std::string_view str); + +ARROW_EXPORT Status ParseHexValue(const char* hex_pair, uint8_t* out); + +ARROW_EXPORT Status ParseHexValues(std::string_view hex_string, uint8_t* out); + +namespace internal { + +/// Like std::string_view::starts_with in C++20 +inline bool StartsWith(std::string_view s, std::string_view prefix) { + return s.length() >= prefix.length() && + (s.empty() || s.substr(0, prefix.length()) == prefix); +} + +/// Like std::string_view::ends_with in C++20 +inline bool EndsWith(std::string_view s, std::string_view suffix) { + return s.length() >= suffix.length() && + (s.empty() || s.substr(s.length() - suffix.length()) == suffix); +} + +/// \brief Split a string with a delimiter +ARROW_EXPORT +std::vector SplitString(std::string_view v, char delim, + int64_t limit = 0); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Trim whitespace from left and right sides of string +ARROW_EXPORT +std::string TrimString(std::string value); + +ARROW_EXPORT +bool AsciiEqualsCaseInsensitive(std::string_view left, std::string_view right); + +ARROW_EXPORT +std::string AsciiToLower(std::string_view value); + +ARROW_EXPORT +std::string AsciiToUpper(std::string_view value); + +/// \brief Search for the first instance of a token and replace it or return nullopt if +/// the token is not found. +ARROW_EXPORT +std::optional Replace(std::string_view s, std::string_view token, + std::string_view replacement); + +/// \brief Get boolean value from string +/// +/// If "1", "true" (case-insensitive), returns true +/// If "0", "false" (case-insensitive), returns false +/// Otherwise, returns Status::Invalid +ARROW_EXPORT +arrow::Result ParseBoolean(std::string_view value); + +#if __has_include() + +namespace detail { +template +struct can_to_chars : public std::false_type {}; + +template +struct can_to_chars< + T, std::void_t(), std::declval(), + std::declval>()))>> + : public std::true_type {}; +} // namespace detail + +/// \brief Whether std::to_chars exists for the current value type. +/// +/// This is useful as some C++ libraries do not implement all specified overloads +/// for std::to_chars. +template +inline constexpr bool have_to_chars = detail::can_to_chars::value; + +/// \brief An ergonomic wrapper around std::to_chars, returning a std::string +/// +/// For most inputs, the std::string result will not incur any heap allocation +/// thanks to small string optimization. +/// +/// Compared to std::to_string, this function gives locale-agnostic results +/// and might also be faster. +template +std::string ToChars(T value, Args&&... args) { + if constexpr (!have_to_chars) { + // Some C++ standard libraries do not yet implement std::to_chars for all types, + // in which case we have to fallback to std::string. 
+ return std::to_string(value); + } else { + // According to various sources, the GNU libstdc++ and Microsoft's C++ STL + // allow up to 15 bytes of small string optimization, while clang's libc++ + // goes up to 22 bytes. Choose the pessimistic value. + std::string out(15, 0); + auto res = std::to_chars(&out.front(), &out.back(), value, args...); + while (res.ec != std::errc{}) { + assert(res.ec == std::errc::value_too_large); + out.resize(out.capacity() * 2); + res = std::to_chars(&out.front(), &out.back(), value, args...); + } + const auto length = res.ptr - out.data(); + assert(length <= static_cast(out.length())); + out.resize(length); + return out; + } +} + +#else // !__has_include() + +template +inline constexpr bool have_to_chars = false; + +template +std::string ToChars(T value, Args&&... args) { + return std::to_string(value); +} + +#endif + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h new file mode 100644 index 0000000000000000000000000000000000000000..3bb72f0d9cb7d7bb8b9ce8f2a65cc9f954924ca3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief A group of related tasks +/// +/// A TaskGroup executes tasks with the signature `Status()`. +/// Execution can be serial or parallel, depending on the TaskGroup +/// implementation. When Finish() returns, it is guaranteed that all +/// tasks have finished, or at least one has errored. +/// +/// Once an error has occurred any tasks that are submitted to the task group +/// will not run. The call to Append will simply return without scheduling the +/// task. +/// +/// If the task group is parallel it is possible that multiple tasks could be +/// running at the same time and one of those tasks fails. This will put the +/// task group in a failure state (so additional tasks cannot be run) however +/// it will not interrupt running tasks. Finish will not complete +/// until all running tasks have finished, even if one task fails. +/// +/// Once a task group has finished new tasks may not be added to it. If you need to start +/// a new batch of work then you should create a new task group. 
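+///
+/// A hedged usage sketch (illustrative; `executor` is assumed to be an
+/// internal::Executor* such as a thread pool obtained elsewhere, and
+/// DoWorkA/DoWorkB are caller-provided Status-returning placeholders):
+///
+///     auto group = TaskGroup::MakeThreaded(executor);
+///     group->Append([] { return DoWorkA(); });
+///     group->Append([] { return DoWorkB(); });
+///     ARROW_RETURN_NOT_OK(group->Finish());  // propagates the first error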
+class ARROW_EXPORT TaskGroup : public std::enable_shared_from_this { + public: + /// Add a Status-returning function to execute. Execution order is + /// undefined. The function may be executed immediately or later. + template + void Append(Function&& func) { + return AppendReal(std::forward(func)); + } + + /// Wait for execution of all tasks (and subgroups) to be finished, + /// or for at least one task (or subgroup) to error out. + /// The returned Status propagates the error status of the first failing + /// task (or subgroup). + virtual Status Finish() = 0; + + /// Returns a future that will complete the first time all tasks are finished. + /// This should be called only after all top level tasks + /// have been added to the task group. + /// + /// If you are using a TaskGroup asynchronously there are a few considerations to keep + /// in mind. The tasks should not block on I/O, etc (defeats the purpose of using + /// futures) and should not be doing any nested locking or you run the risk of the tasks + /// getting stuck in the thread pool waiting for tasks which cannot get scheduled. + /// + /// Primarily this call is intended to help migrate existing work written with TaskGroup + /// in mind to using futures without having to do a complete conversion on the first + /// pass. + virtual Future<> FinishAsync() = 0; + + /// The current aggregate error Status. Non-blocking, useful for stopping early. + virtual Status current_status() = 0; + + /// Whether some tasks have already failed. Non-blocking, useful for stopping early. + virtual bool ok() const = 0; + + /// How many tasks can typically be executed in parallel. + /// This is only a hint, useful for testing or debugging. + virtual int parallelism() = 0; + + static std::shared_ptr MakeSerial(StopToken = StopToken::Unstoppable()); + static std::shared_ptr MakeThreaded(internal::Executor*, + StopToken = StopToken::Unstoppable()); + + virtual ~TaskGroup() = default; + + protected: + TaskGroup() = default; + ARROW_DISALLOW_COPY_AND_ASSIGN(TaskGroup); + + virtual void AppendReal(FnOnce task) = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h new file mode 100644 index 0000000000000000000000000000000000000000..0b7cfa1cb166fd7bf06474e27ae6d80a23edb400 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/ProducerConsumerQueue.h @@ -0,0 +1,217 @@ +// Vendored from git tag v2021.02.15.00 + +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// @author Bo Hu (bhu@fb.com) +// @author Jordan DeLong (delong.j@fb.com) + +// This file has been modified as part of Apache Arrow to conform to +// Apache Arrow's coding conventions + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace arrow_vendored { +namespace folly { + +// Vendored from folly/Portability.h +namespace { +#if defined(__arm__) +#define FOLLY_ARM 1 +#else +#define FOLLY_ARM 0 +#endif + +#if defined(__s390x__) +#define FOLLY_S390X 1 +#else +#define FOLLY_S390X 0 +#endif + +constexpr bool kIsArchArm = FOLLY_ARM == 1; +constexpr bool kIsArchS390X = FOLLY_S390X == 1; +} // namespace + +// Vendored from folly/lang/Align.h +namespace { + +constexpr std::size_t hardware_destructive_interference_size = + (kIsArchArm || kIsArchS390X) ? 64 : 128; + +} // namespace + +/* + * ProducerConsumerQueue is a one producer and one consumer queue + * without locks. + */ +template +struct ProducerConsumerQueue { + typedef T value_type; + + ProducerConsumerQueue(const ProducerConsumerQueue&) = delete; + ProducerConsumerQueue& operator=(const ProducerConsumerQueue&) = delete; + + // size must be >= 2. + // + // Also, note that the number of usable slots in the queue at any + // given time is actually (size-1), so if you start with an empty queue, + // IsFull() will return true after size-1 insertions. + explicit ProducerConsumerQueue(uint32_t size) + : size_(size), + records_(static_cast(std::malloc(sizeof(T) * size))), + readIndex_(0), + writeIndex_(0) { + assert(size >= 2); + if (!records_) { + throw std::bad_alloc(); + } + } + + ~ProducerConsumerQueue() { + // We need to destruct anything that may still exist in our queue. + // (No real synchronization needed at destructor time: only one + // thread can be doing this.) + if (!std::is_trivially_destructible::value) { + size_t readIndex = readIndex_; + size_t endIndex = writeIndex_; + while (readIndex != endIndex) { + records_[readIndex].~T(); + if (++readIndex == size_) { + readIndex = 0; + } + } + } + + std::free(records_); + } + + template + bool Write(Args&&... recordArgs) { + auto const currentWrite = writeIndex_.load(std::memory_order_relaxed); + auto nextRecord = currentWrite + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + new (&records_[currentWrite]) T(std::forward(recordArgs)...); + writeIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // queue is full + return false; + } + + // move the value at the front of the queue to given variable + bool Read(T& record) { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return false; + } + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + record = std::move(records_[currentRead]); + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // pointer to the value at the front of the queue (for use in-place) or + // nullptr if empty. 
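+  //
+  // Typical single-consumer usage (a sketch; `queue` is a
+  // ProducerConsumerQueue<T> and `process` is caller-provided):
+  //
+  //   while (T* front = queue.FrontPtr()) {
+  //     process(*front);
+  //     queue.PopFront();
+  //   }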
+ T* FrontPtr() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return nullptr; + } + return &records_[currentRead]; + } + + // queue must not be empty + void PopFront() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + assert(currentRead != writeIndex_.load(std::memory_order_acquire)); + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + } + + bool IsEmpty() const { + return readIndex_.load(std::memory_order_acquire) == + writeIndex_.load(std::memory_order_acquire); + } + + bool IsFull() const { + auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + return false; + } + // queue is full + return true; + } + + // * If called by consumer, then true size may be more (because producer may + // be adding items concurrently). + // * If called by producer, then true size may be less (because consumer may + // be removing items concurrently). + // * It is undefined to call this from any other thread. + size_t SizeGuess() const { + int ret = writeIndex_.load(std::memory_order_acquire) - + readIndex_.load(std::memory_order_acquire); + if (ret < 0) { + ret += size_; + } + return ret; + } + + // maximum number of items in the queue. + size_t capacity() const { return size_ - 1; } + + private: + using AtomicIndex = std::atomic; + + char pad0_[hardware_destructive_interference_size]; + const uint32_t size_; + T* const records_; + + AtomicIndex readIndex_; + char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; + AtomicIndex writeIndex_; + + char pad2_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; +}; + +} // namespace folly +} // namespace arrow_vendored diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h new file mode 100644 index 0000000000000000000000000000000000000000..e437cdcbc2daecdafe589a9483d9348324ad32ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/vendored/datetime/date.h" // IWYU pragma: export +#include "arrow/vendored/datetime/tz.h" // IWYU pragma: export + +// Can be defined by date.h. 
+#ifdef NOEXCEPT +#undef NOEXCEPT +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h new file mode 100644 index 0000000000000000000000000000000000000000..fd2569c6de0f642f65b56e777b7e10e68fca3337 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h @@ -0,0 +1,8237 @@ +#ifndef DATE_H +#define DATE_H + +// The MIT License (MIT) +// +// Copyright (c) 2015, 2016, 2017 Howard Hinnant +// Copyright (c) 2016 Adrian Colomitchi +// Copyright (c) 2017 Florian Dang +// Copyright (c) 2017 Paul Thompson +// Copyright (c) 2018, 2019 Tomasz Kamiński +// Copyright (c) 2019 Jiangang Zhuang +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. 
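+
+// A brief usage sketch (illustrative only) of the calendar types defined in
+// this header: compose a year_month_day with operator/ and convert it to a
+// sys_days time_point.
+//
+//   using namespace arrow_vendored::date;
+//   constexpr year_month_day ymd = year{2015}/3/22;
+//   constexpr sys_days sd = sys_days{ymd};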
+
+#ifndef HAS_STRING_VIEW
+# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+#   define HAS_STRING_VIEW 1
+# else
+#   define HAS_STRING_VIEW 0
+# endif
+#endif  // HAS_STRING_VIEW
+
+#include <cassert>
+#include <algorithm>
+#include <cctype>
+#include <chrono>
+#include <climits>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <ios>
+#include <istream>
+#include <iterator>
+#include <limits>
+#include <locale>
+#include <memory>
+#include <ostream>
+#include <ratio>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#if HAS_STRING_VIEW
+# include <string_view>
+#endif
+#include <utility>
+#include <type_traits>
+
+#ifdef __GNUC__
+# pragma GCC diagnostic push
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 7)
+#  pragma GCC diagnostic ignored "-Wpedantic"
+# endif
+# if __GNUC__ < 5
+   // GCC 4.9 Bug 61489 Wrong warning with -Wmissing-field-initializers
+#  pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+# endif
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(push)
+// warning C4127: conditional expression is constant
+# pragma warning(disable : 4127)
+#endif
+
+namespace arrow_vendored
+{
+namespace date
+{
+
+//---------------+
+// Configuration |
+//---------------+
+
+#ifndef ONLY_C_LOCALE
+# define ONLY_C_LOCALE 0
+#endif
+
+#if defined(_MSC_VER) && (!defined(__clang__) || (_MSC_VER < 1910))
+// MSVC
+# ifndef _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING
+#  define _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING
+# endif
+# if _MSC_VER < 1910
+// before VS2017
+#  define CONSTDATA const
+#  define CONSTCD11
+#  define CONSTCD14
+#  define NOEXCEPT _NOEXCEPT
+# else
+// VS2017 and later
+#  define CONSTDATA constexpr const
+#  define CONSTCD11 constexpr
+#  define CONSTCD14 constexpr
+#  define NOEXCEPT noexcept
+# endif
+
+#elif defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x5150
+// Oracle Developer Studio 12.6 and earlier
+# define CONSTDATA constexpr const
+# define CONSTCD11 constexpr
+# define CONSTCD14
+# define NOEXCEPT noexcept
+
+#elif __cplusplus >= 201402
+// C++14
+# define CONSTDATA constexpr const
+# define CONSTCD11 constexpr
+# define CONSTCD14 constexpr
+# define NOEXCEPT noexcept
+#else
+// C++11
+# define CONSTDATA constexpr const
+# define CONSTCD11 constexpr
+# define CONSTCD14
+# define NOEXCEPT noexcept
+#endif
+
+#ifndef HAS_UNCAUGHT_EXCEPTIONS
+# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+#   define HAS_UNCAUGHT_EXCEPTIONS 1
+# else
+#   define HAS_UNCAUGHT_EXCEPTIONS 0
+# endif
+#endif  // HAS_UNCAUGHT_EXCEPTIONS
+
+#ifndef HAS_VOID_T
+# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+#   define HAS_VOID_T 1
+# else
+#   define HAS_VOID_T 0
+# endif
+#endif  // HAS_VOID_T
+
+// Protect from Oracle sun macro
+#ifdef sun
+# undef sun
+#endif
+
+// Work around for a NVCC compiler bug which causes it to fail
+// to compile std::ratio_{multiply,divide} when used directly
+// in the std::chrono::duration template instantiations below
+namespace detail {
+template <class R1, class R2>
+using ratio_multiply = decltype(std::ratio_multiply<R1, R2>{});
+
+template <class R1, class R2>
+using ratio_divide = decltype(std::ratio_divide<R1, R2>{});
+}  // namespace detail
+
+//-----------+
+// Interface |
+//-----------+
+
+// durations
+
+using days = std::chrono::duration
+    <int, detail::ratio_multiply<std::ratio<24>, std::chrono::hours::period>>;
+
+using weeks = std::chrono::duration
+    <int, detail::ratio_multiply<std::ratio<7>, days::period>>;
+
+using years = std::chrono::duration
+    <int, detail::ratio_multiply<std::ratio<146097, 400>, days::period>>;
+
+using months = std::chrono::duration
+    <int, detail::ratio_divide<years::period, std::ratio<12>>>;
+
+// time_point
+
+template <class Duration>
+    using sys_time = std::chrono::time_point<std::chrono::system_clock, Duration>;
+
+using sys_days = sys_time<days>;
+using sys_seconds = sys_time<std::chrono::seconds>;
+
+struct local_t {};
+
+template <class Duration>
+    using local_time = std::chrono::time_point<local_t, Duration>;
+
+using local_seconds = local_time<std::chrono::seconds>;
+using local_days = local_time<days>;
+
+// types
+
+struct last_spec
+{
+    explicit last_spec() = default;
+};
+
+class day;
+class month;
+class year;
+
+class weekday;
+class weekday_indexed;
+class weekday_last;
+
+class month_day;
+class month_day_last;
+class month_weekday;
+class month_weekday_last;
+
+class year_month;
+
+class year_month_day;
+class year_month_day_last;
+class year_month_weekday;
+class year_month_weekday_last;
+
+// date composition operators
+
+CONSTCD11 year_month operator/(const year& y, const month& m) NOEXCEPT;
+CONSTCD11 year_month operator/(const year& y, int m) NOEXCEPT;
+
+CONSTCD11 month_day operator/(const day& d, const month& m) NOEXCEPT;
+CONSTCD11 month_day operator/(const day& d, int m) NOEXCEPT;
+CONSTCD11 month_day operator/(const month& m, const day& d) NOEXCEPT;
+CONSTCD11 month_day operator/(const month& m, int d) NOEXCEPT;
+CONSTCD11 month_day operator/(int m, const day& d) NOEXCEPT;
+
+CONSTCD11 month_day_last operator/(const month& m, last_spec) NOEXCEPT;
+CONSTCD11 month_day_last operator/(int m, last_spec) NOEXCEPT;
+CONSTCD11 month_day_last operator/(last_spec, const month& m) NOEXCEPT;
+CONSTCD11 month_day_last operator/(last_spec, int m) NOEXCEPT;
+
+CONSTCD11 month_weekday operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT;
+CONSTCD11 month_weekday operator/(int m, const weekday_indexed& wdi) NOEXCEPT;
+CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT;
+CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, int m) NOEXCEPT;
+
+CONSTCD11 month_weekday_last operator/(const month& m, const weekday_last& wdl) NOEXCEPT;
+CONSTCD11 month_weekday_last operator/(int m, const weekday_last& wdl) NOEXCEPT;
+CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, const month& m) NOEXCEPT;
+CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, int m) NOEXCEPT;
+
+CONSTCD11 year_month_day operator/(const year_month& ym, const day& d) NOEXCEPT;
+CONSTCD11 year_month_day operator/(const year_month& ym, int d) NOEXCEPT;
+CONSTCD11 year_month_day operator/(const year& y, const month_day& md) NOEXCEPT;
+CONSTCD11 year_month_day operator/(int y, const month_day& md) NOEXCEPT;
+CONSTCD11 year_month_day operator/(const month_day& md, const year& y) NOEXCEPT;
+CONSTCD11 year_month_day operator/(const month_day& md, int y) NOEXCEPT;
+
+CONSTCD11
+    year_month_day_last operator/(const year_month& ym, last_spec) NOEXCEPT;
+CONSTCD11
+    year_month_day_last operator/(const year& y, const month_day_last& mdl) NOEXCEPT;
+CONSTCD11
+    year_month_day_last operator/(int y, const month_day_last& mdl) NOEXCEPT;
+CONSTCD11
+    year_month_day_last operator/(const month_day_last& mdl, const year& y) NOEXCEPT;
+CONSTCD11
+    year_month_day_last operator/(const month_day_last& mdl, int y) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday
+operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday
+operator/(const year& y, const month_weekday& mwd) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday
+operator/(int y, const month_weekday& mwd) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday
+operator/(const month_weekday& mwd, const year& y) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday
+operator/(const month_weekday& mwd, int y) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday_last
+operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday_last
+operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT;
+
+CONSTCD11
+year_month_weekday_last
+operator/(int y, const month_weekday_last& mwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, int y) NOEXCEPT; + +// Detailed interface + +// day + +class day +{ + unsigned char d_; + +public: + day() = default; + explicit CONSTCD11 day(unsigned d) NOEXCEPT; + + CONSTCD14 day& operator++() NOEXCEPT; + CONSTCD14 day operator++(int) NOEXCEPT; + CONSTCD14 day& operator--() NOEXCEPT; + CONSTCD14 day operator--(int) NOEXCEPT; + + CONSTCD14 day& operator+=(const days& d) NOEXCEPT; + CONSTCD14 day& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator< (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator> (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const day& x, const day& y) NOEXCEPT; + +CONSTCD11 day operator+(const day& x, const days& y) NOEXCEPT; +CONSTCD11 day operator+(const days& x, const day& y) NOEXCEPT; +CONSTCD11 day operator-(const day& x, const days& y) NOEXCEPT; +CONSTCD11 days operator-(const day& x, const day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d); + +// month + +class month +{ + unsigned char m_; + +public: + month() = default; + explicit CONSTCD11 month(unsigned m) NOEXCEPT; + + CONSTCD14 month& operator++() NOEXCEPT; + CONSTCD14 month operator++(int) NOEXCEPT; + CONSTCD14 month& operator--() NOEXCEPT; + CONSTCD14 month operator--(int) NOEXCEPT; + + CONSTCD14 month& operator+=(const months& m) NOEXCEPT; + CONSTCD14 month& operator-=(const months& m) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator< (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator> (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month& x, const month& y) NOEXCEPT; + +CONSTCD14 month operator+(const month& x, const months& y) NOEXCEPT; +CONSTCD14 month operator+(const months& x, const month& y) NOEXCEPT; +CONSTCD14 month operator-(const month& x, const months& y) NOEXCEPT; +CONSTCD14 months operator-(const month& x, const month& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m); + +// year + +class year +{ + short y_; + +public: + year() = default; + explicit CONSTCD11 year(int y) NOEXCEPT; + + CONSTCD14 year& operator++() NOEXCEPT; + CONSTCD14 year operator++(int) NOEXCEPT; + CONSTCD14 year& operator--() NOEXCEPT; + CONSTCD14 year operator--(int) NOEXCEPT; + + CONSTCD14 year& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 year operator-() const NOEXCEPT; + CONSTCD11 year operator+() const NOEXCEPT; + + CONSTCD11 bool is_leap() const NOEXCEPT; + + CONSTCD11 explicit operator int() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + + static CONSTCD11 year min() NOEXCEPT { return year{-32767}; } + static CONSTCD11 year max() 
NOEXCEPT { return year{32767}; } +}; + +CONSTCD11 bool operator==(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator< (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator> (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year& x, const year& y) NOEXCEPT; + +CONSTCD11 year operator+(const year& x, const years& y) NOEXCEPT; +CONSTCD11 year operator+(const years& x, const year& y) NOEXCEPT; +CONSTCD11 year operator-(const year& x, const years& y) NOEXCEPT; +CONSTCD11 years operator-(const year& x, const year& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y); + +// weekday + +class weekday +{ + unsigned char wd_; +public: + weekday() = default; + explicit CONSTCD11 weekday(unsigned wd) NOEXCEPT; + CONSTCD14 weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit weekday(const local_days& dp) NOEXCEPT; + + CONSTCD14 weekday& operator++() NOEXCEPT; + CONSTCD14 weekday operator++(int) NOEXCEPT; + CONSTCD14 weekday& operator--() NOEXCEPT; + CONSTCD14 weekday operator--(int) NOEXCEPT; + + CONSTCD14 weekday& operator+=(const days& d) NOEXCEPT; + CONSTCD14 weekday& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; + + CONSTCD11 unsigned c_encoding() const NOEXCEPT; + CONSTCD11 unsigned iso_encoding() const NOEXCEPT; + + CONSTCD11 weekday_indexed operator[](unsigned index) const NOEXCEPT; + CONSTCD11 weekday_last operator[](last_spec) const NOEXCEPT; + +private: + static CONSTCD14 unsigned char weekday_from_days(int z) NOEXCEPT; + + friend CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; + template + friend std::basic_ostream& + operator<<(std::basic_ostream& os, const weekday& wd); + friend class weekday_indexed; +}; + +CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday& x, const weekday& y) NOEXCEPT; + +CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 weekday operator+(const days& x, const weekday& y) NOEXCEPT; +CONSTCD14 weekday operator-(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd); + +// weekday_indexed + +class weekday_indexed +{ + unsigned char wd_ : 4; + unsigned char index_ : 4; + +public: + weekday_indexed() = default; + CONSTCD11 weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_indexed& wdi); + +// weekday_last + +class weekday_last +{ + date::weekday wd_; + +public: + explicit CONSTCD11 weekday_last(const date::weekday& wd) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_last& x, const 
weekday_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl); + +namespace detail +{ + +struct unspecified_month_disambiguator {}; + +} // namespace detail + +// year_month + +class year_month +{ + date::year y_; + date::month m_; + +public: + year_month() = default; + CONSTCD11 year_month(const date::year& y, const date::month& m) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + + template + CONSTCD14 year_month& operator+=(const months& dm) NOEXCEPT; + template + CONSTCD14 year_month& operator-=(const months& dm) NOEXCEPT; + CONSTCD14 year_month& operator+=(const years& dy) NOEXCEPT; + CONSTCD14 year_month& operator-=(const years& dy) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month& x, const year_month& y) NOEXCEPT; + +template +CONSTCD14 year_month operator+(const year_month& ym, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month operator+(const months& dm, const year_month& ym) NOEXCEPT; +template +CONSTCD14 year_month operator-(const year_month& ym, const months& dm) NOEXCEPT; + +CONSTCD11 months operator-(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 year_month operator+(const year_month& ym, const years& dy) NOEXCEPT; +CONSTCD11 year_month operator+(const years& dy, const year_month& ym) NOEXCEPT; +CONSTCD11 year_month operator-(const year_month& ym, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym); + +// month_day + +class month_day +{ + date::month m_; + date::day d_; + +public: + month_day() = default; + CONSTCD11 month_day(const date::month& m, const date::day& d) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day& x, const month_day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md); + +// month_day_last + +class month_day_last +{ + date::month m_; + +public: + CONSTCD11 explicit month_day_last(const date::month& m) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool 
operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl); + +// month_weekday + +class month_weekday +{ + date::month m_; + date::weekday_indexed wdi_; +public: + CONSTCD11 month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd); + +// month_weekday_last + +class month_weekday_last +{ + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 month_weekday_last(const date::month& m, + const date::weekday_last& wd) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl); + +// class year_month_day + +class year_month_day +{ + date::year y_; + date::month m_; + date::day d_; + +public: + year_month_day() = default; + CONSTCD11 year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT; + CONSTCD14 year_month_day(const year_month_day_last& ymdl) NOEXCEPT; + + CONSTCD14 year_month_day(sys_days dp) NOEXCEPT; + CONSTCD14 explicit year_month_day(local_days dp) NOEXCEPT; + + template + CONSTCD14 year_month_day& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_day from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT; + +template +CONSTCD14 year_month_day operator+(const year_month_day& ymd, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month_day operator+(const months& dm, const year_month_day& ymd) NOEXCEPT; +template +CONSTCD14 year_month_day operator-(const year_month_day& ymd, const months& dm) NOEXCEPT; +CONSTCD11 year_month_day operator+(const 
year_month_day& ymd, const years& dy) NOEXCEPT; +CONSTCD11 year_month_day operator+(const years& dy, const year_month_day& ymd) NOEXCEPT; +CONSTCD11 year_month_day operator-(const year_month_day& ymd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd); + +// year_month_day_last + +class year_month_day_last +{ + date::year y_; + date::month_day_last mdl_; + +public: + CONSTCD11 year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT; + + template + CONSTCD14 year_month_day_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::month_day_last month_day_last() const NOEXCEPT; + CONSTCD14 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator< (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator> (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl); + +// year_month_weekday + +class year_month_weekday +{ + date::year y_; + date::month m_; + date::weekday_indexed wdi_; + +public: + year_month_weekday() = default; + CONSTCD11 year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + CONSTCD14 year_month_weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit year_month_weekday(const local_days& dp) NOEXCEPT; + + template + CONSTCD14 year_month_weekday& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_weekday& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + 
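+    // Note (editor's gloss, not in upstream): index() is the 1-based
+    // occurrence of the weekday within the month, e.g.
+    // year{2015}/March/Friday[2] names the second Friday of March 2015.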
CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_weekday from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi); + +// year_month_weekday_last + +class year_month_weekday_last +{ + date::year y_; + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 year_month_weekday_last(const date::year& y, const date::month& m, + const date::weekday_last& wdl) NOEXCEPT; + + template + CONSTCD14 year_month_weekday_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_weekday_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + +private: + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +CONSTCD11 +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday_last& ymwdl); + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +inline namespace literals +{ + +CONSTCD11 date::day operator "" _d(unsigned long long d) NOEXCEPT; +CONSTCD11 date::year operator "" _y(unsigned long long y) NOEXCEPT; + +} // inline namespace literals +#endif // !defined(_MSC_VER) || (_MSC_VER 
>= 1900) + +// CONSTDATA date::month January{1}; +// CONSTDATA date::month February{2}; +// CONSTDATA date::month March{3}; +// CONSTDATA date::month April{4}; +// CONSTDATA date::month May{5}; +// CONSTDATA date::month June{6}; +// CONSTDATA date::month July{7}; +// CONSTDATA date::month August{8}; +// CONSTDATA date::month September{9}; +// CONSTDATA date::month October{10}; +// CONSTDATA date::month November{11}; +// CONSTDATA date::month December{12}; +// +// CONSTDATA date::weekday Sunday{0u}; +// CONSTDATA date::weekday Monday{1u}; +// CONSTDATA date::weekday Tuesday{2u}; +// CONSTDATA date::weekday Wednesday{3u}; +// CONSTDATA date::weekday Thursday{4u}; +// CONSTDATA date::weekday Friday{5u}; +// CONSTDATA date::weekday Saturday{6u}; + +#if HAS_VOID_T + +template > +struct is_clock + : std::false_type +{}; + +template +struct is_clock> + : std::true_type +{}; + +template inline constexpr bool is_clock_v = is_clock::value; + +#endif // HAS_VOID_T + +//----------------+ +// Implementation | +//----------------+ + +// utilities +namespace detail { + +template> +class save_istream +{ +protected: + std::basic_ios& is_; + CharT fill_; + std::ios::fmtflags flags_; + std::streamsize precision_; + std::streamsize width_; + std::basic_ostream* tie_; + std::locale loc_; + +public: + ~save_istream() + { + is_.fill(fill_); + is_.flags(flags_); + is_.precision(precision_); + is_.width(width_); + is_.imbue(loc_); + is_.tie(tie_); + } + + save_istream(const save_istream&) = delete; + save_istream& operator=(const save_istream&) = delete; + + explicit save_istream(std::basic_ios& is) + : is_(is) + , fill_(is.fill()) + , flags_(is.flags()) + , precision_(is.precision()) + , width_(is.width(0)) + , tie_(is.tie(nullptr)) + , loc_(is.getloc()) + { + if (tie_ != nullptr) + tie_->flush(); + } +}; + +template> +class save_ostream + : private save_istream +{ +public: + ~save_ostream() + { + if ((this->flags_ & std::ios::unitbuf) && +#if HAS_UNCAUGHT_EXCEPTIONS + std::uncaught_exceptions() == 0 && +#else + !std::uncaught_exception() && +#endif + this->is_.good()) + this->is_.rdbuf()->pubsync(); + } + + save_ostream(const save_ostream&) = delete; + save_ostream& operator=(const save_ostream&) = delete; + + explicit save_ostream(std::basic_ios& os) + : save_istream(os) + { + } +}; + +template +struct choose_trunc_type +{ + static const int digits = std::numeric_limits::digits; + using type = typename std::conditional + < + digits < 32, + std::int32_t, + typename std::conditional + < + digits < 64, + std::int64_t, +#ifdef __SIZEOF_INT128__ + __int128 +#else + std::int64_t +#endif + >::type + >::type; +}; + +template +CONSTCD11 +inline +typename std::enable_if +< + !std::chrono::treat_as_floating_point::value, + T +>::type +trunc(T t) NOEXCEPT +{ + return t; +} + +template +CONSTCD14 +inline +typename std::enable_if +< + std::chrono::treat_as_floating_point::value, + T +>::type +trunc(T t) NOEXCEPT +{ + using std::numeric_limits; + using I = typename choose_trunc_type::type; + CONSTDATA auto digits = numeric_limits::digits; + static_assert(digits < numeric_limits::digits, ""); + CONSTDATA auto max = I{1} << (digits-1); + CONSTDATA auto min = -max; + const auto negative = t < T{0}; + if (min <= t && t <= max && t != 0 && t == t) + { + t = static_cast(static_cast(t)); + if (t == 0 && negative) + t = -t; + } + return t; +} + +template +struct static_gcd +{ + static const std::intmax_t value = static_gcd::value; +}; + +template +struct static_gcd +{ + static const std::intmax_t value = Xp; +}; + +template <> 
+struct static_gcd<0, 0> +{ + static const std::intmax_t value = 1; +}; + +template +struct no_overflow +{ +private: + static const std::intmax_t gcd_n1_n2 = static_gcd::value; + static const std::intmax_t gcd_d1_d2 = static_gcd::value; + static const std::intmax_t n1 = R1::num / gcd_n1_n2; + static const std::intmax_t d1 = R1::den / gcd_d1_d2; + static const std::intmax_t n2 = R2::num / gcd_n1_n2; + static const std::intmax_t d2 = R2::den / gcd_d1_d2; +#ifdef __cpp_constexpr + static const std::intmax_t max = std::numeric_limits::max(); +#else + static const std::intmax_t max = LLONG_MAX; +#endif + + template + struct mul // overflow == false + { + static const std::intmax_t value = Xp * Yp; + }; + + template + struct mul + { + static const std::intmax_t value = 1; + }; + +public: + static const bool value = (n1 <= max / d2) && (n2 <= max / d1); + typedef std::ratio::value, + mul::value> type; +}; + +} // detail + +// trunc towards zero +template +CONSTCD11 +inline +typename std::enable_if +< + detail::no_overflow::value, + To +>::type +trunc(const std::chrono::duration& d) +{ + return To{detail::trunc(std::chrono::duration_cast(d).count())}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + !detail::no_overflow::value, + To +>::type +trunc(const std::chrono::duration& d) +{ + using std::chrono::duration_cast; + using std::chrono::duration; + using rep = typename std::common_type::type; + return To{detail::trunc(duration_cast(duration_cast>(d)).count())}; +} + +#ifndef HAS_CHRONO_ROUNDING +# if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023918 || (_MSC_FULL_VER >= 190000000 && defined (__clang__))) +# define HAS_CHRONO_ROUNDING 1 +# elif defined(__cpp_lib_chrono) && __cplusplus > 201402 && __cpp_lib_chrono >= 201510 +# define HAS_CHRONO_ROUNDING 1 +# elif defined(_LIBCPP_VERSION) && __cplusplus > 201402 && _LIBCPP_VERSION >= 3800 +# define HAS_CHRONO_ROUNDING 1 +# else +# define HAS_CHRONO_ROUNDING 0 +# endif +#endif // HAS_CHRONO_ROUNDING + +#if HAS_CHRONO_ROUNDING == 0 + +// round down +template +CONSTCD14 +inline +typename std::enable_if +< + detail::no_overflow::value, + To +>::type +floor(const std::chrono::duration& d) +{ + auto t = trunc(d); + if (t > d) + return t - To{1}; + return t; +} + +template +CONSTCD14 +inline +typename std::enable_if +< + !detail::no_overflow::value, + To +>::type +floor(const std::chrono::duration& d) +{ + using rep = typename std::common_type::type; + return floor(floor>(d)); +} + +// round to nearest, to even on tie +template +CONSTCD14 +inline +To +round(const std::chrono::duration& d) +{ + auto t0 = floor(d); + auto t1 = t0 + To{1}; + if (t1 == To{0} && t0 < To{0}) + t1 = -t1; + auto diff0 = d - t0; + auto diff1 = t1 - d; + if (diff0 == diff1) + { + if (t0 - trunc(t0/2)*2 == To{0}) + return t0; + return t1; + } + if (diff0 < diff1) + return t0; + return t1; +} + +// round up +template +CONSTCD14 +inline +To +ceil(const std::chrono::duration& d) +{ + auto t = trunc(d); + if (t < d) + return t + To{1}; + return t; +} + +template ::is_signed + >::type> +CONSTCD11 +std::chrono::duration +abs(std::chrono::duration d) +{ + return d >= d.zero() ? 
d : static_cast(-d); +} + +// round down +template +CONSTCD11 +inline +std::chrono::time_point +floor(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{date::floor(tp.time_since_epoch())}; +} + +// round to nearest, to even on tie +template +CONSTCD11 +inline +std::chrono::time_point +round(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round(tp.time_since_epoch())}; +} + +// round up +template +CONSTCD11 +inline +std::chrono::time_point +ceil(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{ceil(tp.time_since_epoch())}; +} + +#else // HAS_CHRONO_ROUNDING == 1 + +using std::chrono::floor; +using std::chrono::ceil; +using std::chrono::round; +using std::chrono::abs; + +#endif // HAS_CHRONO_ROUNDING + +namespace detail +{ + +template +CONSTCD14 +inline +typename std::enable_if +< + !std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return round(d); +} + +template +CONSTCD14 +inline +typename std::enable_if +< + std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return d; +} + +template +CONSTCD11 +inline +std::chrono::time_point +round_i(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round_i(tp.time_since_epoch())}; +} + +} // detail + +// trunc towards zero +template +CONSTCD11 +inline +std::chrono::time_point +trunc(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{trunc(tp.time_since_epoch())}; +} + +// day + +CONSTCD11 inline day::day(unsigned d) NOEXCEPT : d_(static_cast(d)) {} +CONSTCD14 inline day& day::operator++() NOEXCEPT {++d_; return *this;} +CONSTCD14 inline day day::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline day& day::operator--() NOEXCEPT {--d_; return *this;} +CONSTCD14 inline day day::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline day& day::operator+=(const days& d) NOEXCEPT {*this = *this + d; return *this;} +CONSTCD14 inline day& day::operator-=(const days& d) NOEXCEPT {*this = *this - d; return *this;} +CONSTCD11 inline day::operator unsigned() const NOEXCEPT {return d_;} +CONSTCD11 inline bool day::ok() const NOEXCEPT {return 1 <= d_ && d_ <= 31;} + +CONSTCD11 +inline +bool +operator==(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const day& x, const day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const day& x, const day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const day& x, const day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const day& x, const day& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +days +operator-(const day& x, const day& y) NOEXCEPT +{ + return days{static_cast(static_cast(x) + - static_cast(y))}; +} + +CONSTCD11 +inline +day +operator+(const day& x, const days& y) NOEXCEPT +{ + return day{static_cast(x) + static_cast(y.count())}; +} + +CONSTCD11 +inline +day +operator+(const days& x, const day& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +day +operator-(const day& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + 
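+
+// Editor's illustration (not in upstream): the floor/round/ceil/trunc/abs
+// overloads defined earlier mirror the C++17 std::chrono rounding API, and
+// alias it directly when HAS_CHRONO_ROUNDING is 1. Typical use:
+//
+//     using namespace arrow_vendored::date;
+//     auto tp  = std::chrono::system_clock::now();
+//     auto dp  = floor<days>(tp);  // sys_days: midnight of the current UTC day
+//     auto tod = tp - dp;          // duration elapsed since that midnight
+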
+template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const day& d) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(d); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d) +{ + detail::low_level_fmt(os, d); + if (!d.ok()) + os << " is not a valid day"; + return os; +} + +// month + +CONSTCD11 inline month::month(unsigned m) NOEXCEPT : m_(static_cast(m)) {} +CONSTCD14 inline month& month::operator++() NOEXCEPT {*this += months{1}; return *this;} +CONSTCD14 inline month month::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline month& month::operator--() NOEXCEPT {*this -= months{1}; return *this;} +CONSTCD14 inline month month::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +month& +month::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +CONSTCD14 +inline +month& +month::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD11 inline month::operator unsigned() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month::ok() const NOEXCEPT {return 1 <= m_ && m_ <= 12;} + +CONSTCD11 +inline +bool +operator==(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const month& x, const month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const month& x, const month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month& x, const month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month& x, const month& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD14 +inline +months +operator-(const month& x, const month& y) NOEXCEPT +{ + auto const d = static_cast(x) - static_cast(y); + return months(d <= 11 ? d : d + 12); +} + +CONSTCD14 +inline +month +operator+(const month& x, const months& y) NOEXCEPT +{ + auto const mu = static_cast(static_cast(x)) + y.count() - 1; + auto const yr = (mu >= 0 ? 
mu : mu-11) / 12; + return month{static_cast(mu - yr * 12 + 1)}; +} + +CONSTCD14 +inline +month +operator+(const months& x, const month& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +month +operator-(const month& x, const months& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month& m) +{ + if (m.ok()) + { + CharT fmt[] = {'%', 'b', 0}; + os << format(os.getloc(), fmt, m); + } + else + os << static_cast(m); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m) +{ + detail::low_level_fmt(os, m); + if (!m.ok()) + os << " is not a valid month"; + return os; +} + +// year + +CONSTCD11 inline year::year(int y) NOEXCEPT : y_(static_cast(y)) {} +CONSTCD14 inline year& year::operator++() NOEXCEPT {++y_; return *this;} +CONSTCD14 inline year year::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline year& year::operator--() NOEXCEPT {--y_; return *this;} +CONSTCD14 inline year year::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline year& year::operator+=(const years& y) NOEXCEPT {*this = *this + y; return *this;} +CONSTCD14 inline year& year::operator-=(const years& y) NOEXCEPT {*this = *this - y; return *this;} +CONSTCD11 inline year year::operator-() const NOEXCEPT {return year{-y_};} +CONSTCD11 inline year year::operator+() const NOEXCEPT {return *this;} + +CONSTCD11 +inline +bool +year::is_leap() const NOEXCEPT +{ + return y_ % 4 == 0 && (y_ % 100 != 0 || y_ % 400 == 0); +} + +CONSTCD11 inline year::operator int() const NOEXCEPT {return y_;} + +CONSTCD11 +inline +bool +year::ok() const NOEXCEPT +{ + return y_ != std::numeric_limits::min(); +} + +CONSTCD11 +inline +bool +operator==(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const year& x, const year& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const year& x, const year& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year& x, const year& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year& x, const year& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +years +operator-(const year& x, const year& y) NOEXCEPT +{ + return years{static_cast(x) - static_cast(y)}; +} + +CONSTCD11 +inline +year +operator+(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) + y.count()}; +} + +CONSTCD11 +inline +year +operator+(const years& x, const year& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +year +operator-(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) - y.count()}; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year& y) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::internal); + os.width(4 + (y < year{0})); + os.imbue(std::locale::classic()); + os << static_cast(y); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y) +{ + detail::low_level_fmt(os, y); + if (!y.ok()) + os << " is not a valid year"; + return os; +} + +// weekday + +CONSTCD14 +inline +unsigned char 
+weekday::weekday_from_days(int z) NOEXCEPT +{ + auto u = static_cast(z); + return static_cast(z >= -4 ? (u+4) % 7 : u % 7); +} + +CONSTCD11 +inline +weekday::weekday(unsigned wd) NOEXCEPT + : wd_(static_cast(wd != 7 ? wd : 0)) + {} + +CONSTCD14 +inline +weekday::weekday(const sys_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 +inline +weekday::weekday(const local_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 inline weekday& weekday::operator++() NOEXCEPT {*this += days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline weekday& weekday::operator--() NOEXCEPT {*this -= days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +weekday& +weekday::operator+=(const days& d) NOEXCEPT +{ + *this = *this + d; + return *this; +} + +CONSTCD14 +inline +weekday& +weekday::operator-=(const days& d) NOEXCEPT +{ + *this = *this - d; + return *this; +} + +CONSTCD11 inline bool weekday::ok() const NOEXCEPT {return wd_ <= 6;} + +CONSTCD11 +inline +unsigned weekday::c_encoding() const NOEXCEPT +{ + return unsigned{wd_}; +} + +CONSTCD11 +inline +unsigned weekday::iso_encoding() const NOEXCEPT +{ + return unsigned{((wd_ == 0u) ? 7u : wd_)}; +} + +CONSTCD11 +inline +bool +operator==(const weekday& x, const weekday& y) NOEXCEPT +{ + return x.wd_ == y.wd_; +} + +CONSTCD11 +inline +bool +operator!=(const weekday& x, const weekday& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD14 +inline +days +operator-(const weekday& x, const weekday& y) NOEXCEPT +{ + auto const wdu = x.wd_ - y.wd_; + auto const wk = (wdu >= 0 ? wdu : wdu-6) / 7; + return days{wdu - wk * 7}; +} + +CONSTCD14 +inline +weekday +operator+(const weekday& x, const days& y) NOEXCEPT +{ + auto const wdu = static_cast(static_cast(x.wd_)) + y.count(); + auto const wk = (wdu >= 0 ? 
wdu : wdu-6) / 7; + return weekday{static_cast(wdu - wk * 7)}; +} + +CONSTCD14 +inline +weekday +operator+(const days& x, const weekday& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +weekday +operator-(const weekday& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday& wd) +{ + if (wd.ok()) + { + CharT fmt[] = {'%', 'a', 0}; + os << format(fmt, wd); + } + else + os << wd.c_encoding(); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd) +{ + detail::low_level_fmt(os, wd); + if (!wd.ok()) + os << " is not a valid weekday"; + return os; +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +inline namespace literals +{ + +CONSTCD11 +inline +date::day +operator "" _d(unsigned long long d) NOEXCEPT +{ + return date::day{static_cast(d)}; +} + +CONSTCD11 +inline +date::year +operator "" _y(unsigned long long y) NOEXCEPT +{ + return date::year(static_cast(y)); +} +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +CONSTDATA date::last_spec last{}; + +CONSTDATA date::month jan{1}; +CONSTDATA date::month feb{2}; +CONSTDATA date::month mar{3}; +CONSTDATA date::month apr{4}; +CONSTDATA date::month may{5}; +CONSTDATA date::month jun{6}; +CONSTDATA date::month jul{7}; +CONSTDATA date::month aug{8}; +CONSTDATA date::month sep{9}; +CONSTDATA date::month oct{10}; +CONSTDATA date::month nov{11}; +CONSTDATA date::month dec{12}; + +CONSTDATA date::weekday sun{0u}; +CONSTDATA date::weekday mon{1u}; +CONSTDATA date::weekday tue{2u}; +CONSTDATA date::weekday wed{3u}; +CONSTDATA date::weekday thu{4u}; +CONSTDATA date::weekday fri{5u}; +CONSTDATA date::weekday sat{6u}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +} // inline namespace literals +#endif + +CONSTDATA date::month January{1}; +CONSTDATA date::month February{2}; +CONSTDATA date::month March{3}; +CONSTDATA date::month April{4}; +CONSTDATA date::month May{5}; +CONSTDATA date::month June{6}; +CONSTDATA date::month July{7}; +CONSTDATA date::month August{8}; +CONSTDATA date::month September{9}; +CONSTDATA date::month October{10}; +CONSTDATA date::month November{11}; +CONSTDATA date::month December{12}; + +CONSTDATA date::weekday Monday{1}; +CONSTDATA date::weekday Tuesday{2}; +CONSTDATA date::weekday Wednesday{3}; +CONSTDATA date::weekday Thursday{4}; +CONSTDATA date::weekday Friday{5}; +CONSTDATA date::weekday Saturday{6}; +CONSTDATA date::weekday Sunday{7}; + +// weekday_indexed + +CONSTCD11 +inline +weekday +weekday_indexed::weekday() const NOEXCEPT +{ + return date::weekday{static_cast(wd_)}; +} + +CONSTCD11 inline unsigned weekday_indexed::index() const NOEXCEPT {return index_;} + +CONSTCD11 +inline +bool +weekday_indexed::ok() const NOEXCEPT +{ + return weekday().ok() && 1 <= index_ && index_ <= 5; +} + +#ifdef __GNUC__ +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wconversion" +#endif // __GNUC__ + +CONSTCD11 +inline +weekday_indexed::weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT + : wd_(static_cast(static_cast(wd.wd_))) + , index_(static_cast(index)) + {} + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif // __GNUC__ + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_indexed& wdi) +{ + return low_level_fmt(os, wdi.weekday()) << '[' << wdi.index() << ']'; +} + +} // namespace detail + +template +inline +std::basic_ostream& 
+operator<<(std::basic_ostream& os, const weekday_indexed& wdi) +{ + detail::low_level_fmt(os, wdi); + if (!wdi.ok()) + os << " is not a valid weekday_indexed"; + return os; +} + +CONSTCD11 +inline +weekday_indexed +weekday::operator[](unsigned index) const NOEXCEPT +{ + return {*this, index}; +} + +CONSTCD11 +inline +bool +operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return x.weekday() == y.weekday() && x.index() == y.index(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return !(x == y); +} + +// weekday_last + +CONSTCD11 inline date::weekday weekday_last::weekday() const NOEXCEPT {return wd_;} +CONSTCD11 inline bool weekday_last::ok() const NOEXCEPT {return wd_.ok();} +CONSTCD11 inline weekday_last::weekday_last(const date::weekday& wd) NOEXCEPT : wd_(wd) {} + +CONSTCD11 +inline +bool +operator==(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return x.weekday() == y.weekday(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_last& wdl) +{ + return low_level_fmt(os, wdl.weekday()) << "[last]"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl) +{ + detail::low_level_fmt(os, wdl); + if (!wdl.ok()) + os << " is not a valid weekday_last"; + return os; +} + +CONSTCD11 +inline +weekday_last +weekday::operator[](last_spec) const NOEXCEPT +{ + return weekday_last{*this}; +} + +// year_month + +CONSTCD11 +inline +year_month::year_month(const date::year& y, const date::month& m) NOEXCEPT + : y_(y) + , m_(m) + {} + +CONSTCD11 inline year year_month::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool year_month::ok() const NOEXCEPT {return y_.ok() && m_.ok();} + +template +CONSTCD14 +inline +year_month& +year_month::operator+=(const months& dm) NOEXCEPT +{ + *this = *this + dm; + return *this; +} + +template +CONSTCD14 +inline +year_month& +year_month::operator-=(const months& dm) NOEXCEPT +{ + *this = *this - dm; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator+=(const years& dy) NOEXCEPT +{ + *this = *this + dy; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator-=(const years& dy) NOEXCEPT +{ + *this = *this - dy; + return *this; +} + +CONSTCD11 +inline +bool +operator==(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month() < y.month())); +} + +CONSTCD11 +inline +bool +operator>(const year_month& x, const year_month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x < y); +} + +template +CONSTCD14 +inline +year_month +operator+(const year_month& ym, const months& dm) NOEXCEPT +{ + auto dmi = static_cast(static_cast(ym.month())) - 1 + dm.count(); + auto dy = (dmi >= 0 ? dmi : dmi-11) / 12; + dmi = dmi - dy * 12 + 1; + return (ym.year() + years(dy)) / month(static_cast(dmi)); +} + +template +CONSTCD14 +inline +year_month +operator+(const months& dm, const year_month& ym) NOEXCEPT +{ + return ym + dm; +} + +template +CONSTCD14 +inline +year_month +operator-(const year_month& ym, const months& dm) NOEXCEPT +{ + return ym + -dm; +} + +CONSTCD11 +inline +months +operator-(const year_month& x, const year_month& y) NOEXCEPT +{ + return (x.year() - y.year()) + + months(static_cast(x.month()) - static_cast(y.month())); +} + +CONSTCD11 +inline +year_month +operator+(const year_month& ym, const years& dy) NOEXCEPT +{ + return (ym.year() + dy) / ym.month(); +} + +CONSTCD11 +inline +year_month +operator+(const years& dy, const year_month& ym) NOEXCEPT +{ + return ym + dy; +} + +CONSTCD11 +inline +year_month +operator-(const year_month& ym, const years& dy) NOEXCEPT +{ + return ym + -dy; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month& ym) +{ + low_level_fmt(os, ym.year()) << '/'; + return low_level_fmt(os, ym.month()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym) +{ + detail::low_level_fmt(os, ym); + if (!ym.ok()) + os << " is not a valid year_month"; + return os; +} + +// month_day + +CONSTCD11 +inline +month_day::month_day(const date::month& m, const date::day& d) NOEXCEPT + : m_(m) + , d_(d) + {} + +CONSTCD11 inline date::month month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline date::day month_day::day() const NOEXCEPT {return d_;} + +CONSTCD14 +inline +bool +month_day::ok() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(29), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return m_.ok() && date::day{1} <= d_ && d_ <= d[static_cast(m_)-1]; +} + +CONSTCD11 +inline +bool +operator==(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() < y.month() ? true + : (x.month() > y.month() ? 
false + : (x.day() < y.day())); +} + +CONSTCD11 +inline +bool +operator>(const month_day& x, const month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day& md) +{ + low_level_fmt(os, md.month()) << '/'; + return low_level_fmt(os, md.day()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md) +{ + detail::low_level_fmt(os, md); + if (!md.ok()) + os << " is not a valid month_day"; + return os; +} + +// month_day_last + +CONSTCD11 inline month month_day_last::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month_day_last::ok() const NOEXCEPT {return m_.ok();} +CONSTCD11 inline month_day_last::month_day_last(const date::month& m) NOEXCEPT : m_(m) {} + +CONSTCD11 +inline +bool +operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() < y.month(); +} + +CONSTCD11 +inline +bool +operator>(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day_last& mdl) +{ + return low_level_fmt(os, mdl.month()) << "/last"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl) +{ + detail::low_level_fmt(os, mdl); + if (!mdl.ok()) + os << " is not a valid month_day_last"; + return os; +} + +// month_weekday + +CONSTCD11 +inline +month_weekday::month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT + : m_(m) + , wdi_(wdi) + {} + +CONSTCD11 inline month month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_indexed +month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD11 +inline +bool +month_weekday::ok() const NOEXCEPT +{ + return m_.ok() && wdi_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday& mwd) +{ + low_level_fmt(os, mwd.month()) << '/'; + return low_level_fmt(os, mwd.weekday_indexed()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd) +{ + detail::low_level_fmt(os, mwd); + if (!mwd.ok()) + os << " is not a valid month_weekday"; + return os; +} + +// month_weekday_last + +CONSTCD11 +inline +month_weekday_last::month_weekday_last(const 
date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : m_(m) + , wdl_(wdl) + {} + +CONSTCD11 inline month month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_last +month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD11 +inline +bool +month_weekday_last::ok() const NOEXCEPT +{ + return m_.ok() && wdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + low_level_fmt(os, mwdl.month()) << '/'; + return low_level_fmt(os, mwdl.weekday_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + detail::low_level_fmt(os, mwdl); + if (!mwdl.ok()) + os << " is not a valid month_weekday_last"; + return os; +} + +// year_month_day_last + +CONSTCD11 +inline +year_month_day_last::year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT + : y_(y) + , mdl_(mdl) + {} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_day_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day_last::month() const NOEXCEPT {return mdl_.month();} + +CONSTCD11 +inline +month_day_last +year_month_day_last::month_day_last() const NOEXCEPT +{ + return mdl_; +} + +CONSTCD14 +inline +day +year_month_day_last::day() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(28), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return (month() != February || !y_.is_leap()) && mdl_.ok() ? + d[static_cast(month()) - 1] : date::day{29}; +} + +CONSTCD14 +inline +year_month_day_last::operator sys_days() const NOEXCEPT +{ + return sys_days(year()/month()/day()); +} + +CONSTCD14 +inline +year_month_day_last::operator local_days() const NOEXCEPT +{ + return local_days(year()/month()/day()); +} + +CONSTCD11 +inline +bool +year_month_day_last::ok() const NOEXCEPT +{ + return y_.ok() && mdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month_day_last() == y.month_day_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month_day_last() < y.month_day_last())); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + low_level_fmt(os, ymdl.year()) << '/'; + return low_level_fmt(os, ymdl.month_day_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + detail::low_level_fmt(os, ymdl); + if (!ymdl.ok()) + os << " is not a valid year_month_day_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return (ymdl.year() / ymdl.month() + dm) / last; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dm; +} + +template +CONSTCD14 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return ymdl + (-dm); +} + +CONSTCD11 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return {ymdl.year()+dy, ymdl.month_day_last()}; +} + +CONSTCD11 +inline +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dy; +} + +CONSTCD11 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return ymdl + (-dy); +} + +// year_month_day + +CONSTCD11 +inline +year_month_day::year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT + : y_(y) + , m_(m) + , d_(d) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(const year_month_day_last& ymdl) NOEXCEPT + : y_(ymdl.year()) + , m_(ymdl.month()) + , d_(ymdl.day()) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(sys_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(local_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD11 inline year year_month_day::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline day year_month_day::day() const NOEXCEPT {return d_;} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD14 +inline +days +year_month_day::to_days() const NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed 
integer"); + auto const y = static_cast(y_) - (m_ <= February); + auto const m = static_cast(m_); + auto const d = static_cast(d_); + auto const era = (y >= 0 ? y : y-399) / 400; + auto const yoe = static_cast(y - era * 400); // [0, 399] + auto const doy = (153*(m > 2 ? m-3 : m+9) + 2)/5 + d-1; // [0, 365] + auto const doe = yoe * 365 + yoe/4 - yoe/100 + doy; // [0, 146096] + return days{era * 146097 + static_cast(doe) - 719468}; +} + +CONSTCD14 +inline +year_month_day::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_day::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_day::ok() const NOEXCEPT +{ + if (!(y_.ok() && m_.ok())) + return false; + return date::day{1} <= d_ && d_ <= (y_ / m_ / last).day(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? false + : (x.month() < y.month() ? true + : (x.month() > y.month() ? false + : (x.day() < y.day())))); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x < y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.imbue(std::locale::classic()); + os << static_cast(ymd.year()) << '-'; + os.width(2); + os << static_cast(ymd.month()) << '-'; + os.width(2); + os << static_cast(ymd.day()); + if (!ymd.ok()) + os << " is not a valid year_month_day"; + return os; +} + +CONSTCD14 +inline +year_month_day +year_month_day::from_days(days dp) NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed integer"); + auto const z = dp.count() + 719468; + auto const era = (z >= 0 ? z : z - 146096) / 146097; + auto const doe = static_cast(z - era * 146097); // [0, 146096] + auto const yoe = (doe - doe/1460 + doe/36524 - doe/146096) / 365; // [0, 399] + auto const y = static_cast(yoe) + era * 400; + auto const doy = doe - (365*yoe + yoe/4 - yoe/100); // [0, 365] + auto const mp = (5*doy + 2)/153; // [0, 11] + auto const d = doy - (153*mp+2)/5 + 1; // [1, 31] + auto const m = mp < 10 ? 
mp+3 : mp-9; // [1, 12] + return year_month_day{date::year{y + (m <= 2)}, date::month(m), date::day(d)}; +} + +template +CONSTCD14 +inline +year_month_day +operator+(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return (ymd.year() / ymd.month() + dm) / ymd.day(); +} + +template +CONSTCD14 +inline +year_month_day +operator+(const months& dm, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dm; +} + +template +CONSTCD14 +inline +year_month_day +operator-(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return ymd + (-dm); +} + +CONSTCD11 +inline +year_month_day +operator+(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return (ymd.year() + dy) / ymd.month() / ymd.day(); +} + +CONSTCD11 +inline +year_month_day +operator+(const years& dy, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dy; +} + +CONSTCD11 +inline +year_month_day +operator-(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return ymd + (-dy); +} + +// year_month_weekday + +CONSTCD11 +inline +year_month_weekday::year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) + NOEXCEPT + : y_(y) + , m_(m) + , wdi_(wdi) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const sys_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const local_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday::weekday() const NOEXCEPT +{ + return wdi_.weekday(); +} + +CONSTCD11 +inline +unsigned +year_month_weekday::index() const NOEXCEPT +{ + return wdi_.index(); +} + +CONSTCD11 +inline +weekday_indexed +year_month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD14 +inline +year_month_weekday::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_weekday::ok() const NOEXCEPT +{ + if (!y_.ok() || !m_.ok() || !wdi_.weekday().ok() || wdi_.index() < 1) + return false; + if (wdi_.index() <= 4) + return true; + auto d2 = wdi_.weekday() - date::weekday(static_cast(y_/m_/1)) + + days((wdi_.index()-1)*7 + 1); + return static_cast(d2.count()) <= static_cast((y_/m_/last).day()); +} + +CONSTCD14 +inline +year_month_weekday +year_month_weekday::from_days(days d) NOEXCEPT +{ + sys_days dp{d}; + auto const wd = date::weekday(dp); + auto const ymd = year_month_day(dp); + return {ymd.year(), ymd.month(), wd[(static_cast(ymd.day())-1)/7+1]}; +} + +CONSTCD14 +inline +days +year_month_weekday::to_days() const NOEXCEPT +{ + auto d = sys_days(y_/m_/1); + return (d + 
(wdi_.weekday() - date::weekday(d) + days{(wdi_.index()-1)*7}) + ).time_since_epoch(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi) +{ + detail::low_level_fmt(os, ymwdi.year()) << '/'; + detail::low_level_fmt(os, ymwdi.month()) << '/'; + detail::low_level_fmt(os, ymwdi.weekday_indexed()); + if (!ymwdi.ok()) + os << " is not a valid year_month_weekday"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return (ymwd.year() / ymwd.month() + dm) / ymwd.weekday_indexed(); +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dm; +} + +template +CONSTCD14 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return ymwd + (-dm); +} + +CONSTCD11 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return {ymwd.year()+dy, ymwd.month(), ymwd.weekday_indexed()}; +} + +CONSTCD11 +inline +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dy; +} + +CONSTCD11 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return ymwd + (-dy); +} + +// year_month_weekday_last + +CONSTCD11 +inline +year_month_weekday_last::year_month_weekday_last(const date::year& y, + const date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : y_(y) + , m_(m) + , wdl_(wdl) + {} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday_last::weekday() const NOEXCEPT +{ + return wdl_.weekday(); +} + +CONSTCD11 +inline +weekday_last +year_month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD14 +inline +year_month_weekday_last::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday_last::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD11 +inline +bool +year_month_weekday_last::ok() const NOEXCEPT +{ + return y_.ok() && m_.ok() && wdl_.ok(); +} + +CONSTCD14 +inline +days +year_month_weekday_last::to_days() const NOEXCEPT +{ + auto const d = sys_days(y_/m_/last); + return (d - (date::weekday{d} - wdl_.weekday())).time_since_epoch(); +} + 
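+// Illustrative usage sketch (comment only; not part of the upstream header,
+// and it assumes date.h's _y literal and calendar constants are in scope):
+//
+//     using namespace date;
+//     auto mday = 2016_y/May/Friday[last];           // last Friday of May 2016
+//     assert(sys_days(mday) == sys_days(2016_y/May/27));
+//
+// to_days() above starts from the last day of the month; the weekday
+// difference (weekday{d} - wdl_.weekday()) is always in [0, 6] days, so
+// stepping back by it never leaves the month.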
+CONSTCD11 +inline +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday_last& ymwdl) +{ + detail::low_level_fmt(os, ymwdl.year()) << '/'; + detail::low_level_fmt(os, ymwdl.month()) << '/'; + detail::low_level_fmt(os, ymwdl.weekday_last()); + if (!ymwdl.ok()) + os << " is not a valid year_month_weekday_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return (ymwdl.year() / ymwdl.month() + dm) / ymwdl.weekday_last(); +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dm; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return ymwdl + (-dm); +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return {ymwdl.year()+dy, ymwdl.month(), ymwdl.weekday_last()}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dy; +} + +CONSTCD11 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return ymwdl + (-dy); +} + +// year_month from operator/() + +CONSTCD11 +inline +year_month +operator/(const year& y, const month& m) NOEXCEPT +{ + return {y, m}; +} + +CONSTCD11 +inline +year_month +operator/(const year& y, int m) NOEXCEPT +{ + return y / month(static_cast(m)); +} + +// month_day from operator/() + +CONSTCD11 +inline +month_day +operator/(const month& m, const day& d) NOEXCEPT +{ + return {m, d}; +} + +CONSTCD11 +inline +month_day +operator/(const day& d, const month& m) NOEXCEPT +{ + return m / d; +} + +CONSTCD11 +inline +month_day +operator/(const month& m, int d) NOEXCEPT +{ + return m / day(static_cast(d)); +} + +CONSTCD11 +inline +month_day +operator/(int m, const day& d) NOEXCEPT +{ + return month(static_cast(m)) / d; +} + +CONSTCD11 inline month_day operator/(const day& d, int m) NOEXCEPT {return m / d;} + +// month_day_last from operator/() + +CONSTCD11 +inline +month_day_last +operator/(const month& m, last_spec) NOEXCEPT +{ + return month_day_last{m}; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, const month& m) NOEXCEPT +{ + return m/last; +} + +CONSTCD11 +inline +month_day_last +operator/(int m, last_spec) NOEXCEPT +{ + return month(static_cast(m))/last; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, int m) NOEXCEPT +{ + return m/last; +} + +// month_weekday from operator/() + +CONSTCD11 +inline +month_weekday +operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT +{ + return {m, wdi}; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT +{ + return m / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(int m, const weekday_indexed& wdi) NOEXCEPT +{ + return month(static_cast(m)) / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, int m) NOEXCEPT +{ + return 
m / wdi;
+}
+
+// month_weekday_last from operator/()
+
+CONSTCD11
+inline
+month_weekday_last
+operator/(const month& m, const weekday_last& wdl) NOEXCEPT
+{
+    return {m, wdl};
+}
+
+CONSTCD11
+inline
+month_weekday_last
+operator/(const weekday_last& wdl, const month& m) NOEXCEPT
+{
+    return m / wdl;
+}
+
+CONSTCD11
+inline
+month_weekday_last
+operator/(int m, const weekday_last& wdl) NOEXCEPT
+{
+    return month(static_cast<unsigned>(m)) / wdl;
+}
+
+CONSTCD11
+inline
+month_weekday_last
+operator/(const weekday_last& wdl, int m) NOEXCEPT
+{
+    return m / wdl;
+}
+
+// year_month_day from operator/()
+
+CONSTCD11
+inline
+year_month_day
+operator/(const year_month& ym, const day& d) NOEXCEPT
+{
+    return {ym.year(), ym.month(), d};
+}
+
+CONSTCD11
+inline
+year_month_day
+operator/(const year_month& ym, int d) NOEXCEPT
+{
+    return ym / day(static_cast<unsigned>(d));
+}
+
+CONSTCD11
+inline
+year_month_day
+operator/(const year& y, const month_day& md) NOEXCEPT
+{
+    return y / md.month() / md.day();
+}
+
+CONSTCD11
+inline
+year_month_day
+operator/(int y, const month_day& md) NOEXCEPT
+{
+    return year(y) / md;
+}
+
+CONSTCD11
+inline
+year_month_day
+operator/(const month_day& md, const year& y) NOEXCEPT
+{
+    return y / md;
+}
+
+CONSTCD11
+inline
+year_month_day
+operator/(const month_day& md, int y) NOEXCEPT
+{
+    return year(y) / md;
+}
+
+// year_month_day_last from operator/()
+
+CONSTCD11
+inline
+year_month_day_last
+operator/(const year_month& ym, last_spec) NOEXCEPT
+{
+    return {ym.year(), month_day_last{ym.month()}};
+}
+
+CONSTCD11
+inline
+year_month_day_last
+operator/(const year& y, const month_day_last& mdl) NOEXCEPT
+{
+    return {y, mdl};
+}
+
+CONSTCD11
+inline
+year_month_day_last
+operator/(int y, const month_day_last& mdl) NOEXCEPT
+{
+    return year(y) / mdl;
+}
+
+CONSTCD11
+inline
+year_month_day_last
+operator/(const month_day_last& mdl, const year& y) NOEXCEPT
+{
+    return y / mdl;
+}
+
+CONSTCD11
+inline
+year_month_day_last
+operator/(const month_day_last& mdl, int y) NOEXCEPT
+{
+    return year(y) / mdl;
+}
+
+// year_month_weekday from operator/()
+
+CONSTCD11
+inline
+year_month_weekday
+operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT
+{
+    return {ym.year(), ym.month(), wdi};
+}
+
+CONSTCD11
+inline
+year_month_weekday
+operator/(const year& y, const month_weekday& mwd) NOEXCEPT
+{
+    return {y, mwd.month(), mwd.weekday_indexed()};
+}
+
+CONSTCD11
+inline
+year_month_weekday
+operator/(int y, const month_weekday& mwd) NOEXCEPT
+{
+    return year(y) / mwd;
+}
+
+CONSTCD11
+inline
+year_month_weekday
+operator/(const month_weekday& mwd, const year& y) NOEXCEPT
+{
+    return y / mwd;
+}
+
+CONSTCD11
+inline
+year_month_weekday
+operator/(const month_weekday& mwd, int y) NOEXCEPT
+{
+    return year(y) / mwd;
+}
+
+// year_month_weekday_last from operator/()
+
+CONSTCD11
+inline
+year_month_weekday_last
+operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT
+{
+    return {ym.year(), ym.month(), wdl};
+}
+
+CONSTCD11
+inline
+year_month_weekday_last
+operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT
+{
+    return {y, mwdl.month(), mwdl.weekday_last()};
+}
+
+CONSTCD11
+inline
+year_month_weekday_last
+operator/(int y, const month_weekday_last& mwdl) NOEXCEPT
+{
+    return year(y) / mwdl;
+}
+
+CONSTCD11
+inline
+year_month_weekday_last
+operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT
+{
+    return y / mwdl;
+}
+
+CONSTCD11
+inline
+year_month_weekday_last
+operator/(const month_weekday_last& mwdl, int y) NOEXCEPT
+{
+    return year(y) / mwdl;
+} + +template +struct fields; + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr); + +template +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr); + +// hh_mm_ss + +namespace detail +{ + +struct undocumented {explicit undocumented() = default;}; + +// width::value is the number of fractional decimal digits in 1/n +// width<0>::value and width<1>::value are defined to be 0 +// If 1/n takes more than 18 fractional decimal digits, +// the result is truncated to 19. +// Example: width<2>::value == 1 +// Example: width<3>::value == 19 +// Example: width<4>::value == 2 +// Example: width<10>::value == 1 +// Example: width<1000>::value == 3 +template +struct width +{ + static_assert(d > 0, "width called with zero denominator"); + static CONSTDATA unsigned value = 1 + width::value; +}; + +template +struct width +{ + static CONSTDATA unsigned value = 0; +}; + +template +struct static_pow10 +{ +private: + static CONSTDATA std::uint64_t h = static_pow10::value; +public: + static CONSTDATA std::uint64_t value = h * h * (exp % 2 ? 10 : 1); +}; + +template <> +struct static_pow10<0> +{ + static CONSTDATA std::uint64_t value = 1; +}; + +template +class decimal_format_seconds +{ + using CT = typename std::common_type::type; + using rep = typename CT::rep; + static unsigned CONSTDATA trial_width = + detail::width::value; +public: + static unsigned CONSTDATA width = trial_width < 19 ? trial_width : 6u; + using precision = std::chrono::duration::value>>; + +private: + std::chrono::seconds s_; + precision sub_s_; + +public: + CONSTCD11 decimal_format_seconds() + : s_() + , sub_s_() + {} + + CONSTCD11 explicit decimal_format_seconds(const Duration& d) NOEXCEPT + : s_(std::chrono::duration_cast(d)) + , sub_s_(std::chrono::duration_cast(d - s_)) + {} + + CONSTCD14 std::chrono::seconds& seconds() NOEXCEPT {return s_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_;} + CONSTCD11 precision subseconds() const NOEXCEPT {return sub_s_;} + + CONSTCD14 precision to_duration() const NOEXCEPT + { + return s_ + sub_s_; + } + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return sub_s_ < std::chrono::seconds{1} && s_ < std::chrono::minutes{1}; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const decimal_format_seconds& x) + { + return x.print(os, std::chrono::treat_as_floating_point{}); + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::true_type) const + { + date::detail::save_ostream _(os); + std::chrono::duration d = s_ + sub_s_; + if (d < std::chrono::seconds{10}) + os << '0'; + os.precision(width+6); + os << std::fixed << d.count(); + return os; + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::false_type) const + { + date::detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << s_.count(); + if (width > 0) + { +#if !ONLY_C_LOCALE + os << std::use_facet>(os.getloc()).decimal_point(); +#else + os << '.'; +#endif + date::detail::save_ostream _s(os); + os.imbue(std::locale::classic()); + os.width(width); + os << sub_s_.count(); + } + return os; + } +}; + +template +inline +CONSTCD11 +typename std::enable_if + < + std::numeric_limits::is_signed, + std::chrono::duration + >::type 
+abs(std::chrono::duration d) +{ + return d >= d.zero() ? +d : -d; +} + +template +inline +CONSTCD11 +typename std::enable_if + < + !std::numeric_limits::is_signed, + std::chrono::duration + >::type +abs(std::chrono::duration d) +{ + return d; +} + +} // namespace detail + +template +class hh_mm_ss +{ + using dfs = detail::decimal_format_seconds::type>; + + std::chrono::hours h_; + std::chrono::minutes m_; + dfs s_; + bool neg_; + +public: + static unsigned CONSTDATA fractional_width = dfs::width; + using precision = typename dfs::precision; + + CONSTCD11 hh_mm_ss() NOEXCEPT + : hh_mm_ss(Duration::zero()) + {} + + CONSTCD11 explicit hh_mm_ss(Duration d) NOEXCEPT + : h_(std::chrono::duration_cast(detail::abs(d))) + , m_(std::chrono::duration_cast(detail::abs(d)) - h_) + , s_(detail::abs(d) - h_ - m_) + , neg_(d < Duration::zero()) + {} + + CONSTCD11 std::chrono::hours hours() const NOEXCEPT {return h_;} + CONSTCD11 std::chrono::minutes minutes() const NOEXCEPT {return m_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_.seconds();} + CONSTCD14 std::chrono::seconds& + seconds(detail::undocumented) NOEXCEPT {return s_.seconds();} + CONSTCD11 precision subseconds() const NOEXCEPT {return s_.subseconds();} + CONSTCD11 bool is_negative() const NOEXCEPT {return neg_;} + + CONSTCD11 explicit operator precision() const NOEXCEPT {return to_duration();} + CONSTCD11 precision to_duration() const NOEXCEPT + {return (s_.to_duration() + m_ + h_) * (1-2*neg_);} + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return !neg_ && h_ < days{1} && m_ < std::chrono::hours{1} && + s_.in_conventional_range(); + } + +private: + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, hh_mm_ss const& tod) + { + if (tod.is_negative()) + os << '-'; + if (tod.h_ < std::chrono::hours{10}) + os << '0'; + os << tod.h_.count() << ':'; + if (tod.m_ < std::chrono::minutes{10}) + os << '0'; + os << tod.m_.count() << ':' << tod.s_; + return os; + } + + template + friend + std::basic_ostream& + date::to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec); + + template + friend + std::basic_istream& + date::from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, + std::basic_string* abbrev, std::chrono::minutes* offset); +}; + +inline +CONSTCD14 +bool +is_am(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{0} <= h && h < hours{12}; +} + +inline +CONSTCD14 +bool +is_pm(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{12} <= h && h < hours{24}; +} + +inline +CONSTCD14 +std::chrono::hours +make12(std::chrono::hours h) NOEXCEPT +{ + using std::chrono::hours; + if (h < hours{12}) + { + if (h == hours{0}) + h = hours{12}; + } + else + { + if (h != hours{12}) + h = h - hours{12}; + } + return h; +} + +inline +CONSTCD14 +std::chrono::hours +make24(std::chrono::hours h, bool is_pm) NOEXCEPT +{ + using std::chrono::hours; + if (is_pm) + { + if (h != hours{12}) + h = h + hours{12}; + } + else if (h == hours{12}) + h = hours{0}; + return h; +} + +template +using time_of_day = hh_mm_ss; + +template +CONSTCD11 +inline +hh_mm_ss> +make_time(const std::chrono::duration& d) +{ + return hh_mm_ss>(d); +} + +template +inline +typename std::enable_if +< + !std::is_convertible::value, + std::basic_ostream& +>::type +operator<<(std::basic_ostream& os, const sys_time& tp) +{ + auto const dp = date::floor(tp); + return os << 
year_month_day(dp) << ' ' << make_time(tp-dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_days& dp) +{ + return os << year_month_day(dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_time& ut) +{ + return (os << sys_time{ut.time_since_epoch()}); +} + +namespace detail +{ + +template +class string_literal; + +template +inline +CONSTCD14 +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT; + +template +class string_literal +{ + CharT p_[N]; + + CONSTCD11 string_literal() NOEXCEPT + : p_{} + {} + +public: + using const_iterator = const CharT*; + + string_literal(string_literal const&) = default; + string_literal& operator=(string_literal const&) = delete; + + template ::type> + CONSTCD11 string_literal(CharT c) NOEXCEPT + : p_{c} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2) NOEXCEPT + : p_{c1, c2} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2, CharT c3) NOEXCEPT + : p_{c1, c2, c3} + { + } + + CONSTCD14 string_literal(const CharT(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::type> + CONSTCD14 string_literal(const char(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::value>::type> + CONSTCD14 string_literal(string_literal const& a) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + CONSTCD11 const CharT* data() const NOEXCEPT {return p_;} + CONSTCD11 std::size_t size() const NOEXCEPT {return N-1;} + + CONSTCD11 const_iterator begin() const NOEXCEPT {return p_;} + CONSTCD11 const_iterator end() const NOEXCEPT {return p_ + N-1;} + + CONSTCD11 CharT const& operator[](std::size_t n) const NOEXCEPT + { + return p_[n]; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const string_literal& s) + { + return os << s.p_; + } + + template + friend + CONSTCD14 + string_literal::type, + N1 + N2 - 1> + operator+(const string_literal& x, const string_literal& y) NOEXCEPT; +}; + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], y[0]); +} + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], x[1], y[0]); +} + +template +CONSTCD14 +inline +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + using CT = typename std::conditional::type; + + string_literal r; + std::size_t i = 0; + for (; i < N1-1; ++i) + r.p_[i] = CT(x.p_[i]); + for (std::size_t j = 0; j < N2; ++j, ++i) + r.p_[i] = CT(y.p_[j]); + + return r; +} + + +template +inline +std::basic_string +operator+(std::basic_string x, const string_literal& y) +{ + x.append(y.data(), y.size()); + return x; +} + +#if __cplusplus >= 201402 && (!defined(__EDG_VERSION__) || __EDG_VERSION__ > 411) \ + && (!defined(__SUNPRO_CC) || __SUNPRO_CC > 0x5150) + +template ::value || + std::is_same::value || + std::is_same::value || + std::is_same::value>> +CONSTCD14 +inline +string_literal +msl(CharT c) NOEXCEPT +{ + return string_literal{c}; +} + +CONSTCD14 +inline +std::size_t +to_string_len(std::intmax_t i) +{ + std::size_t r = 0; + do + { + i /= 10; + ++r; + } while (i > 0); + return r; +} + +template +CONSTCD14 +inline +std::enable_if_t +< + N < 
10, + string_literal +> +msl() NOEXCEPT +{ + return msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + 10 <= N, + string_literal +> +msl() NOEXCEPT +{ + return msl() + msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den != 1, + string_literal::type::num) + + to_string_len(std::ratio::type::den) + 4> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{'/'}) + + msl() + msl(CharT{']'}); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den == 1, + string_literal::type::num) + 3> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{']'}); +} + + +#else // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +inline +std::string +to_string(std::uint64_t x) +{ + return std::to_string(x); +} + +template +inline +std::basic_string +to_string(std::uint64_t x) +{ + auto y = std::to_string(x); + return std::basic_string(y.begin(), y.end()); +} + +template +inline +typename std::enable_if +< + std::ratio::type::den != 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{'/'} + + to_string(R::den) + CharT{']'}; +} + +template +inline +typename std::enable_if +< + std::ratio::type::den == 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{']'}; +} + +#endif // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +template +CONSTCD11 +inline +string_literal +msl(std::atto) NOEXCEPT +{ + return string_literal{'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::femto) NOEXCEPT +{ + return string_literal{'f'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::pico) NOEXCEPT +{ + return string_literal{'p'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::nano) NOEXCEPT +{ + return string_literal{'n'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{'\xC2', '\xB5'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + !std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{CharT{static_cast('\xB5')}}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::milli) NOEXCEPT +{ + return string_literal{'m'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::centi) NOEXCEPT +{ + return string_literal{'c'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deca) NOEXCEPT +{ + return string_literal{'d', 'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deci) NOEXCEPT +{ + return string_literal{'d'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::hecto) NOEXCEPT +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::kilo) NOEXCEPT +{ + return string_literal{'k'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::mega) NOEXCEPT +{ + return string_literal{'M'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::giga) NOEXCEPT +{ + return string_literal{'G'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::tera) NOEXCEPT +{ + return string_literal{'T'}; +} + +template +CONSTCD11 +inline +string_literal 
+msl(std::peta) NOEXCEPT +{ + return string_literal{'P'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::exa) NOEXCEPT +{ + return string_literal{'E'}; +} + +template +CONSTCD11 +inline +auto +get_units(Period p) + -> decltype(msl(p) + string_literal{'s'}) +{ + return msl(p) + string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<1>) +{ + return string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<3600>) +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<60>) +{ + return string_literal{'m', 'i', 'n'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<86400>) +{ + return string_literal{'d'}; +} + +template > +struct make_string; + +template <> +struct make_string +{ + template + static + std::string + from(Rep n) + { + return std::to_string(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_string(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +template <> +struct make_string +{ + template + static + std::wstring + from(Rep n) + { + return std::to_wstring(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_wstring(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +} // namespace detail + +// to_stream + +CONSTDATA year nanyear{-32768}; + +template +struct fields +{ + year_month_day ymd{nanyear/0/0}; + weekday wd{8u}; + hh_mm_ss tod{}; + bool has_tod = false; + +#if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ <= 409) + fields() : ymd{nanyear/0/0}, wd{8u}, tod{}, has_tod{false} {} +#else + fields() = default; +#endif + + fields(year_month_day ymd_) : ymd(ymd_) {} + fields(weekday wd_) : wd(wd_) {} + fields(hh_mm_ss tod_) : tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_) : ymd(ymd_), wd(wd_) {} + fields(year_month_day ymd_, hh_mm_ss tod_) : ymd(ymd_), tod(tod_), + has_tod(true) {} + + fields(weekday wd_, hh_mm_ss tod_) : wd(wd_), tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_, hh_mm_ss tod_) + : ymd(ymd_) + , wd(wd_) + , tod(tod_) + , has_tod(true) + {} +}; + +namespace detail +{ + +template +unsigned +extract_weekday(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.ok() && !fds.wd.ok()) + { + // fds does not contain a valid weekday + os.setstate(std::ios::failbit); + return 8; + } + weekday wd; + if (fds.ymd.ok()) + { + wd = weekday{sys_days(fds.ymd)}; + if (fds.wd.ok() && wd != fds.wd) + { + // fds.ymd and fds.wd are inconsistent + os.setstate(std::ios::failbit); + return 8; + } + } + else + wd = fds.wd; + return static_cast((wd - Sunday).count()); +} + +template +unsigned +extract_month(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.month().ok()) + { + // fds does not contain a valid month + os.setstate(std::ios::failbit); + return 0; + } + return static_cast(fds.ymd.month()); +} + +} // namespace detail + +#if ONLY_C_LOCALE + +namespace detail +{ + +inline +std::pair +weekday_names() +{ + static const std::string nm[] = + { + "Sunday", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sun", + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +month_names() +{ + static const std::string nm[] = + { + "January", + "February", + 
"March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +ampm_names() +{ + static const std::string nm[] = + { + "AM", + "PM" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +template +FwdIter +scan_keyword(std::basic_istream& is, FwdIter kb, FwdIter ke) +{ + size_t nkw = static_cast(std::distance(kb, ke)); + const unsigned char doesnt_match = '\0'; + const unsigned char might_match = '\1'; + const unsigned char does_match = '\2'; + unsigned char statbuf[100]; + unsigned char* status = statbuf; + std::unique_ptr stat_hold(0, free); + if (nkw > sizeof(statbuf)) + { + status = (unsigned char*)std::malloc(nkw); + if (status == nullptr) + throw std::bad_alloc(); + stat_hold.reset(status); + } + size_t n_might_match = nkw; // At this point, any keyword might match + size_t n_does_match = 0; // but none of them definitely do + // Initialize all statuses to might_match, except for "" keywords are does_match + unsigned char* st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (!ky->empty()) + *st = might_match; + else + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + // While there might be a match, test keywords against the next CharT + for (size_t indx = 0; is && n_might_match > 0; ++indx) + { + // Peek at the next CharT but don't consume it + auto ic = is.peek(); + if (ic == EOF) + { + is.setstate(std::ios::eofbit); + break; + } + auto c = static_cast(toupper(static_cast(ic))); + bool consume = false; + // For each keyword which might match, see if the indx character is c + // If a match if found, consume c + // If a match is found, and that is the last character in the keyword, + // then that keyword matches. + // If the keyword doesn't match this character, then change the keyword + // to doesn't match + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == might_match) + { + if (c == static_cast(toupper(static_cast((*ky)[indx])))) + { + consume = true; + if (ky->size() == indx+1) + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + else + { + *st = doesnt_match; + --n_might_match; + } + } + } + // consume if we matched a character + if (consume) + { + (void)is.get(); + // If we consumed a character and there might be a matched keyword that + // was marked matched on a previous iteration, then such keywords + // are now marked as not matching. + if (n_might_match + n_does_match > 1) + { + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == does_match && ky->size() != indx+1) + { + *st = doesnt_match; + --n_does_match; + } + } + } + } + } + // We've exited the loop because we hit eof and/or we have no more "might matches". 
+ // Return the first matching result + for (st = status; kb != ke; ++kb, ++st) + if (*st == does_match) + break; + if (kb == ke) + is.setstate(std::ios::failbit); + return kb; +} + +} // namespace detail + +#endif // ONLY_C_LOCALE + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec) +{ +#if ONLY_C_LOCALE + using detail::weekday_names; + using detail::month_names; + using detail::ampm_names; +#endif + using detail::save_ostream; + using detail::get_units; + using detail::extract_weekday; + using detail::extract_month; + using std::ios; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + date::detail::save_ostream ss(os); + os.fill(' '); + os.flags(std::ios::skipws | std::ios::dec); + os.width(0); + tm tm{}; + bool insert_negative = fds.has_tod && fds.tod.to_duration() < Duration::zero(); +#if !ONLY_C_LOCALE + auto& facet = std::use_facet>(os.getloc()); +#endif + const CharT* command = nullptr; + CharT modified = CharT{}; + for (; *fmt; ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + if (command) + { + if (modified == CharT{}) + { + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << weekday_names().first[tm.tm_wday+7*(*fmt == 'a')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + tm.tm_mon = static_cast(extract_month(os, fds)) - 1; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << month_names().first[tm.tm_mon+12*(*fmt != 'B')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'c': + case 'x': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + if (*fmt == 'c' && !fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + auto const& ymd = fds.ymd; + auto ld = local_days(ymd); + if (*fmt == 'c') + { + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + } + tm.tm_mday = static_cast(static_cast(ymd.day())); + tm.tm_mon = static_cast(extract_month(os, fds) - 1); + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else // ONLY_C_LOCALE + if (*fmt == 'c') + { + auto wd = static_cast(extract_weekday(os, fds)); + os << weekday_names().first[static_cast(wd)+7] + << ' '; + os << month_names().first[extract_month(os, fds)-1+12] << ' '; + auto d = static_cast(static_cast(fds.ymd.day())); + if (d < 10) + os << ' '; + os << d << ' ' + << 
make_time(duration_cast(fds.tod.to_duration())) + << ' ' << fds.ymd.year(); + + } + else // *fmt == 'x' + { + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } +#endif // ONLY_C_LOCALE + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'C': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + if (y >= 0) + { + os.width(2); + os << y/100; + } + else + { + os << CharT{'-'}; + os.width(2); + os << -(y-99)/100; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + tm.tm_year = y - 1900; + CharT f[3] = {'%', 'E', 'C'}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'd': + case 'e': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.day().ok()) + os.setstate(std::ios::failbit); + auto d = static_cast(static_cast(fds.ymd.day())); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + if (*fmt == CharT{'d'}) + os.fill('0'); + else + os.fill(' '); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << d; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + tm.tm_mday = d; + CharT f[3] = {'%', 'O', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'D': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'F': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.imbue(std::locale::classic()); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(4); + os << static_cast(ymd.year()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.month()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.day()); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'g': + case 'G': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); + auto y = year_month_day{ld + days{3}}.year(); + auto start = local_days((y-years{1})/December/Thursday[last]) + + (Monday-Thursday); + if (ld < start) + --y; + if (*fmt == CharT{'G'}) + os << y; + else + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | 
std::ios::right); + os.width(2); + os << std::abs(static_cast(y)) % 100; + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'H': + case 'I': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } + auto hms = fds.tod; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto h = *fmt == CharT{'I'} ? date::make12(hms.hours()) : hms.hours(); + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_hour = static_cast(hms.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'j': + if (command) + { + if (modified == CharT{}) + { + if (fds.ymd.ok() || fds.has_tod) + { + days doy; + if (fds.ymd.ok()) + { + auto ld = local_days(fds.ymd); + auto y = fds.ymd.year(); + doy = ld - local_days(y/January/1) + days{1}; + } + else + { + doy = duration_cast(fds.tod.to_duration()); + } + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(3); + os << doy.count(); + } + else + { + os.setstate(std::ios::failbit); + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'm': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.month().ok()) + os.setstate(std::ios::failbit); + auto m = static_cast(fds.ymd.month()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (m < 10) + os << CharT{'0'}; + os << m; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_mon = static_cast(m-1); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'M': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_min = static_cast(fds.tod.minutes().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'n': + if (command) + { + if (modified == CharT{}) + os << CharT{'\n'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + if (date::is_am(fds.tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + 
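+                // (Exposition note, not upstream: the E and O modifiers have
+                // no defined meaning for %p, so the else branch below echoes
+                // the "%<modifier>p" sequence verbatim instead of converting.)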
else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Q': + case 'q': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + auto d = fds.tod.to_duration(); + if (*fmt == 'q') + os << get_units(typename decltype(d)::period::type{}); + else + os << d.count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_sec = static_cast(fds.tod.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + hh_mm_ss tod(duration_cast(fds.tod.to_duration())); + save_ostream _(os); + os.fill('0'); + os.width(2); + os << date::make12(tod.hours()).count() << CharT{':'}; + os.width(2); + os << tod.minutes().count() << CharT{':'}; + os.width(2); + os << tod.seconds().count() << CharT{' '}; + if (date::is_am(tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (fds.tod.hours() < hours{10}) + os << CharT{'0'}; + os << fds.tod.hours().count() << CharT{':'}; + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'S': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << fds.tod.s_; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_sec = static_cast(fds.tod.s_.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 't': + if (command) + { + if (modified == CharT{}) + os << CharT{'\t'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + os << fds.tod; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'u': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto wd = extract_weekday(os, fds); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << (wd != 0 ? 
wd : 7u); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'U': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Sunday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } + #if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'V': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto y = year_month_day{ld + days{3}}.year(); + auto st = local_days((y-years{1})/12/Thursday[last]) + + (Monday-Thursday); + if (ld < st) + { + --y; + st = local_days((y - years{1})/12/Thursday[last]) + + (Monday-Thursday); + } + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + auto const& ymd = fds.ymd; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'w': + if (command) + { + auto wd = extract_weekday(os, fds); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + os << wd; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'W': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Monday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + 
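+                    // (Exposition note, not upstream: on this 'O'-modified
+                    // path the std::tm is filled in by hand, tm_year above and
+                    // tm_wday/tm_yday below, before %OW is delegated to the
+                    // locale's std::time_put facet.)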
tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'X': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else + os << fds.tod; +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'y': + if (command) + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + y = std::abs(y) % 100; + if (y < 10) + os << CharT{'0'}; + os << y; +#if !ONLY_C_LOCALE + } + else + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = y - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Y': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = fds.ymd.year(); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.imbue(std::locale::classic()); + os << y; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(y) - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'z': + if (command) + { + if (offset_sec == nullptr) + { + // Can not format %z with unknown offset + os.setstate(ios::failbit); + return os; + } + auto m = duration_cast(*offset_sec); + auto neg = m < minutes{0}; + m = date::abs(m); + auto h = duration_cast(m); + m -= h; + if (neg) + os << CharT{'-'}; + else + os << CharT{'+'}; + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + if (modified != CharT{}) + os << CharT{':'}; + if (m < minutes{10}) + os << CharT{'0'}; + os << m.count(); + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + if (abbrev == nullptr) + { + // Can not format %Z with unknown time_zone + os.setstate(ios::failbit); + return os; + } + for (auto c : *abbrev) + os << CharT(c); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + os << CharT{'%'} << modified << *fmt; + command = nullptr; + modified = CharT{}; + } + } + else + os << *fmt; + break; + case '%': + if (command) + { + if (modified == CharT{}) + { + os << CharT{'%'}; + command = nullptr; + } + else + { + os << CharT{'%'} << modified << CharT{'%'}; + command = nullptr; + modified = CharT{}; + } + } + else + command = fmt; + break; + default: + if 
(command) + { + os << CharT{'%'}; + command = nullptr; + } + if (modified != CharT{}) + { + os << modified; + modified = CharT{}; + } + os << *fmt; + break; + } + } + if (command) + os << CharT{'%'}; + if (modified != CharT{}) + os << modified; + return os; +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year& y) +{ + using CT = std::chrono::seconds; + fields fds{y/0/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month& m) +{ + using CT = std::chrono::seconds; + fields fds{m/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const day& d) +{ + using CT = std::chrono::seconds; + fields fds{d/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const weekday& wd) +{ + using CT = std::chrono::seconds; + fields fds{wd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year_month& ym) +{ + using CT = std::chrono::seconds; + fields fds{ym/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month_day& md) +{ + using CT = std::chrono::seconds; + fields fds{md/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const year_month_day& ymd) +{ + using CT = std::chrono::seconds; + fields fds{ymd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const std::chrono::duration& d) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + fields fds{hh_mm_ss{d}}; + return to_stream(os, fmt, fds); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const local_time& tp, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr) +{ + using CT = typename std::common_type::type; + auto ld = std::chrono::time_point_cast(tp); + fields fds; + if (ld <= tp) + fds = fields{year_month_day{ld}, hh_mm_ss{tp-local_seconds{ld}}}; + else + fds = fields{year_month_day{ld - days{1}}, + hh_mm_ss{days{1} - (local_seconds{ld} - tp)}}; + return to_stream(os, fmt, fds, abbrev, offset_sec); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const sys_time& tp) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto sd = std::chrono::time_point_cast(tp); + fields fds; + if (sd <= tp) + fds = fields{year_month_day{sd}, hh_mm_ss{tp-sys_seconds{sd}}}; + else + fds = fields{year_month_day{sd - days{1}}, + hh_mm_ss{days{1} - (sys_seconds{sd} - tp)}}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +// format + +template +auto +format(const std::locale& loc, const CharT* fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const CharT* fmt, const Streamable& tp) + -> 
decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const std::locale& loc, const std::basic_string& fmt, + const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +template +auto +format(const std::basic_string& fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +// parse + +namespace detail +{ + +template +bool +read_char(std::basic_istream& is, CharT fmt, std::ios::iostate& err) +{ + auto ic = is.get(); + if (Traits::eq_int_type(ic, Traits::eof()) || + !Traits::eq(Traits::to_char_type(ic), fmt)) + { + err |= std::ios::failbit; + is.setstate(std::ios::failbit); + return false; + } + return true; +} + +template +unsigned +read_unsigned(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned x = 0; + unsigned count = 0; + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + (void)is.get(); + ++count; + x = 10*x + static_cast(c - '0'); + if (count == M) + break; + } + if (count < m) + is.setstate(std::ios::failbit); + return x; +} + +template +int +read_signed(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (('0' <= c && c <= '9') || c == '-' || c == '+') + { + if (c == '-' || c == '+') + (void)is.get(); + auto x = static_cast(read_unsigned(is, std::max(m, 1u), M)); + if (!is.fail()) + { + if (c == '-') + x = -x; + return x; + } + } + } + if (m > 0) + is.setstate(std::ios::failbit); + return 0; +} + +template +long double +read_long_double(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned count = 0; + unsigned fcount = 0; + unsigned long long i = 0; + unsigned long long f = 0; + bool parsing_fraction = false; +#if ONLY_C_LOCALE + typename Traits::int_type decimal_point = '.'; +#else + auto decimal_point = Traits::to_int_type( + std::use_facet>(is.getloc()).decimal_point()); +#endif + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + if (Traits::eq_int_type(ic, decimal_point)) + { + decimal_point = Traits::eof(); + parsing_fraction = true; + } + else + { + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + if (!parsing_fraction) + { + i = 10*i + static_cast(c - '0'); + } + else + { + f = 10*f + static_cast(c - '0'); + ++fcount; + } + } + (void)is.get(); + if (++count == M) + break; + } + if (count < m) + { + is.setstate(std::ios::failbit); + return 0; + } + return static_cast(i) + static_cast(f)/std::pow(10.L, fcount); +} + +struct rs +{ + int& i; + unsigned m; + unsigned M; +}; + +struct ru +{ + int& i; + unsigned m; + unsigned M; +}; + +struct rld +{ + long double& i; + unsigned m; + unsigned M; +}; + +template +void +read(std::basic_istream&) +{ +} + +template +void +read(std::basic_istream& is, CharT a0, 
Args&& ...args); + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args); + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args); + +template +void +read(std::basic_istream& is, int a0, Args&& ...args); + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args); + +template +void +read(std::basic_istream& is, CharT a0, Args&& ...args) +{ + // No-op if a0 == CharT{} + if (a0 != CharT{}) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + is.setstate(std::ios::failbit | std::ios::eofbit); + return; + } + if (!Traits::eq(Traits::to_char_type(ic), a0)) + { + is.setstate(std::ios::failbit); + return; + } + (void)is.get(); + } + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args) +{ + auto x = read_signed(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args) +{ + auto x = read_unsigned(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = static_cast(x); + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, int a0, Args&& ...args) +{ + if (a0 != -1) + { + auto u = static_cast(a0); + CharT buf[std::numeric_limits::digits10+2u] = {}; + auto e = buf; + do + { + *e++ = static_cast(CharT(u % 10) + CharT{'0'}); + u /= 10; + } while (u > 0); + std::reverse(buf, e); + for (auto p = buf; p != e && is.rdstate() == std::ios::goodbit; ++p) + read(is, *p); + } + if (is.rdstate() == std::ios::goodbit) + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args) +{ + auto x = read_long_double(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +inline +void +checked_set(T& value, T from, T not_a_value, std::basic_ios& is) +{ + if (!is.fail()) + { + if (value == not_a_value) + value = std::move(from); + else if (value != from) + is.setstate(std::ios::failbit); + } +} + +} // namespace detail; + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev, + std::chrono::minutes* offset) +{ + using std::numeric_limits; + using std::ios; + using std::chrono::duration; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + using detail::round_i; + typename std::basic_istream::sentry ok{is, true}; + if (ok) + { + date::detail::save_istream ss(is); + is.fill(' '); + is.flags(std::ios::skipws | std::ios::dec); + is.width(0); +#if !ONLY_C_LOCALE + auto& f = std::use_facet>(is.getloc()); + std::tm tm{}; +#endif + const CharT* command = nullptr; + auto modified = CharT{}; + auto width = -1; + + CONSTDATA int not_a_year = numeric_limits::min(); + CONSTDATA int not_a_2digit_year = 100; + CONSTDATA int not_a_century = not_a_year / 100; + CONSTDATA int not_a_month = 0; + CONSTDATA int not_a_day = 0; + CONSTDATA int not_a_hour = numeric_limits::min(); + CONSTDATA int not_a_hour_12_value = 0; + CONSTDATA int not_a_minute = not_a_hour; + CONSTDATA Duration not_a_second = Duration::min(); + CONSTDATA int not_a_doy = -1; + CONSTDATA int not_a_weekday = 8; + CONSTDATA int not_a_week_num = 100; + CONSTDATA int not_a_ampm = -1; + CONSTDATA minutes not_a_offset = minutes::min(); + + int Y = not_a_year; // c, F, Y * + int y = not_a_2digit_year; // D, x, y * + int g = not_a_2digit_year; // g * + int G = not_a_year; // G * + int C = 
not_a_century; // C * + int m = not_a_month; // b, B, h, m, c, D, F, x * + int d = not_a_day; // c, d, D, e, F, x * + int j = not_a_doy; // j * + int wd = not_a_weekday; // a, A, u, w * + int H = not_a_hour; // c, H, R, T, X * + int I = not_a_hour_12_value; // I, r * + int p = not_a_ampm; // p, r * + int M = not_a_minute; // c, M, r, R, T, X * + Duration s = not_a_second; // c, r, S, T, X * + int U = not_a_week_num; // U * + int V = not_a_week_num; // V * + int W = not_a_week_num; // W * + std::basic_string temp_abbrev; // Z * + minutes temp_offset = not_a_offset; // z * + + using detail::read; + using detail::rs; + using detail::ru; + using detail::rld; + using detail::checked_set; + for (; *fmt != CharT{} && !is.fail(); ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + case 'u': + case 'w': // wd: a, A, u, w + if (command) + { + int trial_wd = not_a_weekday; + if (*fmt == 'a' || *fmt == 'A') + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; +#else + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + trial_wd = i % 7; +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + } + else // *fmt == 'u' || *fmt == 'w' + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + read(is, ru{trial_wd, 1, width == -1 ? + 1u : static_cast(width)}); + if (!is.fail()) + { + if (*fmt == 'u') + { + if (!(1 <= trial_wd && trial_wd <= 7)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + else if (trial_wd == 7) + trial_wd = 0; + } + else // *fmt == 'w' + { + if (!(0 <= trial_wd && trial_wd <= 6)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + } + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + } + if (trial_wd != not_a_weekday) + checked_set(wd, trial_wd, not_a_weekday, is); + } + else // !command + read(is, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + int ttm = not_a_month; +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + ttm = tm.tm_mon + 1; + is.setstate(err); +#else + auto nm = detail::month_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + ttm = i % 12 + 1; +#endif + checked_set(m, ttm, not_a_month, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'c': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + 
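+                    // ONLY_C_LOCALE fallback: with no std::time_get facet to
+                    // delegate to, %c is parsed by hand against the C locale's
+                    // fixed date-time layout: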
// "%a %b %e %T %Y" + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(wd, static_cast(i % 7), not_a_weekday, is); + ws(is); + nm = detail::month_names(); + i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(m, static_cast(i % 12 + 1), not_a_month, is); + ws(is); + int td = not_a_day; + read(is, rs{td, 1, 2}); + checked_set(d, td, not_a_day, is); + ws(is); + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH; + int tM; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + int tY = not_a_year; + read(is, rs{tY, 1, 4u}); + checked_set(Y, tY, not_a_year, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'x': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + } + is.setstate(err); +#else + // "%m/%d/%y" + int ty = not_a_2digit_year; + int tm = not_a_month; + int td = not_a_day; + read(is, ru{tm, 1, 2}, CharT{'/'}, ru{td, 1, 2}, CharT{'/'}, + rs{ty, 1, 2}); + checked_set(y, ty, not_a_2digit_year, is); + checked_set(m, tm, not_a_month, is); + checked_set(d, td, not_a_day, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'X': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%T" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'C': + if (command) + { + int tC = not_a_century; +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + read(is, rs{tC, 1, width == -1 ? 2u : static_cast(width)}); +#if !ONLY_C_LOCALE + } + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + auto tY = tm.tm_year + 1900; + tC = (tY >= 0 ? 
tY : tY-99) / 100;
+                        }
+                        is.setstate(err);
+                    }
+#endif
+                    checked_set(C, tC, not_a_century, is);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'D':
+                if (command)
+                {
+                    if (modified == CharT{})
+                    {
+                        int tn = not_a_month;
+                        int td = not_a_day;
+                        int ty = not_a_2digit_year;
+                        read(is, ru{tn, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'},
+                                 ru{td, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'},
+                                 rs{ty, 1, 2});
+                        checked_set(y, ty, not_a_2digit_year, is);
+                        checked_set(m, tn, not_a_month, is);
+                        checked_set(d, td, not_a_day, is);
+                    }
+                    else
+                        read(is, CharT{'%'}, width, modified, *fmt);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'F':
+                if (command)
+                {
+                    if (modified == CharT{})
+                    {
+                        int tY = not_a_year;
+                        int tn = not_a_month;
+                        int td = not_a_day;
+                        read(is, rs{tY, 1, width == -1 ? 4u : static_cast(width)},
+                                 CharT{'-'}, ru{tn, 1, 2}, CharT{'-'}, ru{td, 1, 2});
+                        checked_set(Y, tY, not_a_year, is);
+                        checked_set(m, tn, not_a_month, is);
+                        checked_set(d, td, not_a_day, is);
+                    }
+                    else
+                        read(is, CharT{'%'}, width, modified, *fmt);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'd':
+            case 'e':
+                if (command)
+                {
+#if !ONLY_C_LOCALE
+                    if (modified == CharT{})
+#else
+                    if (modified != CharT{'E'})
+#endif
+                    {
+                        int td = not_a_day;
+                        read(is, rs{td, 1, width == -1 ? 2u : static_cast(width)});
+                        checked_set(d, td, not_a_day, is);
+                    }
+#if !ONLY_C_LOCALE
+                    else if (modified == CharT{'O'})
+                    {
+                        ios::iostate err = ios::goodbit;
+                        f.get(is, nullptr, is, err, &tm, command, fmt+1);
+                        command = nullptr;
+                        width = -1;
+                        modified = CharT{};
+                        if ((err & ios::failbit) == 0)
+                            checked_set(d, tm.tm_mday, not_a_day, is);
+                        is.setstate(err);
+                    }
+#endif
+                    else
+                        read(is, CharT{'%'}, width, modified, *fmt);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'H':
+                if (command)
+                {
+#if !ONLY_C_LOCALE
+                    if (modified == CharT{})
+#else
+                    if (modified != CharT{'E'})
+#endif
+                    {
+                        int tH = not_a_hour;
+                        read(is, ru{tH, 1, width == -1 ? 2u : static_cast(width)});
+                        checked_set(H, tH, not_a_hour, is);
+                    }
+#if !ONLY_C_LOCALE
+                    else if (modified == CharT{'O'})
+                    {
+                        ios::iostate err = ios::goodbit;
+                        f.get(is, nullptr, is, err, &tm, command, fmt+1);
+                        if ((err & ios::failbit) == 0)
+                            checked_set(H, tm.tm_hour, not_a_hour, is);
+                        is.setstate(err);
+                    }
+#endif
+                    else
+                        read(is, CharT{'%'}, width, modified, *fmt);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'I':
+                if (command)
+                {
+                    if (modified == CharT{})
+                    {
+                        int tI = not_a_hour_12_value;
+                        // reads an hour into I; it must be in [1, 12]
+                        read(is, rs{tI, 1, width == -1 ? 2u : static_cast(width)});
+                        if (!(1 <= tI && tI <= 12))
+                            is.setstate(ios::failbit);
+                        checked_set(I, tI, not_a_hour_12_value, is);
+                    }
+                    else
+                        read(is, CharT{'%'}, width, modified, *fmt);
+                    command = nullptr;
+                    width = -1;
+                    modified = CharT{};
+                }
+                else
+                    read(is, *fmt);
+                break;
+            case 'j':
+                if (command)
+                {
+                    if (modified == CharT{})
+                    {
+                        int tj = not_a_doy;
+                        read(is, ru{tj, 1, width == -1 ?
3u : static_cast(width)}); + checked_set(j, tj, not_a_doy, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'M': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tM = not_a_minute; + read(is, ru{tM, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(M, tM, not_a_minute, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(M, tm.tm_min, not_a_minute, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'm': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tn = not_a_month; + read(is, rs{tn, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(m, tn, not_a_month, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(m, tm.tm_mon + 1, not_a_month, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'n': + case 't': + if (command) + { + if (modified == CharT{}) + { + // %n matches a single white space character + // %t matches 0 or 1 white space characters + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + ios::iostate err = ios::eofbit; + if (*fmt == 'n') + err |= ios::failbit; + is.setstate(err); + break; + } + if (isspace(ic)) + { + (void)is.get(); + } + else if (*fmt == 'n') + is.setstate(ios::failbit); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + int tp = not_a_ampm; +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_hour = 1; + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (tm.tm_hour == 1) + tp = 0; + else if (tm.tm_hour == 13) + tp = 1; + else + is.setstate(err); +#else + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + tp = static_cast(i); +#endif + checked_set(p, tp, not_a_ampm, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_hour, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%I:%M:%S %p" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 
2 : 3 + dfs::width; + long double S{}; + int tI = not_a_hour_12_value; + int tM = not_a_minute; + read(is, ru{tI, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(I, tI, not_a_hour_12_value, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(p, static_cast(i), not_a_ampm, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + int tH = not_a_hour; + int tM = not_a_minute; + read(is, ru{tH, 1, 2}, CharT{'\0'}, CharT{':'}, CharT{'\0'}, + ru{tM, 1, 2}, CharT{'\0'}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'S': + if (command) + { + #if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + long double S{}; + read(is, rld{S, 1, width == -1 ? w : static_cast(width)}); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'O'}) +#endif + { + int tY = not_a_year; + read(is, rs{tY, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(Y, tY, not_a_year, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + int ty = not_a_2digit_year; + read(is, ru{ty, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(y, ty, not_a_2digit_year, is); + } +#if !ONLY_C_LOCALE + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'g': + if (command) + { + if (modified == CharT{}) + { + int tg = not_a_2digit_year; + read(is, ru{tg, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(g, tg, not_a_2digit_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'G': + if (command) + { + if (modified == CharT{}) + { + int tG = not_a_year; + read(is, rs{tG, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(G, tG, not_a_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'U': + if (command) + { + if (modified == CharT{}) + { + int tU = not_a_week_num; + read(is, ru{tU, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(U, tU, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'V': + if (command) + { + if (modified == CharT{}) + { + int tV = not_a_week_num; + read(is, ru{tV, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(V, tV, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'W': + if (command) + { + if (modified == CharT{}) + { + int tW = not_a_week_num; + read(is, ru{tW, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(W, tW, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else + read(is, *fmt); + break; + case '%': + if (command) + { + if (modified == CharT{}) + read(is, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + command = fmt; + break; + case 'z': + if (command) + { + int tH, tM; + minutes toff = not_a_offset; + bool neg = false; + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == '-') + neg = true; + } + if (modified == CharT{}) + { + read(is, rs{tH, 2, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if ('0' <= c && c <= '9') + { + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + else + { + read(is, rs{tH, 1, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == ':') + { + (void)is.get(); + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + if (neg) + toff = -toff; + checked_set(temp_offset, toff, not_a_offset, is); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + std::basic_string buf; + while (is.rdstate() == std::ios::goodbit) + { + auto i = is.rdbuf()->sgetc(); + if (Traits::eq_int_type(i, Traits::eof())) + { + is.setstate(ios::eofbit); + break; + } + auto wc = Traits::to_char_type(i); + auto c = static_cast(wc); + // is c a valid time zone name or abbreviation character? 
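+                            // Accepted: ASCII alphanumerics plus '_', '/',
+                            // '-' and '+'; any other character terminates
+                            // the abbreviation.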
+ if (!(CharT{1} < wc && wc < CharT{127}) || !(isalnum(c) || + c == '_' || c == '/' || c == '-' || c == '+')) + break; + buf.push_back(c); + is.rdbuf()->sbumpc(); + } + if (buf.empty()) + is.setstate(ios::failbit); + checked_set(temp_abbrev, buf, {}, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + default: + if (command) + { + if (width == -1 && modified == CharT{} && '0' <= *fmt && *fmt <= '9') + { + width = static_cast(*fmt) - '0'; + while ('0' <= fmt[1] && fmt[1] <= '9') + width = 10*width + static_cast(*++fmt) - '0'; + } + else + { + if (modified == CharT{}) + read(is, CharT{'%'}, width, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else // !command + { + if (isspace(static_cast(*fmt))) + { + // space matches 0 or more white space characters + if (is.good()) + ws(is); + } + else + read(is, *fmt); + } + break; + } + } + // is.fail() || *fmt == CharT{} + if (is.rdstate() == ios::goodbit && command) + { + if (modified == CharT{}) + read(is, CharT{'%'}, width); + else + read(is, CharT{'%'}, width, modified); + } + if (!is.fail()) + { + if (y != not_a_2digit_year) + { + // Convert y and an optional C to Y + if (!(0 <= y && y <= 99)) + goto broken; + if (C == not_a_century) + { + if (Y == not_a_year) + { + if (y >= 69) + C = 19; + else + C = 20; + } + else + { + C = (Y >= 0 ? Y : Y-100) / 100; + } + } + int tY; + if (C >= 0) + tY = 100*C + y; + else + tY = 100*(C+1) - (y == 0 ? 100 : y); + if (Y != not_a_year && Y != tY) + goto broken; + Y = tY; + } + if (g != not_a_2digit_year) + { + // Convert g and an optional C to G + if (!(0 <= g && g <= 99)) + goto broken; + if (C == not_a_century) + { + if (G == not_a_year) + { + if (g >= 69) + C = 19; + else + C = 20; + } + else + { + C = (G >= 0 ? G : G-100) / 100; + } + } + int tG; + if (C >= 0) + tG = 100*C + g; + else + tG = 100*(C+1) - (g == 0 ? 
100 : g); + if (G != not_a_year && G != tG) + goto broken; + G = tG; + } + if (Y < static_cast(year::min()) || Y > static_cast(year::max())) + Y = not_a_year; + bool computed = false; + if (G != not_a_year && V != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{G-1}/December/Thursday[last]) + + (Monday-Thursday) + weeks{V-1} + + (weekday{static_cast(wd)}-Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && U != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Sunday[1]) + + weeks{U-1} + + (weekday{static_cast(wd)} - Sunday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && W != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Monday[1]) + + weeks{W-1} + + (weekday{static_cast(wd)} - Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (j != not_a_doy && Y != not_a_year) + { + auto ymd_trial = year_month_day{local_days(year{Y}/1/1) + days{j-1}}; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + j = not_a_doy; + } + auto ymd = year{Y}/m/d; + if (ymd.ok()) + { + if (wd == not_a_weekday) + wd = static_cast((weekday(sys_days(ymd)) - Sunday).count()); + else if (wd != static_cast((weekday(sys_days(ymd)) - Sunday).count())) + goto broken; + if (!computed) + { + if (G != not_a_year || V != not_a_week_num) + { + sys_days sd = ymd; + auto G_trial = year_month_day{sd + days{3}}.year(); + auto start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + if (sd < start) + { + --G_trial; + if (V != not_a_week_num) + start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + } + if (G != not_a_year && G != static_cast(G_trial)) + goto broken; + if (V != not_a_week_num) + { + auto V_trial = duration_cast(sd - start).count() + 1; + if (V != V_trial) + goto broken; + } + } + if (U != not_a_week_num) + { + auto start = sys_days(Sunday[1]/January/ymd.year()); + auto U_trial = floor(sys_days(ymd) - start).count() + 1; + if (U != U_trial) + goto broken; + } + if (W != not_a_week_num) + { + auto start = 
sys_days(Monday[1]/January/ymd.year()); + auto W_trial = floor(sys_days(ymd) - start).count() + 1; + if (W != W_trial) + goto broken; + } + } + } + fds.ymd = ymd; + if (I != not_a_hour_12_value) + { + if (!(1 <= I && I <= 12)) + goto broken; + if (p != not_a_ampm) + { + // p is in [0, 1] == [AM, PM] + // Store trial H in I + if (I == 12) + --p; + I += p*12; + // Either set H from I or make sure H and I are consistent + if (H == not_a_hour) + H = I; + else if (I != H) + goto broken; + } + else // p == not_a_ampm + { + // if H, make sure H and I could be consistent + if (H != not_a_hour) + { + if (I == 12) + { + if (H != 0 && H != 12) + goto broken; + } + else if (!(I == H || I == H+12)) + { + goto broken; + } + } + else // I is ambiguous, AM or PM? + goto broken; + } + } + if (H != not_a_hour) + { + fds.has_tod = true; + fds.tod = hh_mm_ss{hours{H}}; + } + if (M != not_a_minute) + { + fds.has_tod = true; + fds.tod.m_ = minutes{M}; + } + if (s != not_a_second) + { + fds.has_tod = true; + fds.tod.s_ = detail::decimal_format_seconds{s}; + } + if (j != not_a_doy) + { + fds.has_tod = true; + fds.tod.h_ += hours{days{j}}; + } + if (wd != not_a_weekday) + fds.wd = weekday{static_cast(wd)}; + if (abbrev != nullptr) + *abbrev = std::move(temp_abbrev); + if (offset != nullptr && temp_offset != not_a_offset) + *offset = temp_offset; + } + return is; + } +broken: + is.setstate(ios::failbit); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year& y, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.year().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + y = fds.ymd.year(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month& m, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + m = fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, day& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, weekday& wd, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.wd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + wd = fds.wd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year_month& ym, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ym = fds.ymd.year()/fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month_day& md, + 
std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok() || !fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + md = fds.ymd.month()/fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + year_month_day& ymd, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ymd = fds.ymd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + sys_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + std::chrono::minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + local_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(local_seconds{local_days(fds.ymd)} + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + std::chrono::duration& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.has_tod) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = round_i(fds.tod.to_duration()); + return is; +} + +template , + class Alloc = std::allocator> +struct parse_manip +{ + const std::basic_string format_; + Parsable& tp_; + std::basic_string* abbrev_; + std::chrono::minutes* offset_; + +public: + parse_manip(std::basic_string format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(std::move(format)) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + +#if HAS_STRING_VIEW + parse_manip(const CharT* format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + + parse_manip(std::basic_string_view format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} +#endif // HAS_STRING_VIEW +}; + +template +std::basic_istream& +operator>>(std::basic_istream& is, + const parse_manip& x) +{ + return date::from_stream(is, x.format_.c_str(), x.tp_, x.abbrev_, x.offset_); +} + +template +inline +auto +parse(const 
std::basic_string& format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, + std::declval*>(), + &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// const CharT* formats + +template +inline +auto +parse(const CharT* format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), format, tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, std::declval*>(), &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// duration streaming + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, + const std::chrono::duration& d) +{ + return os << detail::make_string::from(d.count()) + + detail::get_units(typename Period::type{}); +} + +} // namespace date +} // namespace arrow_vendored + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif + +#endif // DATE_H diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h new file mode 100644 index 0000000000000000000000000000000000000000..acad28d13b558b9ce97ad2100e90d0835aaadc0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h @@ -0,0 +1,53 @@ +// +// ios.h +// DateTimeLib +// +// The MIT License (MIT) +// +// Copyright (c) 2016 Alexander Kormanovsky +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission 
notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ios_hpp +#define ios_hpp + +#if __APPLE__ +# include +# if TARGET_OS_IPHONE +# include + + namespace arrow_vendored + { + namespace date + { + namespace iOSUtils + { + + std::string get_tzdata_path(); + std::string get_current_timezone(); + + } // namespace iOSUtils + } // namespace date + } // namespace arrow_vendored + +# endif // TARGET_OS_IPHONE +#else // !__APPLE__ +# define TARGET_OS_IPHONE 0 +#endif // !__APPLE__ +#endif // ios_hpp diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h new file mode 100644 index 0000000000000000000000000000000000000000..467db6d19979375f310374d5bc20eeb1d09a3034 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h @@ -0,0 +1,2801 @@ +#ifndef TZ_H +#define TZ_H + +// The MIT License (MIT) +// +// Copyright (c) 2015, 2016, 2017 Howard Hinnant +// Copyright (c) 2017 Jiangang Zhuang +// Copyright (c) 2017 Aaron Bishop +// Copyright (c) 2017 Tomasz Kamiński +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +// Get more recent database at http://www.iana.org/time-zones + +// The notion of "current timezone" is something the operating system is expected to "just +// know". How it knows this is system specific. It's often a value set by the user at OS +// installation time and recorded by the OS somewhere. On Linux and Mac systems the current +// timezone name is obtained by looking at the name or contents of a particular file on +// disk. On Windows the current timezone name comes from the registry. 
In either method, +// there is no guarantee that the "native" current timezone name obtained will match any +// of the "Standard" names in this library's "database". On Linux, the names usually do +// seem to match so mapping functions to map from native to "Standard" are typically not +// required. On Windows, the names are never "Standard" so mapping is always required. +// Technically any OS may use the mapping process but currently only Windows does use it. + +// NOTE(ARROW): If this is not set, then the library will attempt to +// use libcurl to obtain a timezone database, and we probably do not want this. +#ifndef _WIN32 +#define USE_OS_TZDB 1 +#endif + +#ifndef USE_OS_TZDB +# define USE_OS_TZDB 0 +#endif + +#ifndef HAS_REMOTE_API +# if USE_OS_TZDB == 0 +# ifdef _WIN32 +# define HAS_REMOTE_API 0 +# else +# define HAS_REMOTE_API 1 +# endif +# else // HAS_REMOTE_API makes no sense when using the OS timezone database +# define HAS_REMOTE_API 0 +# endif +#endif + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wconstant-logical-operand" +#endif + +static_assert(!(USE_OS_TZDB && HAS_REMOTE_API), + "USE_OS_TZDB and HAS_REMOTE_API can not be used together"); + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#ifndef AUTO_DOWNLOAD +# define AUTO_DOWNLOAD HAS_REMOTE_API +#endif + +static_assert(HAS_REMOTE_API == 0 ? AUTO_DOWNLOAD == 0 : true, + "AUTO_DOWNLOAD can not be turned on without HAS_REMOTE_API"); + +#ifndef USE_SHELL_API +# define USE_SHELL_API 1 +#endif + +#if USE_OS_TZDB +# ifdef _WIN32 +# error "USE_OS_TZDB can not be used on Windows" +# endif +#endif + +#ifndef HAS_DEDUCTION_GUIDES +# if __cplusplus >= 201703 +# define HAS_DEDUCTION_GUIDES 1 +# else +# define HAS_DEDUCTION_GUIDES 0 +# endif +#endif // HAS_DEDUCTION_GUIDES + +#include "date.h" + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz_private.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# ifdef DATE_BUILD_DLL +# define DATE_API __declspec(dllexport) +# elif defined(DATE_USE_DLL) +# define DATE_API __declspec(dllimport) +# else +# define DATE_API +# endif +#else +# ifdef DATE_BUILD_DLL +# define DATE_API __attribute__ ((visibility ("default"))) +# else +# define DATE_API +# endif +#endif + +namespace arrow_vendored +{ +namespace date +{ + +enum class choose {earliest, latest}; + +namespace detail +{ + struct undocumented; + + template + struct nodeduct + { + using type = T; + }; + + template + using nodeduct_t = typename nodeduct::type; +} + +struct sys_info +{ + sys_seconds begin; + sys_seconds end; + std::chrono::seconds offset; + std::chrono::minutes save; + std::string abbrev; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_info& r) +{ + os << r.begin << '\n'; + os << r.end << '\n'; + os << make_time(r.offset) << "\n"; + os << make_time(r.save) << "\n"; + os << r.abbrev << '\n'; + return os; +} + +struct local_info +{ + enum {unique, nonexistent, ambiguous} result; + sys_info first; + sys_info second; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_info& r) +{ + if (r.result == local_info::nonexistent) + os << "nonexistent between\n"; + else if (r.result == local_info::ambiguous) + os << "ambiguous between\n"; + os << r.first; + if (r.result != local_info::unique) + { + os << "and\n"; + os << r.second; + } + return os; +} + +class nonexistent_local_time + : 
public std::runtime_error +{ +public: + template + nonexistent_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +nonexistent_local_time::nonexistent_local_time(local_time tp, + const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +nonexistent_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::nonexistent); + std::ostringstream os; + os << tp << " is in a gap between\n" + << local_seconds{i.first.end.time_since_epoch()} + i.first.offset << ' ' + << i.first.abbrev << " and\n" + << local_seconds{i.second.begin.time_since_epoch()} + i.second.offset << ' ' + << i.second.abbrev + << " which are both equivalent to\n" + << i.first.end << " UTC"; + return os.str(); +} + +class ambiguous_local_time + : public std::runtime_error +{ +public: + template + ambiguous_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +ambiguous_local_time::ambiguous_local_time(local_time tp, const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +ambiguous_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::ambiguous); + std::ostringstream os; + os << tp << " is ambiguous. It could be\n" + << tp << ' ' << i.first.abbrev << " == " + << tp - i.first.offset << " UTC or\n" + << tp << ' ' << i.second.abbrev << " == " + << tp - i.second.offset << " UTC"; + return os.str(); +} + +class time_zone; + +#if HAS_STRING_VIEW +DATE_API const time_zone* locate_zone(std::string_view tz_name); +#else +DATE_API const time_zone* locate_zone(const std::string& tz_name); +#endif + +DATE_API const time_zone* current_zone(); + +template +struct zoned_traits +{ +}; + +template <> +struct zoned_traits +{ + static + const time_zone* + default_zone() + { + return date::locate_zone("Etc/UTC"); + } + +#if HAS_STRING_VIEW + + static + const time_zone* + locate_zone(std::string_view name) + { + return date::locate_zone(name); + } + +#else // !HAS_STRING_VIEW + + static + const time_zone* + locate_zone(const std::string& name) + { + return date::locate_zone(name); + } + + static + const time_zone* + locate_zone(const char* name) + { + return date::locate_zone(name); + } + +#endif // !HAS_STRING_VIEW +}; + +template +class zoned_time; + +template +bool +operator==(const zoned_time& x, + const zoned_time& y); + +template +class zoned_time +{ +public: + using duration = typename std::common_type::type; + +private: + TimeZonePtr zone_; + sys_time tp_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(const sys_time& st); + explicit zoned_time(TimeZonePtr z); + +#if HAS_STRING_VIEW + template ::locate_zone(std::string_view())) + >::value + >::type> + explicit zoned_time(std::string_view name); +#else +# if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())) + >::value + >::type> +# endif + explicit zoned_time(const std::string& name); +#endif + + template , + sys_time>::value + >::type> + zoned_time(const zoned_time& zt) NOEXCEPT; + + zoned_time(TimeZonePtr z, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{})), + sys_time + >::value + 
>::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{}, + choose::earliest)), + sys_time + >::value + >::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp, choose c); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt, choose); + +#if HAS_STRING_VIEW + + template ::locate_zone(std::string_view())), + sys_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> st); + + template ::locate_zone(std::string_view())), + local_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp); + + template ::locate_zone(std::string_view())), + local_time, + choose + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp, choose c); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time, + choose + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt, choose); + +#else // !HAS_STRING_VIEW + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const char* name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt, + choose); + +#if 
!defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt, + choose); + +#endif // !HAS_STRING_VIEW + + zoned_time& operator=(const sys_time& st); + zoned_time& operator=(const local_time& ut); + + explicit operator sys_time() const; + explicit operator local_time() const; + + TimeZonePtr get_time_zone() const; + local_time get_local_time() const; + sys_time get_sys_time() const; + sys_info get_info() const; + + template + friend + bool + operator==(const zoned_time& x, + const zoned_time& y); + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, + const zoned_time& t); + +private: + template friend class zoned_time; + + template + static + TimeZonePtr2&& + check(TimeZonePtr2&& p); +}; + +using zoned_seconds = zoned_time; + +#if HAS_DEDUCTION_GUIDES + +namespace detail +{ + template + using time_zone_representation = + std::conditional_t + < + std::is_convertible::value, + time_zone const*, + std::remove_cv_t> + >; +} + +zoned_time() + -> zoned_time; + +template +zoned_time(sys_time) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&, sys_time) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, local_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, zoned_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +#endif // HAS_DEDUCTION_GUIDES + +template +inline +bool +operator==(const zoned_time& x, + const zoned_time& y) +{ + return x.zone_ == y.zone_ && x.tp_ == y.tp_; +} + +template +inline +bool +operator!=(const zoned_time& x, + const zoned_time& y) +{ + return !(x == y); +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + +namespace detail +{ +# if USE_OS_TZDB + struct transition; + struct expanded_ttinfo; +# else // !USE_OS_TZDB + struct zonelet; + class Rule; +# endif // !USE_OS_TZDB +} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +class time_zone +{ +private: + std::string name_; +#if USE_OS_TZDB + std::vector transitions_; + std::vector ttinfos_; +#else // !USE_OS_TZDB + std::vector zonelets_; +#endif // !USE_OS_TZDB + std::unique_ptr adjusted_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + time_zone(time_zone&&) = default; + time_zone& operator=(time_zone&&) = default; +#else // defined(_MSC_VER) && (_MSC_VER < 1900) + time_zone(time_zone&& src); + time_zone& operator=(time_zone&& src); +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + + DATE_API explicit time_zone(const std::string& s, detail::undocumented); + + const std::string& name() const NOEXCEPT; + + template sys_info get_info(sys_time st) const; + template local_info get_info(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp, choose z) const; + + template + local_time::type> + to_local(sys_time tp) const; + + friend bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT; + friend bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT; + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone& z); + +#if !USE_OS_TZDB + DATE_API void add(const std::string& s); +#endif // !USE_OS_TZDB + 
+private: + DATE_API sys_info get_info_impl(sys_seconds tp) const; + DATE_API local_info get_info_impl(local_seconds tp) const; + + template + sys_time::type> + to_sys_impl(local_time tp, choose z, std::false_type) const; + template + sys_time::type> + to_sys_impl(local_time tp, choose, std::true_type) const; + +#if USE_OS_TZDB + DATE_API void init() const; + DATE_API void init_impl(); + DATE_API sys_info + load_sys_info(std::vector::const_iterator i) const; + + template + DATE_API void + load_data(std::istream& inf, std::int32_t tzh_leapcnt, std::int32_t tzh_timecnt, + std::int32_t tzh_typecnt, std::int32_t tzh_charcnt); +#else // !USE_OS_TZDB + DATE_API sys_info get_info_impl(sys_seconds tp, int tz_int) const; + DATE_API void adjust_infos(const std::vector& rules); + DATE_API void parse_info(std::istream& in); +#endif // !USE_OS_TZDB +}; + +#if defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +time_zone::time_zone(time_zone&& src) + : name_(std::move(src.name_)) + , zonelets_(std::move(src.zonelets_)) + , adjusted_(std::move(src.adjusted_)) + {} + +inline +time_zone& +time_zone::operator=(time_zone&& src) +{ + name_ = std::move(src.name_); + zonelets_ = std::move(src.zonelets_); + adjusted_ = std::move(src.adjusted_); + return *this; +} + +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +const std::string& +time_zone::name() const NOEXCEPT +{ + return name_; +} + +template +inline +sys_info +time_zone::get_info(sys_time st) const +{ + return get_info_impl(date::floor(st)); +} + +template +inline +local_info +time_zone::get_info(local_time tp) const +{ + return get_info_impl(date::floor(tp)); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp) const +{ + return to_sys_impl(tp, choose{}, std::true_type{}); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp, choose z) const +{ + return to_sys_impl(tp, z, std::false_type{}); +} + +template +inline +local_time::type> +time_zone::to_local(sys_time tp) const +{ + using LT = local_time::type>; + auto i = get_info(tp); + return LT{(tp + i.offset).time_since_epoch()}; +} + +inline bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ == y.name_;} +inline bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ < y.name_;} + +inline bool operator!=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x == y);} +inline bool operator> (const time_zone& x, const time_zone& y) NOEXCEPT {return y < x;} +inline bool operator<=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(y < x);} +inline bool operator>=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x < y);} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose z, std::false_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + { + return i.first.end; + } + else if (i.result == local_info::ambiguous) + { + if (z == choose::latest) + return sys_time{tp.time_since_epoch()} - i.second.offset; + } + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose, std::true_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + throw nonexistent_local_time(tp, i); + else if (i.result == local_info::ambiguous) + throw ambiguous_local_time(tp, i); + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +#if !USE_OS_TZDB + +class time_zone_link +{ +private: + std::string name_; + std::string target_; +public: + 
DATE_API explicit time_zone_link(const std::string& s); + + const std::string& name() const {return name_;} + const std::string& target() const {return target_;} + + friend bool operator==(const time_zone_link& x, const time_zone_link& y) {return x.name_ == y.name_;} + friend bool operator< (const time_zone_link& x, const time_zone_link& y) {return x.name_ < y.name_;} + + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone_link& x); +}; + +using link = time_zone_link; + +inline bool operator!=(const time_zone_link& x, const time_zone_link& y) {return !(x == y);} +inline bool operator> (const time_zone_link& x, const time_zone_link& y) {return y < x;} +inline bool operator<=(const time_zone_link& x, const time_zone_link& y) {return !(y < x);} +inline bool operator>=(const time_zone_link& x, const time_zone_link& y) {return !(x < y);} + +#endif // !USE_OS_TZDB + +class leap_second +{ +private: + sys_seconds date_; + +public: +#if USE_OS_TZDB + DATE_API explicit leap_second(const sys_seconds& s, detail::undocumented); +#else + DATE_API explicit leap_second(const std::string& s, detail::undocumented); +#endif + + sys_seconds date() const {return date_;} + + friend bool operator==(const leap_second& x, const leap_second& y) {return x.date_ == y.date_;} + friend bool operator< (const leap_second& x, const leap_second& y) {return x.date_ < y.date_;} + + template + friend + bool + operator==(const leap_second& x, const sys_time& y) + { + return x.date_ == y; + } + + template + friend + bool + operator< (const leap_second& x, const sys_time& y) + { + return x.date_ < y; + } + + template + friend + bool + operator< (const sys_time& x, const leap_second& y) + { + return x < y.date_; + } + + friend DATE_API std::ostream& operator<<(std::ostream& os, const leap_second& x); +}; + +inline bool operator!=(const leap_second& x, const leap_second& y) {return !(x == y);} +inline bool operator> (const leap_second& x, const leap_second& y) {return y < x;} +inline bool operator<=(const leap_second& x, const leap_second& y) {return !(y < x);} +inline bool operator>=(const leap_second& x, const leap_second& y) {return !(x < y);} + +template +inline +bool +operator==(const sys_time& x, const leap_second& y) +{ + return y == x; +} + +template +inline +bool +operator!=(const leap_second& x, const sys_time& y) +{ + return !(x == y); +} + +template +inline +bool +operator!=(const sys_time& x, const leap_second& y) +{ + return !(x == y); +} + +template +inline +bool +operator> (const leap_second& x, const sys_time& y) +{ + return y < x; +} + +template +inline +bool +operator> (const sys_time& x, const leap_second& y) +{ + return y < x; +} + +template +inline +bool +operator<=(const leap_second& x, const sys_time& y) +{ + return !(y < x); +} + +template +inline +bool +operator<=(const sys_time& x, const leap_second& y) +{ + return !(y < x); +} + +template +inline +bool +operator>=(const leap_second& x, const sys_time& y) +{ + return !(x < y); +} + +template +inline +bool +operator>=(const sys_time& x, const leap_second& y) +{ + return !(x < y); +} + +using leap = leap_second; + +#ifdef _WIN32 + +namespace detail +{ + +// The time zone mapping is modelled after this data file: +// http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml +// and the field names match the element names from the mapZone element +// of windowsZones.xml. 
+// The website displays this file here:
+// http://www.unicode.org/cldr/charts/latest/supplemental/zone_tzid.html
+// The html view is sorted before being displayed, but is otherwise the same.
+// There is a mapping between the os-centric view (in this case windows) that
+// the html display uses and the generic view that the xml file uses.
+// That mapping is this:
+// display column "windows" -> xml field "other".
+// display column "region"  -> xml field "territory".
+// display column "tzid"    -> xml field "type".
+// This structure uses the generic terminology because it could be
+// used to support other os/native name conversions, not just windows,
+// and using the same generic names helps retain the connection to the
+// origin of the data that we are using.
+struct timezone_mapping
+{
+    timezone_mapping(const char* other, const char* territory, const char* type)
+        : other(other), territory(territory), type(type)
+    {
+    }
+    timezone_mapping() = default;
+    std::string other;
+    std::string territory;
+    std::string type;
+};
+
+}  // detail
+
+#endif  // _WIN32
+
+struct tzdb
+{
+    std::string                           version = "unknown";
+    std::vector<time_zone>                zones;
+#if !USE_OS_TZDB
+    std::vector<time_zone_link>           links;
+#endif
+    std::vector<leap_second>              leap_seconds;
+#if !USE_OS_TZDB
+    std::vector<detail::Rule>             rules;
+#endif
+#ifdef _WIN32
+    std::vector<detail::timezone_mapping> mappings;
+#endif
+    tzdb* next = nullptr;
+
+    tzdb() = default;
+#if !defined(_MSC_VER) || (_MSC_VER >= 1900)
+    tzdb(tzdb&&) = default;
+    tzdb& operator=(tzdb&&) = default;
+#else   // defined(_MSC_VER) && (_MSC_VER < 1900)
+    tzdb(tzdb&& src)
+        : version(std::move(src.version))
+        , zones(std::move(src.zones))
+        , links(std::move(src.links))
+        , leap_seconds(std::move(src.leap_seconds))
+        , rules(std::move(src.rules))
+        , mappings(std::move(src.mappings))
+        {}
+
+    tzdb& operator=(tzdb&& src)
+    {
+        version = std::move(src.version);
+        zones = std::move(src.zones);
+        links = std::move(src.links);
+        leap_seconds = std::move(src.leap_seconds);
+        rules = std::move(src.rules);
+        mappings = std::move(src.mappings);
+        return *this;
+    }
+#endif  // defined(_MSC_VER) && (_MSC_VER < 1900)
+
+#if HAS_STRING_VIEW
+    const time_zone* locate_zone(std::string_view tz_name) const;
+#else
+    const time_zone* locate_zone(const std::string& tz_name) const;
+#endif
+    const time_zone* current_zone() const;
+};
+
+using TZ_DB = tzdb;
+
+DATE_API std::ostream&
+operator<<(std::ostream& os, const tzdb& db);
+
+DATE_API const tzdb& get_tzdb();
+
+class tzdb_list
+{
+    std::atomic<tzdb*> head_{nullptr};
+
+public:
+    ~tzdb_list();
+    tzdb_list() = default;
+    tzdb_list(tzdb_list&& x) NOEXCEPT;
+
+    const tzdb& front() const NOEXCEPT {return *head_;}
+          tzdb& front()       NOEXCEPT {return *head_;}
+
+    class const_iterator;
+
+    const_iterator begin() const NOEXCEPT;
+    const_iterator end() const NOEXCEPT;
+
+    const_iterator cbegin() const NOEXCEPT;
+    const_iterator cend() const NOEXCEPT;
+
+    const_iterator erase_after(const_iterator p) NOEXCEPT;
+
+    struct undocumented_helper;
+private:
+    void push_front(tzdb* tzdb) NOEXCEPT;
+};
+
+class tzdb_list::const_iterator
+{
+    tzdb* p_ = nullptr;
+
+    explicit const_iterator(tzdb* p) NOEXCEPT : p_{p} {}
+public:
+    const_iterator() = default;
+
+    using iterator_category = std::forward_iterator_tag;
+    using value_type        = tzdb;
+    using reference         = const value_type&;
+    using pointer           = const value_type*;
+    using difference_type   = std::ptrdiff_t;
+
+    reference operator*() const NOEXCEPT {return *p_;}
+    pointer  operator->() const NOEXCEPT {return p_;}
+
+    const_iterator& operator++() NOEXCEPT {p_ = p_->next; return *this;}
+    const_iterator 
operator++(int) NOEXCEPT {auto t = *this; ++(*this); return t;} + + friend + bool + operator==(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return x.p_ == y.p_;} + + friend + bool + operator!=(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return !(x == y);} + + friend class tzdb_list; +}; + +inline +tzdb_list::const_iterator +tzdb_list::begin() const NOEXCEPT +{ + return const_iterator{head_}; +} + +inline +tzdb_list::const_iterator +tzdb_list::end() const NOEXCEPT +{ + return const_iterator{nullptr}; +} + +inline +tzdb_list::const_iterator +tzdb_list::cbegin() const NOEXCEPT +{ + return begin(); +} + +inline +tzdb_list::const_iterator +tzdb_list::cend() const NOEXCEPT +{ + return end(); +} + +DATE_API tzdb_list& get_tzdb_list(); + +#if !USE_OS_TZDB + +DATE_API const tzdb& reload_tzdb(); +DATE_API void set_install(const std::string& install); + +#endif // !USE_OS_TZDB + +#if HAS_REMOTE_API + +DATE_API std::string remote_version(); +// if provided error_buffer size should be at least CURL_ERROR_SIZE +DATE_API bool remote_download(const std::string& version, char* error_buffer = nullptr); +DATE_API bool remote_install(const std::string& version); + +#endif + +// zoned_time + +namespace detail +{ + +template +inline +T* +to_raw_pointer(T* p) NOEXCEPT +{ + return p; +} + +template +inline +auto +to_raw_pointer(Pointer p) NOEXCEPT + -> decltype(detail::to_raw_pointer(p.operator->())) +{ + return detail::to_raw_pointer(p.operator->()); +} + +} // namespace detail + +template +template +inline +TimeZonePtr2&& +zoned_time::check(TimeZonePtr2&& p) +{ + if (detail::to_raw_pointer(p) == nullptr) + throw std::runtime_error( + "zoned_time constructed with a time zone pointer == nullptr"); + return std::forward(p); +} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time() + : zone_(check(zoned_traits::default_zone())) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const sys_time& st) + : zone_(check(zoned_traits::default_zone())) + , tp_(st) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z) + : zone_(check(std::move(z))) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#endif // !HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(const zoned_time& zt) NOEXCEPT + : zone_(zt.zone_) + , tp_(zt.tp_) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z, const sys_time& st) + : zone_(check(std::move(z))) + , tp_(st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t)) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t, + choose c) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t, c)) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt) + : zone_(check(std::move(z))) + , tp_(zt.tp_) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt, choose) + : 
zoned_time(std::move(z), zt) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#endif // HAS_STRING_VIEW + +template +inline +zoned_time& +zoned_time::operator=(const sys_time& st) +{ + tp_ = st; + return *this; +} + +template +inline +zoned_time& +zoned_time::operator=(const local_time& ut) +{ + tp_ = zone_->to_sys(ut); + return *this; +} + +template +inline +zoned_time::operator local_time::duration>() const +{ + return get_local_time(); +} + +template +inline +zoned_time::operator sys_time::duration>() 
const +{ + return get_sys_time(); +} + +template +inline +TimeZonePtr +zoned_time::get_time_zone() const +{ + return zone_; +} + +template +inline +local_time::duration> +zoned_time::get_local_time() const +{ + return zone_->to_local(tp_); +} + +template +inline +sys_time::duration> +zoned_time::get_sys_time() const +{ + return tp_; +} + +template +inline +sys_info +zoned_time::get_info() const +{ + return zone_->get_info(tp_); +} + +// make_zoned_time + +inline +zoned_time +make_zoned() +{ + return zoned_time(); +} + +template +inline +zoned_time::type> +make_zoned(const sys_time& tp) +{ + return zoned_time::type>(tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class + < + typename std::decay + < + decltype(*detail::to_raw_pointer(std::declval())) + >::type + >{} + >::type +#endif +#endif + > +inline +zoned_time +make_zoned(TimeZonePtr z) +{ + return zoned_time(std::move(z)); +} + +inline +zoned_seconds +make_zoned(const std::string& name) +{ + return zoned_seconds(name); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp, choose c) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp, c); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp) +{ + return zoned_time::type>(name, tp); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp, choose c) +{ + return zoned_time::type>(name, tp, c); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt) +{ + return zoned_time(std::move(zone), zt); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt) +{ + return zoned_time(name, zt); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt, choose c) +{ + return zoned_time(std::move(zone), zt, c); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt, choose c) +{ + return zoned_time(name, zt, c); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const sys_time& st) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), st); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const sys_time& st) +{ + return zoned_time::type>(name, st); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const zoned_time& tp) +{ + using duration = typename zoned_time::duration; + using LT = local_time; + auto const st = tp.get_sys_time(); + auto const info = tp.get_time_zone()->get_info(st); + return to_stream(os, fmt, LT{(st+info.offset).time_since_epoch()}, + &info.abbrev, &info.offset); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, 
const zoned_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', ' ', '%', 'Z', CharT{}}; + return to_stream(os, fmt, t); +} + +class utc_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static CONSTDATA bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&); +}; + +template + using utc_time = std::chrono::time_point; + +using utc_seconds = utc_time; + +template +utc_time::type> +utc_clock::from_sys(const sys_time& st) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), st); + return utc_time{st.time_since_epoch() + seconds{lt-leaps.begin()}}; +} + +// Return pair +// first is true if ut is during a leap second insertion, otherwise false. +// If ut is during a leap second insertion, that leap second is included in the count +template +std::pair +is_leap_second(date::utc_time const& ut) +{ + using std::chrono::seconds; + using duration = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto tp = sys_time{ut.time_since_epoch()}; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), tp); + auto ds = seconds{lt-leaps.begin()}; + tp -= ds; + auto ls = false; + if (lt > leaps.begin()) + { + if (tp < lt[-1]) + { + if (tp >= lt[-1].date() - seconds{1}) + ls = true; + else + --ds; + } + } + return {ls, ds}; +} + +struct leap_second_info +{ + bool is_leap_second; + std::chrono::seconds elapsed; +}; + +template +leap_second_info +get_leap_second_info(date::utc_time const& ut) +{ + auto p = is_leap_second(ut); + return {p.first, p.second}; +} + +template +sys_time::type> +utc_clock::to_sys(const utc_time& ut) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto ls = is_leap_second(ut); + auto tp = sys_time{ut.time_since_epoch() - ls.second}; + if (ls.first) + tp = floor(tp) + seconds{1} - CD{1}; + return tp; +} + +inline +utc_clock::time_point +utc_clock::now() +{ + return from_sys(std::chrono::system_clock::now()); +} + +template +utc_time::type> +utc_clock::from_local(const local_time& st) +{ + return from_sys(sys_time{st.time_since_epoch()}); +} + +template +local_time::type> +utc_clock::to_local(const utc_time& ut) +{ + using CD = typename std::common_type::type; + return local_time{to_sys(ut).time_since_epoch()}; +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const utc_time& t) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto ls = is_leap_second(t); + auto tp = sys_time{t.time_since_epoch() - ls.second}; + auto const sd = floor(tp); + year_month_day ymd = sd; + auto time = make_time(tp - sys_seconds{sd}); + time.seconds(detail::undocumented{}) += seconds{ls.first}; + fields fds{ymd, time}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const 
utc_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + utc_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using std::chrono::seconds; + using std::chrono::minutes; + using CT = typename std::common_type::type; + minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + { + bool is_60_sec = fds.tod.seconds() == seconds{60}; + if (is_60_sec) + fds.tod.seconds(detail::undocumented{}) -= seconds{1}; + auto tmp = utc_clock::from_sys(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + if (is_60_sec) + tmp += seconds{1}; + if (is_60_sec != is_leap_second(tmp).first || !fds.tod.in_conventional_range()) + { + is.setstate(std::ios::failbit); + return is; + } + tp = std::chrono::time_point_cast(tmp); + } + return is; +} + +// tai_clock + +class tai_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using tai_time = std::chrono::time_point; + +using tai_seconds = tai_time; + +template +inline +utc_time::type> +tai_clock::to_utc(const tai_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} - + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +template +inline +tai_time::type> +tai_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +inline +tai_clock::time_point +tai_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +tai_clock::to_local(const tai_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} - + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +inline +tai_time::type> +tai_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const tai_time& t) +{ + const std::string abbrev("TAI"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, tai_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const tai_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > 
+std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + tai_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = tai_clock::from_local(lp); + return is; +} + +// gps_clock + +class gps_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using gps_time = std::chrono::time_point; + +using gps_seconds = gps_time; + +template +inline +utc_time::type> +gps_clock::to_utc(const gps_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} + + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +template +inline +gps_time::type> +gps_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +inline +gps_clock::time_point +gps_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +gps_clock::to_local(const gps_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} + + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + +template +inline +gps_time::type> +gps_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const gps_time& t) +{ + const std::string abbrev("GPS"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, gps_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const gps_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + gps_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = gps_clock::from_local(lp); + return is; +} + +// clock_time_conversion + +template +struct clock_time_conversion +{}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + sys_time + operator()(const sys_time& st) const + { + return st; + } +}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + utc_time + operator()(const utc_time& ut) const + { + return ut; + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + local_time 
+ operator()(const local_time& lt) const + { + return lt; + } +}; + +template <> +struct clock_time_conversion +{ + template + utc_time::type> + operator()(const sys_time& st) const + { + return utc_clock::from_sys(st); + } +}; + +template <> +struct clock_time_conversion +{ + template + sys_time::type> + operator()(const utc_time& ut) const + { + return utc_clock::to_sys(ut); + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + local_time + operator()(const sys_time& st) const + { + return local_time{st.time_since_epoch()}; + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + sys_time + operator()(const local_time& lt) const + { + return sys_time{lt.time_since_epoch()}; + } +}; + +template<> +struct clock_time_conversion +{ + template + utc_time::type> + operator()(const local_time& lt) const + { + return utc_clock::from_local(lt); + } +}; + +template<> +struct clock_time_conversion +{ + template + local_time::type> + operator()(const utc_time& ut) const + { + return utc_clock::to_local(ut); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + std::chrono::time_point + operator()(const std::chrono::time_point& tp) const + { + return tp; + } +}; + +namespace ctc_detail +{ + +template + using time_point = std::chrono::time_point; + +using std::declval; +using std::chrono::system_clock; + +//Check if TimePoint is time for given clock, +//if not emits hard error +template +struct return_clock_time +{ + using clock_time_point = time_point; + using type = TimePoint; + + static_assert(std::is_same::value, + "time point with appropariate clock shall be returned"); +}; + +// Check if Clock has to_sys method accepting TimePoint with given duration const& and +// returning sys_time. If so has nested type member equal to return type to_sys. 
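The return_* traits that follow implement the detection described in the comment above via expression SFINAE. As a reading aid only — fake_clock and has_to_sys are invented names, and this sketch is not part of the vendored header — here is the same idiom in isolation:

#include <chrono>
#include <type_traits>
#include <utility>

struct fake_clock
{
    // Toy clock: converts its own time_points to system_clock time.
    template <class Duration>
    static std::chrono::time_point<std::chrono::system_clock, Duration>
    to_sys(const std::chrono::time_point<fake_clock, Duration>&) { return {}; }
};

template <class Clock, class Duration, class = void>
struct has_to_sys : std::false_type {};        // primary: no usable to_sys

template <class Clock, class Duration>
struct has_to_sys
       <
           Clock, Duration,
           decltype(Clock::to_sys(
               std::declval<std::chrono::time_point<Clock, Duration> const&>()), void())
       > : std::true_type {};                  // chosen when Clock::to_sys(tp) compiles

static_assert(has_to_sys<fake_clock, std::chrono::seconds>::value, "");
static_assert(!has_to_sys<std::chrono::steady_clock, std::chrono::seconds>::value, "");

The comma operator inside decltype(expr, void()) type-checks expr but discards its type, so a well-formed call collapses the third template argument to void and selects the specialization, while an ill-formed call is a substitution failure rather than an error.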
+template <class Clock, class Duration, class = void>
+struct return_to_sys
+{};
+
+template <class Clock, class Duration>
+struct return_to_sys
+       <
+           Clock, Duration,
+           decltype(Clock::to_sys(declval<time_point<Clock, Duration> const&>()), void())
+       >
+    : return_clock_time
+      <
+          system_clock,
+          decltype(Clock::to_sys(declval<time_point<Clock, Duration> const&>()))
+      >
+{};
+
+// Similar to above
+template <class Clock, class Duration, class = void>
+struct return_from_sys
+{};
+
+template <class Clock, class Duration>
+struct return_from_sys
+       <
+           Clock, Duration,
+           decltype(Clock::from_sys(declval<time_point<system_clock, Duration> const&>()),
+                    void())
+       >
+    : return_clock_time
+      <
+          Clock,
+          decltype(Clock::from_sys(declval<time_point<system_clock, Duration> const&>()))
+      >
+{};
+
+// Similar to above
+template <class Clock, class Duration, class = void>
+struct return_to_utc
+{};
+
+template <class Clock, class Duration>
+struct return_to_utc
+       <
+           Clock, Duration,
+           decltype(Clock::to_utc(declval<time_point<Clock, Duration> const&>()), void())
+       >
+    : return_clock_time
+      <
+          utc_clock,
+          decltype(Clock::to_utc(declval<time_point<Clock, Duration> const&>()))>
+{};
+
+// Similar to above
+template <class Clock, class Duration, class = void>
+struct return_from_utc
+{};
+
+template <class Clock, class Duration>
+struct return_from_utc
+       <
+           Clock, Duration,
+           decltype(Clock::from_utc(declval<time_point<utc_clock, Duration> const&>()),
+                    void())
+       >
+    : return_clock_time
+      <
+          Clock,
+          decltype(Clock::from_utc(declval<time_point<utc_clock, Duration> const&>()))
+      >
+{};
+
+// Similar to above
+template <class Clock, class Duration, class = void>
+struct return_to_local
+{};
+
+template <class Clock, class Duration>
+struct return_to_local
+       <
+           Clock, Duration,
+           decltype(Clock::to_local(declval<time_point<Clock, Duration> const&>()),
+                    void())
+       >
+    : return_clock_time
+      <
+          local_t,
+          decltype(Clock::to_local(declval<time_point<Clock, Duration> const&>()))
+      >
+{};
+
+// Similar to above
+template <class Clock, class Duration, class = void>
+struct return_from_local
+{};
+
+template <class Clock, class Duration>
+struct return_from_local
+       <
+           Clock, Duration,
+           decltype(Clock::from_local(declval<time_point<local_t, Duration> const&>()),
+                    void())
+       >
+    : return_clock_time
+      <
+          Clock,
+          decltype(Clock::from_local(declval<time_point<local_t, Duration> const&>()))
+      >
+{};
+
+}  // namespace ctc_detail
+
+template <class SrcClock>
+struct clock_time_conversion<std::chrono::system_clock, SrcClock>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_to_sys<SrcClock, Duration>::type
+    operator()(const std::chrono::time_point<SrcClock, Duration>& tp) const
+    {
+        return SrcClock::to_sys(tp);
+    }
+};
+
+template <class DstClock>
+struct clock_time_conversion<DstClock, std::chrono::system_clock>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_from_sys<DstClock, Duration>::type
+    operator()(const sys_time<Duration>& st) const
+    {
+        return DstClock::from_sys(st);
+    }
+};
+
+template <class SrcClock>
+struct clock_time_conversion<utc_clock, SrcClock>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_to_utc<SrcClock, Duration>::type
+    operator()(const std::chrono::time_point<SrcClock, Duration>& tp) const
+    {
+        return SrcClock::to_utc(tp);
+    }
+};
+
+template <class DstClock>
+struct clock_time_conversion<DstClock, utc_clock>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_from_utc<DstClock, Duration>::type
+    operator()(const utc_time<Duration>& ut) const
+    {
+        return DstClock::from_utc(ut);
+    }
+};
+
+template <class SrcClock>
+struct clock_time_conversion<local_t, SrcClock>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_to_local<SrcClock, Duration>::type
+    operator()(const std::chrono::time_point<SrcClock, Duration>& tp) const
+    {
+        return SrcClock::to_local(tp);
+    }
+};
+
+template <class DstClock>
+struct clock_time_conversion<DstClock, local_t>
+{
+    template <class Duration>
+    CONSTCD14
+    typename ctc_detail::return_from_local<DstClock, Duration>::type
+    operator()(const local_time<Duration>& lt) const
+    {
+        return DstClock::from_local(lt);
+    }
+};
+
+namespace clock_cast_detail
+{
+
+template <class Clock, class Duration>
+    using time_point = std::chrono::time_point<Clock, Duration>;
+using std::chrono::system_clock;
+
+template <class DstClock, class SrcClock, class Duration>
+CONSTCD14
+auto
+conv_clock(const time_point<SrcClock, Duration>& t)
+    -> decltype(std::declval<clock_time_conversion<DstClock, SrcClock>>()(t))
+{
+    return clock_time_conversion<DstClock, SrcClock>{}(t);
+}
+
+//direct trait conversion, 1st candidate
+template <class DstClock, class SrcClock, class Duration>
+CONSTCD14
+auto
+cc_impl(const time_point<SrcClock, Duration>& t, const time_point<SrcClock, Duration>*)
+    -> decltype(conv_clock<DstClock>(t))
+{
+    return conv_clock<DstClock>(t);
+}
+
+//conversion through sys, 2nd candidate
+template <class DstClock, class SrcClock, class Duration>
+CONSTCD14
+auto
+cc_impl(const time_point<SrcClock, Duration>& t, const void*)
+    -> decltype(conv_clock<DstClock>(conv_clock<system_clock>(t)))
+{
+    return conv_clock<DstClock>(conv_clock<system_clock>(t));
+}
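Together, conv_clock and the ranked cc_impl overloads let clock_cast discover a conversion path automatically: a direct trait is preferred, then a single hop through system_clock or utc_clock, then a two-hop route through both. A usage sketch — it assumes this vendored header is includable as "tz.h" and that leap-second data is reachable at runtime through get_tzdb():

#include <chrono>
#include <iostream>
#include "tz.h"  // assumed include path for this vendored header

int main()
{
    using namespace arrow_vendored::date;
    using std::chrono::seconds;

    auto sys = std::chrono::time_point_cast<seconds>(std::chrono::system_clock::now());

    // Direct trait: clock_time_conversion<utc_clock, system_clock> (1st candidate).
    utc_time<seconds> u = clock_cast<utc_clock>(sys);

    // No direct sys -> tai trait exists, so cc_impl routes through utc_clock.
    tai_time<seconds> t = clock_cast<tai_clock>(sys);

    // tai -> gps likewise composes through utc.
    gps_time<seconds> g = clock_cast<gps_clock>(t);

    std::cout << u << '\n' << t << '\n' << g << '\n';
}

+//conversion through utc, 2nd candidate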
+template +CONSTCD14 +auto +cc_impl(const time_point& t, const void*) + -> decltype(0, // MSVC_WORKAROUND + conv_clock(conv_clock(t))) +{ + return conv_clock(conv_clock(t)); +} + +//conversion through sys and utc, 3rd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, ...) + -> decltype(conv_clock(conv_clock(conv_clock(t)))) +{ + return conv_clock(conv_clock(conv_clock(t))); +} + +//conversion through utc and sys, 3rd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, ...) + -> decltype(0, // MSVC_WORKAROUND + conv_clock(conv_clock(conv_clock(t)))) +{ + return conv_clock(conv_clock(conv_clock(t))); +} + +} // namespace clock_cast_detail + +template +CONSTCD14 +auto +clock_cast(const std::chrono::time_point& tp) + -> decltype(clock_cast_detail::cc_impl(tp, &tp)) +{ + return clock_cast_detail::cc_impl(tp, &tp); +} + +// Deprecated API + +template +inline +sys_time::type> +to_sys_time(const utc_time& t) +{ + return utc_clock::to_sys(t); +} + +template +inline +sys_time::type> +to_sys_time(const tai_time& t) +{ + return utc_clock::to_sys(tai_clock::to_utc(t)); +} + +template +inline +sys_time::type> +to_sys_time(const gps_time& t) +{ + return utc_clock::to_sys(gps_clock::to_utc(t)); +} + + +template +inline +utc_time::type> +to_utc_time(const sys_time& t) +{ + return utc_clock::from_sys(t); +} + +template +inline +utc_time::type> +to_utc_time(const tai_time& t) +{ + return tai_clock::to_utc(t); +} + +template +inline +utc_time::type> +to_utc_time(const gps_time& t) +{ + return gps_clock::to_utc(t); +} + + +template +inline +tai_time::type> +to_tai_time(const sys_time& t) +{ + return tai_clock::from_utc(utc_clock::from_sys(t)); +} + +template +inline +tai_time::type> +to_tai_time(const utc_time& t) +{ + return tai_clock::from_utc(t); +} + +template +inline +tai_time::type> +to_tai_time(const gps_time& t) +{ + return tai_clock::from_utc(gps_clock::to_utc(t)); +} + + +template +inline +gps_time::type> +to_gps_time(const sys_time& t) +{ + return gps_clock::from_utc(utc_clock::from_sys(t)); +} + +template +inline +gps_time::type> +to_gps_time(const utc_time& t) +{ + return gps_clock::from_utc(t); +} + +template +inline +gps_time::type> +to_gps_time(const tai_time& t) +{ + return gps_clock::from_utc(tai_clock::to_utc(t)); +} + +} // namespace date +} // namespace arrow_vendored + +#endif // TZ_H diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h new file mode 100644 index 0000000000000000000000000000000000000000..6b7a91493e1033a7c2808040ee65b72cef5efc6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h @@ -0,0 +1,319 @@ +#ifndef TZ_PRIVATE_H +#define TZ_PRIVATE_H + +// The MIT License (MIT) +// +// Copyright (c) 2015, 2016 Howard Hinnant +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +#include "tz.h" +#else +#include "date.h" +#include +#endif + +namespace arrow_vendored +{ +namespace date +{ + +namespace detail +{ + +#if !USE_OS_TZDB + +enum class tz {utc, local, standard}; + +//forward declare to avoid warnings in gcc 6.2 +class MonthDayTime; +std::istream& operator>>(std::istream& is, MonthDayTime& x); +std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); + + +class MonthDayTime +{ +private: + struct pair + { +#if defined(_MSC_VER) && (_MSC_VER < 1900) + pair() : month_day_(date::jan / 1), weekday_(0U) {} + + pair(const date::month_day& month_day, const date::weekday& weekday) + : month_day_(month_day), weekday_(weekday) {} +#endif + + date::month_day month_day_; + date::weekday weekday_; + }; + + enum Type {month_day, month_last_dow, lteq, gteq}; + + Type type_{month_day}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + date::month_day month_day_; + date::month_weekday_last month_weekday_last_; + pair month_day_weekday_; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + U() : month_day_{date::jan/1} {} +#else + U() : + month_day_(date::jan/1), + month_weekday_last_(date::month(0U), date::weekday_last(date::weekday(0U))) + {} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + + U& operator=(const date::month_day& x); + U& operator=(const date::month_weekday_last& x); + U& operator=(const pair& x); + } u; + + std::chrono::hours h_{0}; + std::chrono::minutes m_{0}; + std::chrono::seconds s_{0}; + tz zone_{tz::local}; + +public: + MonthDayTime() = default; + MonthDayTime(local_seconds tp, tz timezone); + MonthDayTime(const date::month_day& md, tz timezone); + + date::day day() const; + date::month month() const; + tz zone() const {return zone_;} + + void canonicalize(date::year y); + + sys_seconds + to_sys(date::year y, std::chrono::seconds offset, std::chrono::seconds save) const; + sys_days to_sys_days(date::year y) const; + + sys_seconds to_time_point(date::year y) const; + int compare(date::year y, const MonthDayTime& x, date::year yx, + std::chrono::seconds offset, std::chrono::minutes prev_save) const; + + friend std::istream& operator>>(std::istream& is, MonthDayTime& x); + friend std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); +}; + +// A Rule specifies one or more set of datetimes without using an offset. +// Multiple dates are specified with multiple years. The years in effect +// go from starting_year_ to ending_year_, inclusive. starting_year_ <= +// ending_year_. save_ is in effect for times from the specified time +// onward, including the specified time. When the specified time is +// local, it uses the save_ from the chronologically previous Rule, or if +// there is none, 0. 
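For orientation, and purely as an illustration (tzdata excerpts, not part of this header): a Rule object corresponds to one "Rule" line of the IANA tz source. The 2007-onward US daylight-saving rules, for example, read:

    # NAME  FROM  TO   TYPE  IN   ON      AT    SAVE  LETTER/S
    Rule US 2007  max  -     Mar  Sun>=8  2:00  1:00  D
    Rule US 2007  max  -     Nov  Sun>=1  2:00  0     S

For the first line, name_ is "US", starting_year_ is 2007, ending_year_ is the maximum year, starting_at_ is the second Sunday in March at 02:00 (interpreted as local wall-clock time, per the comment above), save_ is one hour, and abbrev_ is "D" (substituted into a zone FORMAT such as "C%sT" to yield "CDT").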
+ +//forward declare to avoid warnings in gcc 6.2 +class Rule; +bool operator==(const Rule& x, const Rule& y); +bool operator<(const Rule& x, const Rule& y); +bool operator==(const Rule& x, const date::year& y); +bool operator<(const Rule& x, const date::year& y); +bool operator==(const date::year& x, const Rule& y); +bool operator<(const date::year& x, const Rule& y); +bool operator==(const Rule& x, const std::string& y); +bool operator<(const Rule& x, const std::string& y); +bool operator==(const std::string& x, const Rule& y); +bool operator<(const std::string& x, const Rule& y); +std::ostream& operator<<(std::ostream& os, const Rule& r); + +class Rule +{ +private: + std::string name_; + date::year starting_year_{0}; + date::year ending_year_{0}; + MonthDayTime starting_at_; + std::chrono::minutes save_{0}; + std::string abbrev_; + +public: + Rule() = default; + explicit Rule(const std::string& s); + Rule(const Rule& r, date::year starting_year, date::year ending_year); + + const std::string& name() const {return name_;} + const std::string& abbrev() const {return abbrev_;} + + const MonthDayTime& mdt() const {return starting_at_;} + const date::year& starting_year() const {return starting_year_;} + const date::year& ending_year() const {return ending_year_;} + const std::chrono::minutes& save() const {return save_;} + + static void split_overlaps(std::vector& rules); + + friend bool operator==(const Rule& x, const Rule& y); + friend bool operator<(const Rule& x, const Rule& y); + friend bool operator==(const Rule& x, const date::year& y); + friend bool operator<(const Rule& x, const date::year& y); + friend bool operator==(const date::year& x, const Rule& y); + friend bool operator<(const date::year& x, const Rule& y); + friend bool operator==(const Rule& x, const std::string& y); + friend bool operator<(const Rule& x, const std::string& y); + friend bool operator==(const std::string& x, const Rule& y); + friend bool operator<(const std::string& x, const Rule& y); + + friend std::ostream& operator<<(std::ostream& os, const Rule& r); + +private: + date::day day() const; + date::month month() const; + static void split_overlaps(std::vector& rules, std::size_t i, std::size_t& e); + static bool overlaps(const Rule& x, const Rule& y); + static void split(std::vector& rules, std::size_t i, std::size_t k, + std::size_t& e); +}; + +inline bool operator!=(const Rule& x, const Rule& y) {return !(x == y);} +inline bool operator> (const Rule& x, const Rule& y) {return y < x;} +inline bool operator<=(const Rule& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const date::year& y) {return !(x == y);} +inline bool operator> (const Rule& x, const date::year& y) {return y < x;} +inline bool operator<=(const Rule& x, const date::year& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const date::year& y) {return !(x < y);} + +inline bool operator!=(const date::year& x, const Rule& y) {return !(x == y);} +inline bool operator> (const date::year& x, const Rule& y) {return y < x;} +inline bool operator<=(const date::year& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const date::year& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const std::string& y) {return !(x == y);} +inline bool operator> (const Rule& x, const std::string& y) {return y < x;} +inline bool operator<=(const Rule& x, const std::string& y) {return !(y < x);} +inline bool 
operator>=(const Rule& x, const std::string& y) {return !(x < y);} + +inline bool operator!=(const std::string& x, const Rule& y) {return !(x == y);} +inline bool operator> (const std::string& x, const Rule& y) {return y < x;} +inline bool operator<=(const std::string& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const std::string& x, const Rule& y) {return !(x < y);} + +struct zonelet +{ + enum tag {has_rule, has_save, is_empty}; + + std::chrono::seconds gmtoff_; + tag tag_ = has_rule; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + std::string rule_; + std::chrono::minutes save_; + + ~U() {} + U() {} + U(const U&) {} + U& operator=(const U&) = delete; + } u; + + std::string format_; + date::year until_year_{0}; + MonthDayTime until_date_; + sys_seconds until_utc_; + local_seconds until_std_; + local_seconds until_loc_; + std::chrono::minutes initial_save_{0}; + std::string initial_abbrev_; + std::pair<const Rule*, date::year> first_rule_{nullptr, date::year::min()}; + std::pair<const Rule*, date::year> last_rule_{nullptr, date::year::max()}; + + ~zonelet(); + zonelet(); + zonelet(const zonelet& i); + zonelet& operator=(const zonelet&) = delete; +}; + +#else // USE_OS_TZDB + +struct ttinfo +{ + std::int32_t tt_gmtoff; + unsigned char tt_isdst; + unsigned char tt_abbrind; + unsigned char pad[2]; +}; + +static_assert(sizeof(ttinfo) == 8, ""); + +struct expanded_ttinfo +{ + std::chrono::seconds offset; + std::string abbrev; + bool is_dst; +}; + +struct transition +{ + sys_seconds timepoint; + const expanded_ttinfo* info; + + transition(sys_seconds tp, const expanded_ttinfo* i = nullptr) + : timepoint(tp) + , info(i) + {} + + friend + std::ostream& + operator<<(std::ostream& os, const transition& t) + { + using date::operator<<; + os << t.timepoint << "Z "; + if (t.info->offset >= std::chrono::seconds{0}) + os << '+'; + os << make_time(t.info->offset); + if (t.info->is_dst > 0) + os << " daylight "; + else + os << " standard "; + os << t.info->abbrev; + return os; + } +}; + +#endif // USE_OS_TZDB + +} // namespace detail + +} // namespace date +} // namespace arrow_vendored + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz.h" +#endif + +#endif // TZ_PRIVATE_H diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..ae031238d85ac81f91bdaccc8c519c250a6fa056 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +#pragma once + +#if defined(ARROW_STATIC) +// intentionally empty +#elif defined(ARROW_EXPORTING) +#define DATE_BUILD_DLL +#else +#define DATE_USE_DLL +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..f56239e8e88956a319aca3ec25fa48e0db4d6547 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h @@ -0,0 +1,86 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_ +#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum BignumDtoaMode { + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. + BIGNUM_DTOA_SHORTEST, + // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats. + BIGNUM_DTOA_SHORTEST_SINGLE, + // Return a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + BIGNUM_DTOA_FIXED, + // Return a fixed number of digits, no matter what the exponent is. + BIGNUM_DTOA_PRECISION +}; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// The buffer will be null-terminated. +// +// The input v must be > 0 and different from NaN and Infinity. +// +// The output depends on the given mode: +// - SHORTEST: produce the least amount of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. The buffer will choose the representation that is closest to +// 'v'.
If there are two at the same distance, then the number is rounded up. +// In this mode the 'requested_digits' parameter is ignored. +// - FIXED: produces digits necessary to print a given number with +// 'requested_digits' digits after the decimal point. The produced digits +// might be too short in which case the caller has to fill the gaps with '0's. +// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. +// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns +// buffer="2", point=0. +// Note: the length of the returned buffer has no meaning wrt the significance +// of its digits. That is, just because it contains '0's does not mean that +// any other digit would not satisfy the internal identity requirement. +// - PRECISION: produces 'requested_digits' where the first digit is not '0'. +// Even though the length of produced digits usually equals +// 'requested_digits', the function is allowed to return fewer digits, in +// which case the caller has to fill the missing digits with '0's. +// Halfway cases are again rounded up. +// 'BignumDtoa' expects the given buffer to be big enough to hold all digits +// and a terminating null-character. +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector<char> buffer, int* length, int* point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h new file mode 100644 index 0000000000000000000000000000000000000000..0bedb63b188f16e632df75a4b8d4256eb98ba387 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h @@ -0,0 +1,154 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#ifndef DOUBLE_CONVERSION_BIGNUM_H_ +#define DOUBLE_CONVERSION_BIGNUM_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class Bignum { + public: + // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately. + // This bignum can encode much bigger numbers, since it contains an + // exponent. + static const int kMaxSignificantBits = 3584; + + Bignum() : used_bigits_(0), exponent_(0) {} + + void AssignUInt16(const uint16_t value); + void AssignUInt64(uint64_t value); + void AssignBignum(const Bignum& other); + + void AssignDecimalString(const Vector<const char> value); + void AssignHexString(const Vector<const char> value); + + void AssignPowerUInt16(uint16_t base, const int exponent); + + void AddUInt64(const uint64_t operand); + void AddBignum(const Bignum& other); + // Precondition: this >= other. + void SubtractBignum(const Bignum& other); + + void Square(); + void ShiftLeft(const int shift_amount); + void MultiplyByUInt32(const uint32_t factor); + void MultiplyByUInt64(const uint64_t factor); + void MultiplyByPowerOfTen(const int exponent); + void Times10() { return MultiplyByUInt32(10); } + // Pseudocode: + // int result = this / other; + // this = this % other; + // In the worst case this function is in O(this/other). + uint16_t DivideModuloIntBignum(const Bignum& other); + + bool ToHexString(char* buffer, const int buffer_size) const; + + // Returns + // -1 if a < b, + // 0 if a == b, and + // +1 if a > b. + static int Compare(const Bignum& a, const Bignum& b); + static bool Equal(const Bignum& a, const Bignum& b) { + return Compare(a, b) == 0; + } + static bool LessEqual(const Bignum& a, const Bignum& b) { + return Compare(a, b) <= 0; + } + static bool Less(const Bignum& a, const Bignum& b) { + return Compare(a, b) < 0; + } + // Returns Compare(a + b, c); + static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c); + // Returns a + b == c + static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) == 0; + } + // Returns a + b <= c + static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) <= 0; + } + // Returns a + b < c + static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) < 0; + } + private: + typedef uint32_t Chunk; + typedef uint64_t DoubleChunk; + + static const int kChunkSize = sizeof(Chunk) * 8; + static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8; + // With bigit size of 28 we lose some bits, but a double still fits easily + // into two chunks, and more importantly we can use the Comba multiplication. + static const int kBigitSize = 28; + static const Chunk kBigitMask = (1 << kBigitSize) - 1; + // Every instance allocates kBigitCapacity chunks on the stack. Bignums cannot + // grow. There are no checks if the stack-allocated space is sufficient. + static const int kBigitCapacity = kMaxSignificantBits / kBigitSize; + + static void EnsureCapacity(const int size) { + if (size > kBigitCapacity) { + DOUBLE_CONVERSION_UNREACHABLE(); + } + } + void Align(const Bignum& other); + void Clamp(); + bool IsClamped() const { + return used_bigits_ == 0 || RawBigit(used_bigits_ - 1) != 0; + } + void Zero() { + used_bigits_ = 0; + exponent_ = 0; + } + // Requires this to have enough capacity (no tests done). + // Updates used_bigits_ if necessary. + // shift_amount must be < kBigitSize.
+ void BigitsShiftLeft(const int shift_amount); + // BigitLength includes the "hidden" bigits encoded in the exponent. + int BigitLength() const { return used_bigits_ + exponent_; } + Chunk& RawBigit(const int index); + const Chunk& RawBigit(const int index) const; + Chunk BigitOrZero(const int index) const; + void SubtractTimes(const Bignum& other, const int factor); + + // The Bignum's value is value(bigits_buffer_) * 2^(exponent_ * kBigitSize), + // where the value of the buffer consists of the lower kBigitSize bits of + // the first used_bigits_ Chunks in bigits_buffer_, with the first chunk + // holding the least significant bits. + int16_t used_bigits_; + int16_t exponent_; + Chunk bigits_buffer_[kBigitCapacity]; + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Bignum); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_BIGNUM_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h new file mode 100644 index 0000000000000000000000000000000000000000..68fd82d8059957a5af0099382b10e0ac8a9bac58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h @@ -0,0 +1,66 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_ +#define DOUBLE_CONVERSION_CACHED_POWERS_H_ + +#include "diy-fp.h" + +namespace arrow_vendored { +namespace double_conversion { + +namespace PowersOfTenCache { + + // Not all powers of ten are cached. The decimal exponent of two neighboring + // cached numbers will differ by kDecimalExponentDistance. + static const int kDecimalExponentDistance = 8; + + static const int kMinDecimalExponent = -348; + static const int kMaxDecimalExponent = 340; + + // Returns a cached power-of-ten with a binary exponent in the range + // [min_exponent; max_exponent] (boundaries included).
+ void GetCachedPowerForBinaryExponentRange(int min_exponent, + int max_exponent, + DiyFp* power, + int* decimal_exponent); + + // Returns a cached power of ten x ~= 10^k such that + // k <= requested_exponent < k + kDecimalExponentDistance. + // The given requested_exponent must satisfy + // kMinDecimalExponent <= requested_exponent, and + // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance. + void GetCachedPowerForDecimalExponent(int requested_exponent, + DiyFp* power, + int* found_exponent); + +} // namespace PowersOfTenCache + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h new file mode 100644 index 0000000000000000000000000000000000000000..f3367b9392a32cd41d3204009120c7654be866de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h @@ -0,0 +1,139 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DIY_FP_H_ +#define DOUBLE_CONVERSION_DIY_FP_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// This "Do It Yourself Floating Point" class implements a floating-point number +// with a uint64 significand and an int exponent. Normalized DiyFp numbers will +// have the most significant bit of the significand set. +// Multiplication and Subtraction do not normalize their results. +// DiyFp stores only non-negative numbers and is not designed to contain special +// doubles (NaN and Infinity). +class DiyFp { + public: + static const int kSignificandSize = 64; + + DiyFp() : f_(0), e_(0) {} + DiyFp(const uint64_t significand, const int32_t exponent) : f_(significand), e_(exponent) {} + + // this -= other.
+ // The exponents of both numbers must be the same and the significand of this + // must be greater than or equal to the significand of other. + // The result will not be normalized. + void Subtract(const DiyFp& other) { + DOUBLE_CONVERSION_ASSERT(e_ == other.e_); + DOUBLE_CONVERSION_ASSERT(f_ >= other.f_); + f_ -= other.f_; + } + + // Returns a - b. + // The exponents of both numbers must be the same and a must be greater + // than or equal to b. The result will not be normalized. + static DiyFp Minus(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Subtract(b); + return result; + } + + // this *= other. + void Multiply(const DiyFp& other) { + // Simply "emulates" a 128 bit multiplication. + // However: the resulting number only contains 64 bits. The least + // significant 64 bits are only used for rounding the most significant 64 + // bits. + const uint64_t kM32 = 0xFFFFFFFFU; + const uint64_t a = f_ >> 32; + const uint64_t b = f_ & kM32; + const uint64_t c = other.f_ >> 32; + const uint64_t d = other.f_ & kM32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + // By adding 1U << 31 to tmp we round the final result. + // Halfway cases will be rounded up. + const uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31); + e_ += other.e_ + 64; + f_ = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32); + } + + // Returns a * b. + static DiyFp Times(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Multiply(b); + return result; + } + + void Normalize() { + DOUBLE_CONVERSION_ASSERT(f_ != 0); + uint64_t significand = f_; + int32_t exponent = e_; + + // This method is mainly called for normalizing boundaries. In general, + // boundaries need to be shifted by 10 bits, and we optimize for this case. + const uint64_t k10MSBits = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFC00000, 00000000); + while ((significand & k10MSBits) == 0) { + significand <<= 10; + exponent -= 10; + } + while ((significand & kUint64MSB) == 0) { + significand <<= 1; + exponent--; + } + f_ = significand; + e_ = exponent; + } + + static DiyFp Normalize(const DiyFp& a) { + DiyFp result = a; + result.Normalize(); + return result; + } + + uint64_t f() const { return f_; } + int32_t e() const { return e_; } + + void set_f(uint64_t new_value) { f_ = new_value; } + void set_e(int32_t new_value) { e_ = new_value; } + + private: + static const uint64_t kUint64MSB = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000); + + uint64_t f_; + int32_t e_; +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DIY_FP_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h new file mode 100644 index 0000000000000000000000000000000000000000..6e8884d84ca56dbfd05964e463dd7999364b3b35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h @@ -0,0 +1,34 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ +#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ + +#include "string-to-double.h" +#include "double-to-string.h" + +#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h new file mode 100644 index 0000000000000000000000000000000000000000..90a88b902d6ea12d3adf917cdbf9e63b818d71ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h @@ -0,0 +1,472 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ +#define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class DoubleToStringConverter { + public: + // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint + // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the + // function returns false. + static const int kMaxFixedDigitsBeforePoint = 60; + static const int kMaxFixedDigitsAfterPoint = 100; + + // When calling ToExponential with a requested_digits + // parameter > kMaxExponentialDigits then the function returns false. + static const int kMaxExponentialDigits = 120; + + // When calling ToPrecision with a requested_digits + // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits + // then the function returns false. + static const int kMinPrecisionDigits = 1; + static const int kMaxPrecisionDigits = 120; + + // The maximal number of digits that are needed to emit a double in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any double will never use more digits than + // kBase10MaximalLength. + // Note that DoubleToAscii null-terminates its output. So the given buffer + // should be at least kBase10MaximalLength + 1 characters long. + static const int kBase10MaximalLength = 17; + + // The maximal number of digits that are needed to emit a single in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any single will never use more digits than + // kBase10MaximalLengthSingle. + static const int kBase10MaximalLengthSingle = 9; + + // The length of the longest string that 'ToShortest' can produce when the + // converter is instantiated with EcmaScript defaults (see + // 'EcmaScriptConverter'). + // This value does not include the trailing '\0' character. + // This amount of characters is needed for negative values that hit the + // 'decimal_in_shortest_low' limit. For example: "-0.0000033333333333333333" + static const int kMaxCharsEcmaScriptShortest = 25; + + enum Flags { + NO_FLAGS = 0, + EMIT_POSITIVE_EXPONENT_SIGN = 1, + EMIT_TRAILING_DECIMAL_POINT = 2, + EMIT_TRAILING_ZERO_AFTER_POINT = 4, + UNIQUE_ZERO = 8, + NO_TRAILING_ZERO = 16, + EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL = 32, + EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL = 64 + }; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent + // form, emits a '+' for positive exponents. Example: 1.2e+2. + // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is + // converted into decimal format then a trailing decimal point is appended. + // Example: 2345.0 is converted to "2345.". + // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point + // emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT flag. + // Example: 2345.0 is converted to "2345.0". + // - UNIQUE_ZERO: "-0.0" is converted to "0.0". + // - NO_TRAILING_ZERO: Trailing zeros are removed from the fractional portion + // of the result in precision mode. Matches printf's %g. + // When EMIT_TRAILING_ZERO_AFTER_POINT is also given, one trailing zero is + // preserved.
+ // - EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL: when the input number has + // exactly one significant digit and is converted into exponent form then a + // trailing decimal point is appended to the significand in shortest mode + // or in precision mode with one requested digit. + // - EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL: in addition to a trailing + // decimal point emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag. + // + // The infinity_symbol and nan_symbol provide the string representation for + // these special values. If the string is NULL and the special value is + // encountered then the conversion functions return false. + // + // The exponent_character is used in exponential representations. It is + // usually 'e' or 'E'. + // + // When converting to the shortest representation the converter will + // represent input numbers in decimal format if they are in the interval + // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[ + // (lower boundary included, greater boundary excluded). + // Example: with decimal_in_shortest_low = -6 and + // decimal_in_shortest_high = 21: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // When converting to precision mode the converter may add + // max_leading_padding_zeroes before returning the number in exponential + // format. + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // + // When converting numbers with exactly one significant digit to exponent + // form in shortest mode or in precision mode with one requested digit, the + // EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags have + // no effect. Use the EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag to + // append a decimal point in this case and the + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL flag to also append a + // '0'-character in this case. + // Example with decimal_in_shortest_low = 0: + // ToShortest(0.0009) -> "9e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL deactivated. + // ToShortest(0.0009) -> "9.e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated. + // ToShortest(0.0009) -> "9.0e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated and + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL activated. + // + // The min_exponent_width is used for exponential representations. + // The converter adds leading '0's to the exponent until the exponent + // is at least min_exponent_width digits long. + // The min_exponent_width is clamped to 5. + // As such, the exponent may never have more than 5 digits in total.
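+ // + // A minimal usage sketch (illustrative only, not part of the vendored + // header), assuming the EcmaScript-style parameter values documented above + // and the StringBuilder helper declared in "utils.h": + // + //   char buf[128]; + //   StringBuilder builder(buf, sizeof(buf)); + //   DoubleToStringConverter converter( + //       DoubleToStringConverter::UNIQUE_ZERO | + //           DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN, + //       "Infinity", "NaN", 'e', + //       -6,   // decimal_in_shortest_low + //       21,   // decimal_in_shortest_high + //       6,    // max_leading_padding_zeroes_in_precision_mode + //       0);   // max_trailing_padding_zeroes_in_precision_mode + //   converter.ToShortest(0.0000001, &builder); + //   // builder.Finalize() would now yield "1e-7", per the examples above.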
+ DoubleToStringConverter(int flags, + const char* infinity_symbol, + const char* nan_symbol, + char exponent_character, + int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode, + int min_exponent_width = 0) + : flags_(flags), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + exponent_character_(exponent_character), + decimal_in_shortest_low_(decimal_in_shortest_low), + decimal_in_shortest_high_(decimal_in_shortest_high), + max_leading_padding_zeroes_in_precision_mode_( + max_leading_padding_zeroes_in_precision_mode), + max_trailing_padding_zeroes_in_precision_mode_( + max_trailing_padding_zeroes_in_precision_mode), + min_exponent_width_(min_exponent_width) { + // When 'trailing zero after the point' is set, then 'trailing point' + // must be set too. + DOUBLE_CONVERSION_ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) || + !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0)); + } + + // Returns a converter following the EcmaScript specification. + // + // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN. + // Special values: "Infinity" and "NaN". + // Lower case 'e' for exponential values. + // decimal_in_shortest_low: -6 + // decimal_in_shortest_high: 21 + // max_leading_padding_zeroes_in_precision_mode: 6 + // max_trailing_padding_zeroes_in_precision_mode: 0 + static const DoubleToStringConverter& EcmaScriptConverter(); + + // Computes the shortest string of digits that correctly represent the input + // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high + // (see constructor) it then either returns a decimal representation, or an + // exponential representation. + // Example with decimal_in_shortest_low = -6, + // decimal_in_shortest_high = 21, + // EMIT_POSITIVE_EXPONENT_SIGN activated, and + // EMIT_TRAILING_DECIMAL_POINT deactivated: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // Note: the conversion may round the output if the returned string + // is accurate enough to uniquely identify the input-number. + // For example the most precise representation of the double 9e59 equals + // "899999999999999918767229449717619953810131273674690656206848", but + // the converter will return the shorter (but still correct) "9e59". + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except when the input value is special and no infinity_symbol or + // nan_symbol has been given to the constructor. + // + // The length of the longest result is the maximum of the length of the + // following string representations (each with possible examples): + // - NaN and negative infinity: "NaN", "-Infinity", "-inf". + // - -10^(decimal_in_shortest_high - 1): + // "-100000000000000000000", "-1000000000000000.0" + // - the longest string in range [0; -10^decimal_in_shortest_low]. Generally, + // this string is 3 + kBase10MaximalLength - decimal_in_shortest_low. + // (Sign, '0', decimal point, padding zeroes for decimal_in_shortest_low, + // and the significant digits). + // "-0.0000033333333333333333", "-0.0012345678901234567" + // - the longest exponential representation. (A negative number with + // kBase10MaximalLength significant digits). 
+ // "-1.7976931348623157e+308", "-1.7976931348623157E308" + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToShortest(double value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST); + } + + // Same as ToShortest, but for single-precision floats. + bool ToShortestSingle(float value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE); + } + + + // Computes a decimal representation with a fixed number of digits after the + // decimal point. The last emitted digit is rounded. + // + // Examples: + // ToFixed(3.12, 1) -> "3.1" + // ToFixed(3.1415, 3) -> "3.142" + // ToFixed(1234.56789, 4) -> "1234.5679" + // ToFixed(1.23, 5) -> "1.23000" + // ToFixed(0.1, 4) -> "0.1000" + // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00" + // ToFixed(0.1, 30) -> "0.100000000000000005551115123126" + // ToFixed(0.1, 17) -> "0.10000000000000001" + // + // If requested_digits equals 0, then the tail of the result depends on + // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples, for requested_digits == 0, + // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be + // - false and false: then 123.45 -> 123 + // 0.678 -> 1 + // - true and false: then 123.45 -> 123. + // 0.678 -> 1. + // - true and true: then 123.45 -> 123.0 + // 0.678 -> 1.0 + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'value' > 10^kMaxFixedDigitsBeforePoint, or + // - 'requested_digits' > kMaxFixedDigitsAfterPoint. + // The last two conditions imply that the result for non-special values never + // contains more than + // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters + // (one additional character for the sign, and one for the decimal point). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToFixed(double value, + int requested_digits, + StringBuilder* result_builder) const; + + // Computes a representation in exponential format with requested_digits + // after the decimal point. The last emitted digit is rounded. + // If requested_digits equals -1, then the shortest exponential representation + // is computed. + // + // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and + // exponent_character set to 'e'. + // ToExponential(3.12, 1) -> "3.1e0" + // ToExponential(5.0, 3) -> "5.000e0" + // ToExponential(0.001, 2) -> "1.00e-3" + // ToExponential(3.1415, -1) -> "3.1415e0" + // ToExponential(3.1415, 4) -> "3.1415e0" + // ToExponential(3.1415, 3) -> "3.142e0" + // ToExponential(123456789000000, 3) -> "1.235e14" + // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30" + // ToExponential(1000000000000000019884624838656.0, 32) -> + // "1.00000000000000001988462483865600e30" + // ToExponential(1234, 0) -> "1e3" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'requested_digits' > kMaxExponentialDigits. 
+ // + // The last condition implies that the result never contains more than + // kMaxExponentialDigits + 8 characters (the sign, the digit before the + // decimal point, the decimal point, the exponent character, the + // exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToExponential(double value, + int requested_digits, + StringBuilder* result_builder) const; + + + // Computes 'precision' leading digits of the given 'value' and returns them + // either in exponential or decimal format, depending on + // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the + // constructor). + // The last computed digit is rounded. + // + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no + // EMIT_TRAILING_ZERO_AFTER_POINT: + // ToPrecision(123450.0, 6) -> "123450" + // ToPrecision(123450.0, 5) -> "123450" + // ToPrecision(123450.0, 4) -> "123500" + // ToPrecision(123450.0, 3) -> "123000" + // ToPrecision(123450.0, 2) -> "1.2e5" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - precision < kMinPrecisionDigits + // - precision > kMaxPrecisionDigits + // + // The last condition implies that the result never contains more than + // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the + // exponent character, the exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToPrecision(double value, + int precision, + StringBuilder* result_builder) const; + + enum DtoaMode { + // Produce the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate + // but correct) 0.3. + SHORTEST, + // Same as SHORTEST, but for single-precision floats. + SHORTEST_SINGLE, + // Produce a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + FIXED, + // Fixed number of digits (independent of the decimal point). + PRECISION + }; + + // Converts the given double 'v' to digit characters. 'v' must not be NaN, + // +Infinity, or -Infinity. In SHORTEST_SINGLE-mode this restriction also + // applies to 'v' after it has been cast to a single-precision float. That + // is, in this mode static_cast<float>(v) must not be NaN, +Infinity or + // -Infinity. + // + // The result should be interpreted as buffer * 10^(point-length). + // + // The digits are written to the buffer in the platform's charset, which is + // often UTF-8 (with ASCII-range digits) but may be another charset, such + // as EBCDIC.
+ // + // The output depends on the given mode: + // - SHORTEST: produce the least amount of digits for which the internal + // identity requirement is still satisfied. If the digits are printed + // (together with the correct exponent) then reading this number will give + // 'v' again. The buffer will choose the representation that is closest to + // 'v'. If there are two at the same distance, then the one farther away + // from 0 is chosen (halfway cases - ending with 5 - are rounded up). + // In this mode the 'requested_digits' parameter is ignored. + // - SHORTEST_SINGLE: same as SHORTEST but with single-precision. + // - FIXED: produces digits necessary to print a given number with + // 'requested_digits' digits after the decimal point. The produced digits + // might be too short in which case the caller has to fill the remainder + // with '0's. + // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. + // Halfway cases are rounded towards +/-Infinity (away from 0). The call + // toFixed(0.15, 2) thus returns buffer="2", point=0. + // The returned buffer may contain digits that would be truncated from the + // shortest representation of the input. + // - PRECISION: produces 'requested_digits' where the first digit is not '0'. + // Even though the length of produced digits usually equals + // 'requested_digits', the function is allowed to return fewer digits, in + // which case the caller has to fill the missing digits with '0's. + // Halfway cases are again rounded away from 0. + // DoubleToAscii expects the given buffer to be big enough to hold all + // digits and a terminating null-character. In SHORTEST-mode it expects a + // buffer of at least kBase10MaximalLength + 1. In all other modes the + // requested_digits parameter and the padding-zeroes limit the size of the + // output. Don't forget the decimal point, the exponent character and the + // terminating null-character when computing the maximal output size. + // The given length is only used in debug mode to ensure the buffer is big + // enough. + static void DoubleToAscii(double v, + DtoaMode mode, + int requested_digits, + char* buffer, + int buffer_length, + bool* sign, + int* length, + int* point); + + private: + // Implementation for ToShortest and ToShortestSingle. + bool ToShortestIeeeNumber(double value, + StringBuilder* result_builder, + DtoaMode mode) const; + + // If the value is a special value (NaN or Infinity) constructs the + // corresponding string using the configured infinity/nan-symbol. + // If either of them is NULL or the value is not special then the + // function returns false. + bool HandleSpecialValues(double value, StringBuilder* result_builder) const; + // Constructs an exponential representation (i.e. 1.234e56). + // The given exponent assumes a decimal point after the first decimal digit. + void CreateExponentialRepresentation(const char* decimal_digits, + int length, + int exponent, + StringBuilder* result_builder) const; + // Creates a decimal representation (i.e. 1234.5678).
+ void CreateDecimalRepresentation(const char* decimal_digits, + int length, + int decimal_point, + int digits_after_point, + StringBuilder* result_builder) const; + + const int flags_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const char exponent_character_; + const int decimal_in_shortest_low_; + const int decimal_in_shortest_high_; + const int max_leading_padding_zeroes_in_precision_mode_; + const int max_trailing_padding_zeroes_in_precision_mode_; + const int min_exponent_width_; + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd0f04dcf02c8222b24e8e5c55654e80d127684 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h @@ -0,0 +1,90 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_ +#define DOUBLE_CONVERSION_FAST_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum FastDtoaMode { + // Computes the shortest representation of the given input. The returned + // result will be the most accurate number of this length. Longer + // representations might be more accurate. + FAST_DTOA_SHORTEST, + // Same as FAST_DTOA_SHORTEST but for single-precision floats. + FAST_DTOA_SHORTEST_SINGLE, + // Computes a representation where the precision (number of digits) is + // given as input. The precision is independent of the decimal point. + FAST_DTOA_PRECISION +}; + +// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not +// include the terminating '\0' character. 
+static const int kFastDtoaMaximalLength = 17; +// Same for single-precision numbers. +static const int kFastDtoaMaximalSingleLength = 9; + +// Provides a decimal representation of v. +// The result should be interpreted as buffer * 10^(point - length). +// +// Precondition: +// * v must be a strictly positive finite double. +// +// Returns true if it succeeds, otherwise the result cannot be trusted. +// There will be *length digits inside the buffer followed by a null terminator. +// If the function returns true and mode equals +// - FAST_DTOA_SHORTEST, then +// the parameter requested_digits is ignored. +// The result satisfies +// v == (double) (buffer * 10^(point - length)). +// The digits in the buffer are the shortest representation possible. E.g. +// if 0.099999999999 and 0.1 represent the same double then "1" is returned +// with point = 0. +// The last digit will be closest to the actual v. That is, even if several +// digits might correctly yield 'v' when read again, the buffer will contain +// the one closest to v. +// - FAST_DTOA_PRECISION, then +// the buffer contains requested_digits digits. +// the difference v - (buffer * 10^(point-length)) is closest to zero for +// all possible representations of requested_digits digits. +// If there are two values that are equally close, then FastDtoa returns +// false. +// For both modes the buffer must be large enough to hold the result. +bool FastDtoa(double d, + FastDtoaMode mode, + int requested_digits, + Vector<char> buffer, + int* length, + int* decimal_point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_FAST_DTOA_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..cf2a59a9805193063997ae621e6e3367d3cc029d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h @@ -0,0 +1,58 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_ +#define DOUBLE_CONVERSION_FIXED_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// Produces digits necessary to print a given number with +// 'fractional_count' digits after the decimal point. +// The buffer must be big enough to hold the result plus one terminating null +// character. +// +// The produced digits might be too short in which case the caller has to fill +// the gaps with '0's. +// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and +// decimal_point = -2. +// Halfway cases are rounded towards +/-Infinity (away from 0). The call +// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0. +// The returned buffer may contain digits that would be truncated from the +// shortest representation of the input. +// +// This method only works for some parameters. If it can't handle the input it +// returns false. The output is null-terminated when the function succeeds. +bool FastFixedDtoa(double v, int fractional_count, + Vector<char> buffer, int* length, int* decimal_point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h new file mode 100644 index 0000000000000000000000000000000000000000..4cedc0bee04e6470ce02d27b9390068e0ec0fcc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h @@ -0,0 +1,449 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_H_ +#define DOUBLE_CONVERSION_DOUBLE_H_ + +#include "diy-fp.h" + +namespace arrow_vendored { +namespace double_conversion { + +// We assume that doubles and uint64_t have the same endianness. +static uint64_t double_to_uint64(double d) { return BitCast(d); } +static double uint64_to_double(uint64_t d64) { return BitCast(d64); } +static uint32_t float_to_uint32(float f) { return BitCast(f); } +static float uint32_to_float(uint32_t d32) { return BitCast(d32); } + +// Helper functions for doubles. +class Double { + public: + static const uint64_t kSignMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000); + static const uint64_t kExponentMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000); + static const uint64_t kSignificandMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x000FFFFF, FFFFFFFF); + static const uint64_t kHiddenBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00100000, 00000000); + static const uint64_t kQuietNanBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00080000, 00000000); + static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit. + static const int kSignificandSize = 53; + static const int kExponentBias = 0x3FF + kPhysicalSignificandSize; + static const int kMaxExponent = 0x7FF - kExponentBias; + + Double() : d64_(0) {} + explicit Double(double d) : d64_(double_to_uint64(d)) {} + explicit Double(uint64_t d64) : d64_(d64) {} + explicit Double(DiyFp diy_fp) + : d64_(DiyFpToUint64(diy_fp)) {} + + // The value encoded by this Double must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + DOUBLE_CONVERSION_ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // The value encoded by this Double must be strictly greater than 0. + DiyFp AsNormalizedDiyFp() const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + uint64_t f = Significand(); + int e = Exponent(); + + // The current double could be a denormal. + while ((f & kHiddenBit) == 0) { + f <<= 1; + e--; + } + // Do the final shifts in one go. + f <<= DiyFp::kSignificandSize - kSignificandSize; + e -= DiyFp::kSignificandSize - kSignificandSize; + return DiyFp(f, e); + } + + // Returns the double's bit as uint64. + uint64_t AsUint64() const { + return d64_; + } + + // Returns the next greater double. Returns +infinity on input +infinity. 
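+  // For instance (an illustrative note): Double(1.0).NextDouble() is the
+  // smallest double above 1.0, i.e. 1.0 + 2^-52, obtained by incrementing
+  // the raw bit pattern, and Double(-0.0).NextDouble() returns +0.0.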
+ double NextDouble() const { + if (d64_ == kInfinity) return Double(kInfinity).value(); + if (Sign() < 0 && Significand() == 0) { + // -0.0 + return 0.0; + } + if (Sign() < 0) { + return Double(d64_ - 1).value(); + } else { + return Double(d64_ + 1).value(); + } + } + + double PreviousDouble() const { + if (d64_ == (kInfinity | kSignMask)) return -Infinity(); + if (Sign() < 0) { + return Double(d64_ + 1).value(); + } else { + if (Significand() == 0) return -0.0; + return Double(d64_ - 1).value(); + } + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint64_t d64 = AsUint64(); + int biased_e = + static_cast((d64 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint64_t Significand() const { + uint64_t d64 = AsUint64(); + uint64_t significand = d64 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the double is a denormal. + bool IsDenormal() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) != 0); + } + + bool IsQuietNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint64() & kQuietNanBit) == 0); +#else + return IsNan() && ((AsUint64() & kQuietNanBit) != 0); +#endif + } + + bool IsSignalingNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint64() & kQuietNanBit) != 0); +#else + return IsNan() && ((AsUint64() & kQuietNanBit) == 0); +#endif + } + + + bool IsInfinite() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) == 0); + } + + int Sign() const { + uint64_t d64 = AsUint64(); + return (d64 & kSignMask) == 0? 1: -1; + } + + // Precondition: the value encoded by this Double must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Double must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. 
+ // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. + bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + double value() const { return uint64_to_double(d64_); } + + // Returns the significand size for a given order of magnitude. + // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude. + // This function returns the number of significant binary digits v will have + // once it's encoded into a double. In almost all cases this is equal to + // kSignificandSize. The only exceptions are denormals. They start with + // leading zeroes and their effective significand-size is hence smaller. + static int SignificandSizeForOrderOfMagnitude(int order) { + if (order >= (kDenormalExponent + kSignificandSize)) { + return kSignificandSize; + } + if (order <= kDenormalExponent) return 0; + return order - kDenormalExponent; + } + + static double Infinity() { + return Double(kInfinity).value(); + } + + static double NaN() { + return Double(kNaN).value(); + } + + private: + static const int kDenormalExponent = -kExponentBias + 1; + static const uint64_t kInfinity = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000); +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF7FFFF, FFFFFFFF); +#else + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF80000, 00000000); +#endif + + + const uint64_t d64_; + + static uint64_t DiyFpToUint64(DiyFp diy_fp) { + uint64_t significand = diy_fp.f(); + int exponent = diy_fp.e(); + while (significand > kHiddenBit + kSignificandMask) { + significand >>= 1; + exponent++; + } + if (exponent >= kMaxExponent) { + return kInfinity; + } + if (exponent < kDenormalExponent) { + return 0; + } + while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) { + significand <<= 1; + exponent--; + } + uint64_t biased_exponent; + if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) { + biased_exponent = 0; + } else { + biased_exponent = static_cast(exponent + kExponentBias); + } + return (significand & kSignificandMask) | + (biased_exponent << kPhysicalSignificandSize); + } + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Double); +}; + +class Single { + public: + static const uint32_t kSignMask = 0x80000000; + static const uint32_t kExponentMask = 0x7F800000; + static const uint32_t kSignificandMask = 0x007FFFFF; + static const uint32_t kHiddenBit = 0x00800000; + static const uint32_t kQuietNanBit = 0x00400000; + static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit. + static const int kSignificandSize = 24; + + Single() : d32_(0) {} + explicit Single(float f) : d32_(float_to_uint32(f)) {} + explicit Single(uint32_t d32) : d32_(d32) {} + + // The value encoded by this Single must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + DOUBLE_CONVERSION_ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // Returns the single's bit as uint64. 
+ uint32_t AsUint32() const { + return d32_; + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint32_t d32 = AsUint32(); + int biased_e = + static_cast((d32 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint32_t Significand() const { + uint32_t d32 = AsUint32(); + uint32_t significand = d32 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the single is a denormal. + bool IsDenormal() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) != 0); + } + + bool IsQuietNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#endif + } + + bool IsSignalingNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#endif + } + + + bool IsInfinite() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) == 0); + } + + int Sign() const { + uint32_t d32 = AsUint32(); + return (d32 & kSignMask) == 0? 1: -1; + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Single must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + // Precondition: the value encoded by this Single must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. 
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + float value() const { return uint32_to_float(d32_); } + + static float Infinity() { + return Single(kInfinity).value(); + } + + static float NaN() { + return Single(kNaN).value(); + } + + private: + static const int kExponentBias = 0x7F + kPhysicalSignificandSize; + static const int kDenormalExponent = -kExponentBias + 1; + static const int kMaxExponent = 0xFF - kExponentBias; + static const uint32_t kInfinity = 0x7F800000; +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint32_t kNaN = 0x7FBFFFFF; +#else + static const uint32_t kNaN = 0x7FC00000; +#endif + + const uint32_t d32_; + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Single); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h new file mode 100644 index 0000000000000000000000000000000000000000..83eb6fec5f44400cf9a81d45862c4bfd71ac52fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h @@ -0,0 +1,240 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ +#define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class StringToDoubleConverter { + public: + // Enumeration for allowing octals and ignoring junk when converting + // strings to numbers. 
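+  // For example (an illustrative sketch; this particular combination is an
+  // arbitrary choice), flags are combined with bitwise-or:
+  //
+  //   int flags = StringToDoubleConverter::ALLOW_LEADING_SPACES |
+  //               StringToDoubleConverter::ALLOW_TRAILING_JUNK;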
+ enum Flags { + NO_FLAGS = 0, + ALLOW_HEX = 1, + ALLOW_OCTALS = 2, + ALLOW_TRAILING_JUNK = 4, + ALLOW_LEADING_SPACES = 8, + ALLOW_TRAILING_SPACES = 16, + ALLOW_SPACES_AFTER_SIGN = 32, + ALLOW_CASE_INSENSITIVITY = 64, + ALLOW_CASE_INSENSIBILITY = 64, // Deprecated + ALLOW_HEX_FLOATS = 128, + }; + + static const uc16 kNoSeparator = '\0'; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers. + // Ex: StringToDouble("0x1234") -> 4660.0 + // In StringToDouble("0x1234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK, + // the string will not be parsed as "0" followed by junk. + // + // - ALLOW_OCTALS: recognizes the prefix "0" for octals: + // If a sequence of octal digits starts with '0', then the number is + // read as octal integer. Octal numbers may only be integers. + // Ex: StringToDouble("01234") -> 668.0 + // StringToDouble("012349") -> 12349.0 // Not a sequence of octal + // // digits. + // In StringToDouble("01234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // In StringToDouble("01234e56") the characters "e56" are trailing + // junk, too. + // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of + // a double literal. + // - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces, + // new-lines, and tabs. + // - ALLOW_TRAILING_SPACES: ignore trailing whitespace. + // - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign. + // Ex: StringToDouble("- 123.2") -> -123.2. + // StringToDouble("+ 123.2") -> 123.2 + // - ALLOW_CASE_INSENSITIVITY: ignore case of characters for special values: + // infinity and nan. + // - ALLOW_HEX_FLOATS: allows hexadecimal float literals. + // This *must* start with "0x" and separate the exponent with "p". + // Examples: 0x1.2p3 == 9.0 + // 0x10.1p0 == 16.0625 + // ALLOW_HEX and ALLOW_HEX_FLOATS are indented. + // + // empty_string_value is returned when an empty string is given as input. + // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string + // containing only spaces is converted to the 'empty_string_value', too. + // + // junk_string_value is returned when + // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not + // part of a double-literal) is found. + // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a + // double literal. + // + // infinity_symbol and nan_symbol are strings that are used to detect + // inputs that represent infinity and NaN. They can be null, in which case + // they are ignored. + // The conversion routine first reads any possible signs. Then it compares the + // following character of the input-string with the first character of + // the infinity, and nan-symbol. If either matches, the function assumes, that + // a match has been found, and expects the following input characters to match + // the remaining characters of the special-value symbol. + // This means that the following restrictions apply to special-value symbols: + // - they must not start with signs ('+', or '-'), + // - they must not have the same first character. + // - they must not start with digits. 
+ // + // If the separator character is not kNoSeparator, then that specific + // character is ignored when in between two valid digits of the significant. + // It is not allowed to appear in the exponent. + // It is not allowed to lead or trail the number. + // It is not allowed to appear twice next to each other. + // + // Examples: + // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = "infinity", + // nan_symbol = "nan": + // StringToDouble("0x1234") -> 4660.0. + // StringToDouble("0x1234K") -> 4660.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> NaN // junk_string_value. + // StringToDouble(" 1") -> NaN // junk_string_value. + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("-123.45") -> -123.45. + // StringToDouble("--123.45") -> NaN // junk_string_value. + // StringToDouble("123e45") -> 123e45. + // StringToDouble("123E45") -> 123e45. + // StringToDouble("123e+45") -> 123e45. + // StringToDouble("123E-45") -> 123e-45. + // StringToDouble("123e") -> 123.0 // trailing junk ignored. + // StringToDouble("123e-") -> 123.0 // trailing junk ignored. + // StringToDouble("+NaN") -> NaN // NaN string literal. + // StringToDouble("-infinity") -> -inf. // infinity literal. + // StringToDouble("Infinity") -> NaN // junk_string_value. + // + // flags = ALLOW_OCTAL | ALLOW_LEADING_SPACES, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = NULL, + // nan_symbol = NULL: + // StringToDouble("0x1234") -> NaN // junk_string_value. + // StringToDouble("01234") -> 668.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> 0.0 // empty_string_value. + // StringToDouble(" 1") -> 1.0 + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("0123e45") -> NaN // junk_string_value. + // StringToDouble("01239E45") -> 1239e45. + // StringToDouble("-infinity") -> NaN // junk_string_value. + // StringToDouble("NaN") -> NaN // junk_string_value. + // + // flags = NO_FLAGS, + // separator = ' ': + // StringToDouble("1 2 3 4") -> 1234.0 + // StringToDouble("1 2") -> NaN // junk_string_value + // StringToDouble("1 000 000.0") -> 1000000.0 + // StringToDouble("1.000 000") -> 1.0 + // StringToDouble("1.0e1 000") -> NaN // junk_string_value + StringToDoubleConverter(int flags, + double empty_string_value, + double junk_string_value, + const char* infinity_symbol, + const char* nan_symbol, + uc16 separator = kNoSeparator) + : flags_(flags), + empty_string_value_(empty_string_value), + junk_string_value_(junk_string_value), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + separator_(separator) { + } + + // Performs the conversion. + // The output parameter 'processed_characters_count' is set to the number + // of characters that have been processed to read the number. + // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included + // in the 'processed_characters_count'. Trailing junk is never included. + double StringToDouble(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble above but for 16 bit characters. + double StringToDouble(const uc16* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble but reads a float. + // Note that this is not equivalent to static_cast(StringToDouble(...)) + // due to potential double-rounding. 
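+  // (A decimal string that lies very close to the midpoint of two adjacent
+  // floats can land on the wrong float if it is first rounded to double and
+  // then narrowed, hence the separate float entry points below.)
+  //
+  // A minimal usage sketch (illustrative only; the argument values here are
+  // arbitrary choices, not defaults):
+  //
+  //   StringToDoubleConverter conv(StringToDoubleConverter::NO_FLAGS,
+  //                                0.0,   // empty_string_value
+  //                                0.0,   // junk_string_value
+  //                                "infinity", "nan");
+  //   int count = 0;
+  //   double d = conv.StringToDouble("123.45", 6, &count);
+  //   // d == 123.45, count == 6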
+ float StringToFloat(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToFloat above but for 16 bit characters. + float StringToFloat(const uc16* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble for T = double, and StringToFloat for T = float. + template + T StringTo(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringTo above but for 16 bit characters. + template + T StringTo(const uc16* buffer, + int length, + int* processed_characters_count) const; + + private: + const int flags_; + const double empty_string_value_; + const double junk_string_value_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const uc16 separator_; + + template + double StringToIeee(Iterator start_pointer, + int length, + bool read_as_double, + int* processed_characters_count) const; + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h new file mode 100644 index 0000000000000000000000000000000000000000..619db5838d2f75b6ea7b18ac3df458a029b70294 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h @@ -0,0 +1,66 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRTOD_H_ +#define DOUBLE_CONVERSION_STRTOD_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. 
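+// For example (illustrative): the decimal value 123.45 is passed as the
+// digit buffer "12345" with exponent -2, since 12345 * 10^-2 == 123.45.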
+double Strtod(Vector buffer, int exponent); + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. +float Strtof(Vector buffer, int exponent); + +// Same as Strtod, but assumes that 'trimmed' is already trimmed, as if run +// through TrimAndCut. That is, 'trimmed' must have no leading or trailing +// zeros, must not be a lone zero, and must not have 'too many' digits. +double StrtodTrimmed(Vector trimmed, int exponent); + +// Same as Strtof, but assumes that 'trimmed' is already trimmed, as if run +// through TrimAndCut. That is, 'trimmed' must have no leading or trailing +// zeros, must not be a lone zero, and must not have 'too many' digits. +float StrtofTrimmed(Vector trimmed, int exponent); + +inline Vector TrimTrailingZeros(Vector buffer) { + for (int i = buffer.length() - 1; i >= 0; --i) { + if (buffer[i] != '0') { + return buffer.SubVector(0, i + 1); + } + } + return Vector(buffer.start(), 0); +} + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_STRTOD_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..332619a31270d709fda1c4f85248ae71546debde --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h @@ -0,0 +1,420 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef DOUBLE_CONVERSION_UTILS_H_ +#define DOUBLE_CONVERSION_UTILS_H_ + +// Use DOUBLE_CONVERSION_NON_PREFIXED_MACROS to get unprefixed macros as was +// the case in double-conversion releases prior to 3.1.6 + +#include +#include + +// For pre-C++11 compatibility +#if __cplusplus >= 201103L +#define DOUBLE_CONVERSION_NULLPTR nullptr +#else +#define DOUBLE_CONVERSION_NULLPTR NULL +#endif + +#include +#ifndef DOUBLE_CONVERSION_ASSERT +#define DOUBLE_CONVERSION_ASSERT(condition) \ + assert(condition) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ASSERT) +#define ASSERT DOUBLE_CONVERSION_ASSERT +#endif + +#ifndef DOUBLE_CONVERSION_UNIMPLEMENTED +#define DOUBLE_CONVERSION_UNIMPLEMENTED() (abort()) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNIMPLEMENTED) +#define UNIMPLEMENTED DOUBLE_CONVERSION_UNIMPLEMENTED +#endif + +#ifndef DOUBLE_CONVERSION_NO_RETURN +#ifdef _MSC_VER +#define DOUBLE_CONVERSION_NO_RETURN __declspec(noreturn) +#else +#define DOUBLE_CONVERSION_NO_RETURN __attribute__((noreturn)) +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(NO_RETURN) +#define NO_RETURN DOUBLE_CONVERSION_NO_RETURN +#endif + +#ifndef DOUBLE_CONVERSION_UNREACHABLE +#ifdef _MSC_VER +void DOUBLE_CONVERSION_NO_RETURN abort_noreturn(); +inline void abort_noreturn() { abort(); } +#define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn()) +#else +#define DOUBLE_CONVERSION_UNREACHABLE() (abort()) +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNREACHABLE) +#define UNREACHABLE DOUBLE_CONVERSION_UNREACHABLE +#endif + +// Not all compilers support __has_attribute and combining a check for both +// ifdef and __has_attribute on the same preprocessor line isn't portable. +#ifdef __has_attribute +# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) 0 +#endif + +#ifndef DOUBLE_CONVERSION_UNUSED +#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(unused) +#define DOUBLE_CONVERSION_UNUSED __attribute__((unused)) +#else +#define DOUBLE_CONVERSION_UNUSED +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNUSED) +#define UNUSED DOUBLE_CONVERSION_UNUSED +#endif + +#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(uninitialized) +#define DOUBLE_CONVERSION_STACK_UNINITIALIZED __attribute__((uninitialized)) +#else +#define DOUBLE_CONVERSION_STACK_UNINITIALIZED +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(STACK_UNINITIALIZED) +#define STACK_UNINITIALIZED DOUBLE_CONVERSION_STACK_UNINITIALIZED +#endif + +// Double operations detection based on target architecture. +// Linux uses a 80bit wide floating point stack on x86. This induces double +// rounding, which in turn leads to wrong results. +// An easy way to test if the floating-point operations are correct is to +// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then +// the result is equal to 89255e-22. +// The best way to test this, is to create a division-function and to compare +// the output of the division with the expected result. (Inlining must be +// disabled.) +// On Linux,x86 89255e-22 != Div_double(89255.0/1e22) +// +// For example: +/* +// -- in div.c +double Div_double(double x, double y) { return x / y; } + +// -- in main.c +double Div_double(double x, double y); // Forward declaration. 
+ +int main(int argc, char** argv) { + return Div_double(89255.0, 1e22) == 89255e-22; +} +*/ +// Run as follows ./main || echo "correct" +// +// If it prints "correct" then the architecture should be here, in the "correct" section. +#if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ + defined(__mips__) || \ + defined(__loongarch__) || \ + defined(__nios2__) || defined(__ghs) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ + defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ + defined(__SH4__) || defined(__alpha__) || \ + defined(_MIPS_ARCH_MIPS32R2) || defined(__ARMEB__) ||\ + defined(__AARCH64EL__) || defined(__aarch64__) || defined(__AARCH64EB__) || \ + defined(__riscv) || defined(__e2k__) || \ + defined(__or1k__) || defined(__arc__) || defined(__ARC64__) || \ + defined(__microblaze__) || defined(__XTENSA__) || \ + defined(__EMSCRIPTEN__) || defined(__wasm32__) +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#elif defined(__mc68000__) || \ + defined(__pnacl__) || defined(__native_client__) +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#elif defined(_M_IX86) || defined(__i386__) || defined(__i386) +#if defined(_WIN32) +// Windows uses a 64bit wide floating point stack. +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#else +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#endif // _WIN32 +#else +#error Target architecture was not detected as supported by Double-Conversion. +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(CORRECT_DOUBLE_OPERATIONS) +#define CORRECT_DOUBLE_OPERATIONS DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#endif + +#if defined(_WIN32) && !defined(__MINGW32__) + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; // NOLINT +typedef unsigned short uint16_t; // NOLINT +typedef int int32_t; +typedef unsigned int uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +// intptr_t and friends are defined in crtdefs.h through stdio.h. + +#else + +#include + +#endif + +typedef uint16_t uc16; + +// The following macro works on both 32 and 64-bit platforms. +// Usage: instead of writing 0x1234567890123456 +// write DOUBLE_CONVERSION_UINT64_2PART_C(0x12345678,90123456); +#define DOUBLE_CONVERSION_UINT64_2PART_C(a, b) (((static_cast(a) << 32) + 0x##b##u)) +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UINT64_2PART_C) +#define UINT64_2PART_C DOUBLE_CONVERSION_UINT64_2PART_C +#endif + +// The expression DOUBLE_CONVERSION_ARRAY_SIZE(a) is a compile-time constant of type +// size_t which represents the number of elements of the given +// array. You should only use DOUBLE_CONVERSION_ARRAY_SIZE on statically allocated +// arrays. 
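+// For example (illustrative):
+//
+//   int a[10];
+//   size_t n = DOUBLE_CONVERSION_ARRAY_SIZE(a);  // n == 10
+//
+// The second division in the macro below acts as a guard: for a pointer
+// argument whose pointee size does not evenly divide the pointer size,
+// sizeof(a) % sizeof(*(a)) is non-zero, so the divisor collapses to zero
+// and the expression fails to compile rather than returning nonsense.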
+#ifndef DOUBLE_CONVERSION_ARRAY_SIZE +#define DOUBLE_CONVERSION_ARRAY_SIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ + static_cast(!(sizeof(a) % sizeof(*(a))))) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ARRAY_SIZE) +#define ARRAY_SIZE DOUBLE_CONVERSION_ARRAY_SIZE +#endif + +// A macro to disallow the evil copy constructor and operator= functions +// This should be used in the private: declarations for a class +#ifndef DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN +#define DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_COPY_AND_ASSIGN) +#define DC_DISALLOW_COPY_AND_ASSIGN DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN +#endif + +// A macro to disallow all the implicit constructors, namely the +// default constructor, copy constructor and operator= functions. +// +// This should be used in the private: declarations for a class +// that wants to prevent anyone from instantiating it. This is +// especially useful for classes containing only static methods. +#ifndef DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS +#define DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName(); \ + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_IMPLICIT_CONSTRUCTORS) +#define DC_DISALLOW_IMPLICIT_CONSTRUCTORS DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS +#endif + +namespace arrow_vendored { +namespace double_conversion { + +inline int StrLength(const char* string) { + size_t length = strlen(string); + DOUBLE_CONVERSION_ASSERT(length == static_cast(static_cast(length))); + return static_cast(length); +} + +// This is a simplified version of V8's Vector class. +template +class Vector { + public: + Vector() : start_(DOUBLE_CONVERSION_NULLPTR), length_(0) {} + Vector(T* data, int len) : start_(data), length_(len) { + DOUBLE_CONVERSION_ASSERT(len == 0 || (len > 0 && data != DOUBLE_CONVERSION_NULLPTR)); + } + + // Returns a vector using the same backing storage as this one, + // spanning from and including 'from', to but not including 'to'. + Vector SubVector(int from, int to) { + DOUBLE_CONVERSION_ASSERT(to <= length_); + DOUBLE_CONVERSION_ASSERT(from < to); + DOUBLE_CONVERSION_ASSERT(0 <= from); + return Vector(start() + from, to - from); + } + + // Returns the length of the vector. + int length() const { return length_; } + + // Returns whether or not the vector is empty. + bool is_empty() const { return length_ == 0; } + + // Returns the pointer to the start of the data in the vector. + T* start() const { return start_; } + + // Access individual vector elements - checks bounds in debug mode. + T& operator[](int index) const { + DOUBLE_CONVERSION_ASSERT(0 <= index && index < length_); + return start_[index]; + } + + T& first() { return start_[0]; } + + T& last() { return start_[length_ - 1]; } + + void pop_back() { + DOUBLE_CONVERSION_ASSERT(!is_empty()); + --length_; + } + + private: + T* start_; + int length_; +}; + + +// Helper class for building result strings in a character buffer. The +// purpose of the class is to use safe operations that checks the +// buffer bounds on all operations in debug mode. 
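+// A minimal usage sketch (illustrative):
+//
+//   char buf[16];
+//   StringBuilder sb(buf, sizeof(buf));
+//   sb.AddString("3");
+//   sb.AddCharacter('.');
+//   sb.AddSubstring("14159", 2);
+//   char* s = sb.Finalize();  // s points at buf and holds "3.14"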
+class StringBuilder { + public: + StringBuilder(char* buffer, int buffer_size) + : buffer_(buffer, buffer_size), position_(0) { } + + ~StringBuilder() { if (!is_finalized()) Finalize(); } + + int size() const { return buffer_.length(); } + + // Get the current position in the builder. + int position() const { + DOUBLE_CONVERSION_ASSERT(!is_finalized()); + return position_; + } + + // Reset the position. + void Reset() { position_ = 0; } + + // Add a single character to the builder. It is not allowed to add + // 0-characters; use the Finalize() method to terminate the string + // instead. + void AddCharacter(char c) { + DOUBLE_CONVERSION_ASSERT(c != '\0'); + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_++] = c; + } + + // Add an entire string to the builder. Uses strlen() internally to + // compute the length of the input string. + void AddString(const char* s) { + AddSubstring(s, StrLength(s)); + } + + // Add the first 'n' characters of the given string 's' to the + // builder. The input string must have enough characters. + void AddSubstring(const char* s, int n) { + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ + n < buffer_.length()); + DOUBLE_CONVERSION_ASSERT(static_cast(n) <= strlen(s)); + memmove(&buffer_[position_], s, static_cast(n)); + position_ += n; + } + + + // Add character padding to the builder. If count is non-positive, + // nothing is added to the builder. + void AddPadding(char c, int count) { + for (int i = 0; i < count; i++) { + AddCharacter(c); + } + } + + // Finalize the string by 0-terminating it and returning the buffer. + char* Finalize() { + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_] = '\0'; + // Make sure nobody managed to add a 0-character to the + // buffer while building the string. + DOUBLE_CONVERSION_ASSERT(strlen(buffer_.start()) == static_cast(position_)); + position_ = -1; + DOUBLE_CONVERSION_ASSERT(is_finalized()); + return buffer_.start(); + } + + private: + Vector buffer_; + int position_; + + bool is_finalized() const { return position_ < 0; } + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); +}; + +// The type-based aliasing rule allows the compiler to assume that pointers of +// different types (for some definition of different) never alias each other. +// Thus the following code does not work: +// +// float f = foo(); +// int fbits = *(int*)(&f); +// +// The compiler 'knows' that the int pointer can't refer to f since the types +// don't match, so the compiler may cache f in a register, leaving random data +// in fbits. Using C++ style casts makes no difference, however a pointer to +// char data is assumed to alias any other pointer. This is the 'memcpy +// exception'. +// +// Bit_cast uses the memcpy exception to move the bits from a variable of one +// type of a variable of another type. Of course the end result is likely to +// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005) +// will completely optimize BitCast away. +// +// There is an additional use for BitCast. +// Recent gccs will warn when they see casts that may result in breakage due to +// the type-based aliasing rule. If you have checked that there is no breakage +// you can use BitCast to cast one pointer type to another. This confuses gcc +// enough that it can no longer see that you have cast one pointer type to +// another thus avoiding the warning. 
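+// For example (illustrative):
+//
+//   float f = 1.0f;
+//   uint32_t bits = BitCast<uint32_t>(f);  // bits == 0x3F800000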
+template +Dest BitCast(const Source& source) { + // Compile time assertion: sizeof(Dest) == sizeof(Source) + // A compile error here means your Dest and Source have different sizes. +#if __cplusplus >= 201103L + static_assert(sizeof(Dest) == sizeof(Source), + "source and destination size mismatch"); +#else + DOUBLE_CONVERSION_UNUSED + typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1]; +#endif + + Dest dest; + memmove(&dest, &source, sizeof(dest)); + return dest; +} + +template +Dest BitCast(Source* source) { + return BitCast(reinterpret_cast(source)); +} + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_UTILS_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp new file mode 100644 index 0000000000000000000000000000000000000000..36576cfa91d8c903576d56fb807f594f23a5bf7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp @@ -0,0 +1,649 @@ +/* + * PCG Random Number Generation for C++ + * + * Copyright 2014-2017 Melissa O'Neill , + * and the PCG Project contributors. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + * Licensed under the Apache License, Version 2.0 (provided in + * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) + * or under the MIT license (provided in LICENSE-MIT.txt and at + * http://opensource.org/licenses/MIT), at your option. This file may not + * be copied, modified, or distributed except according to those terms. + * + * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either + * express or implied. See your chosen license for details. + * + * For additional information about the PCG random number generation scheme, + * visit http://www.pcg-random.org/. + */ + +/* + * This file provides support code that is useful for random-number generation + * but not specific to the PCG generation scheme, including: + * - 128-bit int support for platforms where it isn't available natively + * - bit twiddling operations + * - I/O of 128-bit and 8-bit integers + * - Handling the evilness of SeedSeq + * - Support for efficiently producing random numbers less than a given + * bound + */ + +#ifndef PCG_EXTRAS_HPP_INCLUDED +#define PCG_EXTRAS_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __GNUC__ + #include +#endif + +/* + * Abstractions for compiler-specific directives + */ + +#ifdef __GNUC__ + #define PCG_NOINLINE __attribute__((noinline)) +#else + #define PCG_NOINLINE +#endif + +/* + * Some members of the PCG library use 128-bit math. When compiling on 64-bit + * platforms, both GCC and Clang provide 128-bit integer types that are ideal + * for the job. + * + * On 32-bit platforms (or with other compilers), we fall back to a C++ + * class that provides 128-bit unsigned integers instead. It may seem + * like we're reinventing the wheel here, because libraries already exist + * that support large integers, but most existing libraries provide a very + * generic multiprecision code, but here we're operating at a fixed size. + * Also, most other libraries are fairly heavyweight. So we use a direct + * implementation. Sadly, it's much slower than hand-coded assembly or + * direct CPU support. 
+ * + */ +#if __SIZEOF_INT128__ && !PCG_FORCE_EMULATED_128BIT_MATH + namespace arrow_vendored { + namespace pcg_extras { + typedef __uint128_t pcg128_t; + } + } + #define PCG_128BIT_CONSTANT(high,low) \ + ((pcg_extras::pcg128_t(high) << 64) + low) +#else + #include "pcg_uint128.hpp" + namespace arrow_vendored { + namespace pcg_extras { + typedef pcg_extras::uint_x4 pcg128_t; + } + } + #define PCG_128BIT_CONSTANT(high,low) \ + pcg_extras::pcg128_t(high,low) + #define PCG_EMULATED_128BIT_MATH 1 +#endif + + +namespace arrow_vendored { +namespace pcg_extras { + +/* + * We often need to represent a "number of bits". When used normally, these + * numbers are never greater than 128, so an unsigned char is plenty. + * If you're using a nonstandard generator of a larger size, you can set + * PCG_BITCOUNT_T to have it define it as a larger size. (Some compilers + * might produce faster code if you set it to an unsigned int.) + */ + +#ifndef PCG_BITCOUNT_T + typedef uint8_t bitcount_t; +#else + typedef PCG_BITCOUNT_T bitcount_t; +#endif + +/* + * C++ requires us to be able to serialize RNG state by printing or reading + * it from a stream. Because we use 128-bit ints, we also need to be able + * ot print them, so here is code to do so. + * + * This code provides enough functionality to print 128-bit ints in decimal + * and zero-padded in hex. It's not a full-featured implementation. + */ + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, pcg128_t value) +{ + auto desired_base = out.flags() & out.basefield; + bool want_hex = desired_base == out.hex; + + if (want_hex) { + uint64_t highpart = uint64_t(value >> 64); + uint64_t lowpart = uint64_t(value); + auto desired_width = out.width(); + if (desired_width > 16) { + out.width(desired_width - 16); + } + if (highpart != 0 || desired_width > 16) + out << highpart; + CharT oldfill = '\0'; + if (highpart != 0) { + out.width(16); + oldfill = out.fill('0'); + } + auto oldflags = out.setf(decltype(desired_base){}, out.showbase); + out << lowpart; + out.setf(oldflags); + if (highpart != 0) { + out.fill(oldfill); + } + return out; + } + constexpr size_t MAX_CHARS_128BIT = 40; + + char buffer[MAX_CHARS_128BIT]; + char* pos = buffer+sizeof(buffer); + *(--pos) = '\0'; + constexpr auto BASE = pcg128_t(10ULL); + do { + auto div = value / BASE; + auto mod = uint32_t(value - (div * BASE)); + *(--pos) = '0' + char(mod); + value = div; + } while(value != pcg128_t(0ULL)); + return out << pos; +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, pcg128_t& value) +{ + typename std::basic_istream::sentry s(in); + + if (!s) + return in; + + constexpr auto BASE = pcg128_t(10ULL); + pcg128_t current(0ULL); + bool did_nothing = true; + bool overflow = false; + for(;;) { + CharT wide_ch = in.get(); + if (!in.good()) + break; + auto ch = in.narrow(wide_ch, '\0'); + if (ch < '0' || ch > '9') { + in.unget(); + break; + } + did_nothing = false; + pcg128_t digit(uint32_t(ch - '0')); + pcg128_t timesbase = current*BASE; + overflow = overflow || timesbase < current; + current = timesbase + digit; + overflow = overflow || current < digit; + } + + if (did_nothing || overflow) { + in.setstate(std::ios::failbit); + if (overflow) + current = ~pcg128_t(0ULL); + } + + value = current; + + return in; +} + +/* + * Likewise, if people use tiny rngs, we'll be serializing uint8_t. + * If we just used the provided IO operators, they'd read/write chars, + * not ints, so we need to define our own. 
We *can* redefine this operator
+ * here because we're in our own namespace.
+ */
+
+template <typename CharT, typename Traits>
+std::basic_ostream<CharT,Traits>&
+operator<<(std::basic_ostream<CharT,Traits>& out, uint8_t value)
+{
+    return out << uint32_t(value);
+}
+
+template <typename CharT, typename Traits>
+std::basic_istream<CharT,Traits>&
+operator>>(std::basic_istream<CharT,Traits>& in, uint8_t& target)
+{
+    uint32_t value = 0xdecea5edU;
+    in >> value;
+    if (!in && value == 0xdecea5edU)
+        return in;
+    if (value > uint8_t(~0)) {
+        in.setstate(std::ios::failbit);
+        value = ~0U;
+    }
+    target = uint8_t(value);
+    return in;
+}
+
+/* Unfortunately, the above functions don't get found in preference to the
+ * built-in ones, so we create some more specific overloads that will.
+ * Ugh.
+ */
+
+inline std::ostream& operator<<(std::ostream& out, uint8_t value)
+{
+    return pcg_extras::operator<< <char>(out, value);
+}
+
+inline std::istream& operator>>(std::istream& in, uint8_t& value)
+{
+    return pcg_extras::operator>> <char>(in, value);
+}
+
+
+
+/*
+ * Useful bitwise operations.
+ */
+
+/*
+ * XorShifts are invertible, but they are something of a pain to invert.
+ * This function backs them out.  It's used by the whacky "inside out"
+ * generator defined later.
+ */
+
+template <typename itype>
+inline itype unxorshift(itype x, bitcount_t bits, bitcount_t shift)
+{
+    if (2*shift >= bits) {
+        return x ^ (x >> shift);
+    }
+    itype lowmask1 = (itype(1U) << (bits - shift*2)) - 1;
+    itype highmask1 = ~lowmask1;
+    itype top1 = x;
+    itype bottom1 = x & lowmask1;
+    top1 ^= top1 >> shift;
+    top1 &= highmask1;
+    x = top1 | bottom1;
+    itype lowmask2 = (itype(1U) << (bits - shift)) - 1;
+    itype bottom2 = x & lowmask2;
+    bottom2 = unxorshift(bottom2, bits - shift, shift);
+    bottom2 &= lowmask1;
+    return top1 | bottom2;
+}
+
+/*
+ * Rotate left and right.
+ *
+ * In an ideal world, compilers would spot idiomatic rotate code and convert
+ * it to a rotate instruction.  Of course, opinions vary on what the correct
+ * idiom is and how to spot it.  For clang, sometimes it generates better
+ * (but still crappy) code if you define PCG_USE_ZEROCHECK_ROTATE_IDIOM.
+ */
+
+template <typename itype>
+inline itype rotl(itype value, bitcount_t rot)
+{
+    constexpr bitcount_t bits = sizeof(itype) * 8;
+    constexpr bitcount_t mask = bits - 1;
+#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
+    return rot ? (value << rot) | (value >> (bits - rot)) : value;
+#else
+    return (value << rot) | (value >> ((- rot) & mask));
+#endif
+}
+
+template <typename itype>
+inline itype rotr(itype value, bitcount_t rot)
+{
+    constexpr bitcount_t bits = sizeof(itype) * 8;
+    constexpr bitcount_t mask = bits - 1;
+#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
+    return rot ? (value >> rot) | (value << (bits - rot)) : value;
+#else
+    return (value >> rot) | (value << ((- rot) & mask));
+#endif
+}
+
+/* Unfortunately, both Clang and GCC sometimes perform poorly when it comes
+ * to properly recognizing idiomatic rotate code, so for now we also provide
+ * assembler directives (enabled with PCG_USE_INLINE_ASM).  Boo, hiss.
+ * (I hope that these compilers get better so that this code can die.)
+ *
+ * These overloads will be preferred over the general template code above.
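+ * For example (illustrative): rotr(uint32_t(0x80000001u), 1) yields
+ * 0xC0000000u; the low-order bit wraps around to become the new top bit.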
+ */ +#if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__) + +inline uint8_t rotr(uint8_t value, bitcount_t rot) +{ + asm ("rorb %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +inline uint16_t rotr(uint16_t value, bitcount_t rot) +{ + asm ("rorw %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +inline uint32_t rotr(uint32_t value, bitcount_t rot) +{ + asm ("rorl %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} + +#if __x86_64__ +inline uint64_t rotr(uint64_t value, bitcount_t rot) +{ + asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot)); + return value; +} +#endif // __x86_64__ + +#elif defined(_MSC_VER) + // Use MSVC++ bit rotation intrinsics + +#pragma intrinsic(_rotr, _rotr64, _rotr8, _rotr16) + +inline uint8_t rotr(uint8_t value, bitcount_t rot) +{ + return _rotr8(value, rot); +} + +inline uint16_t rotr(uint16_t value, bitcount_t rot) +{ + return _rotr16(value, rot); +} + +inline uint32_t rotr(uint32_t value, bitcount_t rot) +{ + return _rotr(value, rot); +} + +inline uint64_t rotr(uint64_t value, bitcount_t rot) +{ + return _rotr64(value, rot); +} + +#endif // PCG_USE_INLINE_ASM + + +/* + * The C++ SeedSeq concept (modelled by seed_seq) can fill an array of + * 32-bit integers with seed data, but sometimes we want to produce + * larger or smaller integers. + * + * The following code handles this annoyance. + * + * uneven_copy will copy an array of 32-bit ints to an array of larger or + * smaller ints (actually, the code is general it only needing forward + * iterators). The copy is identical to the one that would be performed if + * we just did memcpy on a standard little-endian machine, but works + * regardless of the endian of the machine (or the weirdness of the ints + * involved). + * + * generate_to initializes an array of integers using a SeedSeq + * object. It is given the size as a static constant at compile time and + * tries to avoid memory allocation. If we're filling in 32-bit constants + * we just do it directly. If we need a separate buffer and it's small, + * we allocate it on the stack. Otherwise, we fall back to heap allocation. + * Ugh. + * + * generate_one produces a single value of some integral type using a + * SeedSeq object. + */ + + /* uneven_copy helper, case where destination ints are less than 32 bit. */ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::true_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bitcount_t SRC_SIZE = sizeof(src_t); + constexpr bitcount_t DEST_SIZE = sizeof(dest_t); + constexpr bitcount_t DEST_BITS = DEST_SIZE * 8; + constexpr bitcount_t SCALE = SRC_SIZE / DEST_SIZE; + + size_t count = 0; + src_t value = 0; + + while (dest_first != dest_last) { + if ((count++ % SCALE) == 0) + value = *src_first++; // Get more bits + else + value >>= DEST_BITS; // Move down bits + + *dest_first++ = dest_t(value); // Truncates, ignores high bits. + } + return src_first; +} + + /* uneven_copy helper, case where destination ints are more than 32 bit. 
*/ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::false_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr auto SRC_SIZE = sizeof(src_t); + constexpr auto SRC_BITS = SRC_SIZE * 8; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto SCALE = (DEST_SIZE+SRC_SIZE-1) / SRC_SIZE; + + while (dest_first != dest_last) { + dest_t value(0UL); + unsigned int shift = 0; + + for (size_t i = 0; i < SCALE; ++i) { + value |= dest_t(*src_first++) << shift; + shift += SRC_BITS; + } + + *dest_first++ = value; + } + return src_first; +} + +/* uneven_copy, call the right code for larger vs. smaller */ + +template +inline SrcIter uneven_copy(SrcIter src_first, + DestIter dest_first, DestIter dest_last) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bool DEST_IS_SMALLER = sizeof(dest_t) < sizeof(src_t); + + return uneven_copy_impl(src_first, dest_first, dest_last, + std::integral_constant{}); +} + +/* generate_to, fill in a fixed-size array of integral type using a SeedSeq + * (actually works for any random-access iterator) + */ + +template +inline void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::true_type) +{ + generator.generate(dest, dest+size); +} + +template +void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::false_type) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto GEN_SIZE = sizeof(uint32_t); + + constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; + constexpr size_t FROM_ELEMS = + GEN_IS_SMALLER + ? size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) + : (size + (GEN_SIZE / DEST_SIZE) - 1) + / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); + // this odd code ^^^^^^^^^^^^^^^^^ is work-around for + // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 + + if (FROM_ELEMS <= 1024) { + uint32_t buffer[FROM_ELEMS]; + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + } else { + uint32_t* buffer = static_cast(malloc(GEN_SIZE * FROM_ELEMS)); + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + free(static_cast(buffer)); + } +} + +template +inline void generate_to(SeedSeq&& generator, DestIter dest) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); + + generate_to_impl(std::forward(generator), dest, + std::integral_constant{}); +} + +/* generate_one, produce a value of integral type using a SeedSeq + * (optionally, we can have it produce more than one and pick which one + * we want) + */ + +template +inline UInt generate_one(SeedSeq&& generator) +{ + UInt result[N]; + generate_to(std::forward(generator), result); + return result[i]; +} + +template +auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) + -> typename RngType::result_type +{ + typedef typename RngType::result_type rtype; + rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) + % upper_bound; + for (;;) { + rtype r = rng() - RngType::min(); + if (r >= threshold) + return r % upper_bound; + } +} + +template +void shuffle(Iter from, Iter to, RandType&& rng) +{ + typedef typename std::iterator_traits::difference_type delta_t; + typedef typename std::remove_reference::type::result_type result_t; + auto count = to - from; 
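+    // (Descriptive note) This is a Fisher-Yates shuffle: each pass draws a
+    // bounded random index into the still-unshuffled prefix and swaps that
+    // element into the last open slot, shrinking the range by one.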
+    while (count > 1) {
+        delta_t chosen = delta_t(bounded_rand(rng, result_t(count)));
+        --count;
+        --to;
+        using std::swap;
+        swap(*(from + chosen), *to);
+    }
+}
+
+/*
+ * Although std::seed_seq is useful, it isn't everything.  Often we want to
+ * initialize a random-number generator some other way, such as from a random
+ * device.
+ *
+ * Technically, it does not meet the requirements of a SeedSequence because
+ * it lacks some of the rarely-used member functions (some of which would
+ * be impossible to provide).  However the C++ standard is quite specific
+ * that actual engines only call the generate method, so it ought not to be
+ * a problem in practice.
+ */
+
+template <typename RngType>
+class seed_seq_from {
+private:
+    RngType rng_;
+
+    typedef uint_least32_t result_type;
+
+public:
+    template<typename... Args>
+    seed_seq_from(Args&&... args) :
+        rng_(std::forward<Args>(args)...)
+    {
+        // Nothing (else) to do...
+    }
+
+    template<typename Iter>
+    void generate(Iter start, Iter finish)
+    {
+        for (auto i = start; i != finish; ++i)
+            *i = result_type(rng_());
+    }
+
+    constexpr size_t size() const
+    {
+        return (sizeof(typename RngType::result_type) > sizeof(result_type)
+                && RngType::max() > ~size_t(0UL))
+             ? ~size_t(0UL)
+             : size_t(RngType::max());
+    }
+};
+
+// Sometimes, when debugging or testing, it's handy to be able to print the
+// name of a type (in human-readable form).  This code allows the idiom:
+//
+//      cout << printable_typename<my_foo_type_t>()
+//
+// to print out my_foo_type_t (or its concrete type if it is a synonym)
+
+#if __cpp_rtti || __GXX_RTTI
+
+template <typename T>
+struct printable_typename {};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, printable_typename<T>) {
+    const char *implementation_typename = typeid(T).name();
+#ifdef __GNUC__
+    int status;
+    char* pretty_name =
+        abi::__cxa_demangle(implementation_typename, nullptr, nullptr, &status);
+    if (status == 0)
+        out << pretty_name;
+    free(static_cast<void*>(pretty_name));
+    if (status == 0)
+        return out;
+#endif
+    out << implementation_typename;
+    return out;
+}
+
+#endif  // __cpp_rtti || __GXX_RTTI
+
+} // namespace pcg_extras
+} // namespace arrow_vendored
+
+#endif // PCG_EXTRAS_HPP_INCLUDED
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e39e61e908a2a30863673fecd0f19e5090b40179
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp
@@ -0,0 +1,1954 @@
+/*
+ * PCG Random Number Generation for C++
+ *
+ * Copyright 2014-2019 Melissa O'Neill <oneill@pcg-random.org>,
+ *                     and the PCG Project contributors.
+ *
+ * SPDX-License-Identifier: (Apache-2.0 OR MIT)
+ *
+ * Licensed under the Apache License, Version 2.0 (provided in
+ * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0)
+ * or under the MIT license (provided in LICENSE-MIT.txt and at
+ * http://opensource.org/licenses/MIT), at your option. This file may not
+ * be copied, modified, or distributed except according to those terms.
+ *
+ * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied.  See your chosen license for details.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * visit http://www.pcg-random.org/.
+ */
+
+/*
+ * This code provides the reference implementation of the PCG family of
+ * random number generators.
The code is complex because it implements + * + * - several members of the PCG family, specifically members corresponding + * to the output functions: + * - XSH RR (good for 64-bit state, 32-bit output) + * - XSH RS (good for 64-bit state, 32-bit output) + * - XSL RR (good for 128-bit state, 64-bit output) + * - RXS M XS (statistically most powerful generator) + * - XSL RR RR (good for 128-bit state, 128-bit output) + * - and RXS, RXS M, XSH, XSL (mostly for testing) + * - at potentially *arbitrary* bit sizes + * - with four different techniques for random streams (MCG, one-stream + * LCG, settable-stream LCG, unique-stream LCG) + * - and the extended generation schemes allowing arbitrary periods + * - with all features of C++11 random number generation (and more), + * some of which are somewhat painful, including + * - initializing with a SeedSequence which writes 32-bit values + * to memory, even though the state of the generator may not + * use 32-bit values (it might use smaller or larger integers) + * - I/O for RNGs and a prescribed format, which needs to handle + * the issue that 8-bit and 128-bit integers don't have working + * I/O routines (e.g., normally 8-bit = char, not integer) + * - equality and inequality for RNGs + * - and a number of convenience typedefs to mask all the complexity + * + * The code employes a fairly heavy level of abstraction, and has to deal + * with various C++ minutia. If you're looking to learn about how the PCG + * scheme works, you're probably best of starting with one of the other + * codebases (see www.pcg-random.org). But if you're curious about the + * constants for the various output functions used in those other, simpler, + * codebases, this code shows how they are calculated. + * + * On the positive side, at least there are convenience typedefs so that you + * can say + * + * pcg32 myRNG; + * + * rather than: + * + * pcg_detail::engine< + * uint32_t, // Output Type + * uint64_t, // State Type + * pcg_detail::xsh_rr_mixin, true, // Output Func + * pcg_detail::specific_stream, // Stream Kind + * pcg_detail::default_multiplier // LCG Mult + * > myRNG; + * + */ + +#ifndef PCG_RAND_HPP_INCLUDED +#define PCG_RAND_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(disable:4146) +#endif + +#ifdef _MSC_VER + #define PCG_ALWAYS_INLINE __forceinline +#elif __GNUC__ + #define PCG_ALWAYS_INLINE __attribute__((always_inline)) +#else + #define PCG_ALWAYS_INLINE inline +#endif + +/* + * The pcg_extras namespace contains some support code that is likley to + * be useful for a variety of RNGs, including: + * - 128-bit int support for platforms where it isn't available natively + * - bit twiddling operations + * - I/O of 128-bit and 8-bit integers + * - Handling the evilness of SeedSeq + * - Support for efficiently producing random numbers less than a given + * bound + */ + +#include "pcg_extras.hpp" + +namespace arrow_vendored { +namespace pcg_detail { + +using namespace pcg_extras; + +/* + * The LCG generators need some constants to function. This code lets you + * look up the constant by *type*. For example + * + * default_multiplier::multiplier() + * + * gives you the default multipler for 32-bit integers. We use the name + * of the constant and not a generic word like value to allow these classes + * to be used as mixins. 
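+ *
+ * As an illustrative sketch (checked against the constants defined just
+ * below), the lookup is type-driven and resolves entirely at compile time:
+ *
+ *     static_assert(default_multiplier<uint64_t>::multiplier()
+ *                       == 6364136223846793005ULL,
+ *                   "Knuth's MMIX multiplier, selected purely by type");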
+ */ + +template +struct default_multiplier { + // Not defined for an arbitrary type +}; + +template +struct default_increment { + // Not defined for an arbitrary type +}; + +#define PCG_DEFINE_CONSTANT(type, what, kind, constant) \ + template <> \ + struct what ## _ ## kind { \ + static constexpr type kind() { \ + return constant; \ + } \ + }; + +PCG_DEFINE_CONSTANT(uint8_t, default, multiplier, 141U) +PCG_DEFINE_CONSTANT(uint8_t, default, increment, 77U) + +PCG_DEFINE_CONSTANT(uint16_t, default, multiplier, 12829U) +PCG_DEFINE_CONSTANT(uint16_t, default, increment, 47989U) + +PCG_DEFINE_CONSTANT(uint32_t, default, multiplier, 747796405U) +PCG_DEFINE_CONSTANT(uint32_t, default, increment, 2891336453U) + +PCG_DEFINE_CONSTANT(uint64_t, default, multiplier, 6364136223846793005ULL) +PCG_DEFINE_CONSTANT(uint64_t, default, increment, 1442695040888963407ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, default, multiplier, + PCG_128BIT_CONSTANT(2549297995355413924ULL,4865540595714422341ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, default, increment, + PCG_128BIT_CONSTANT(6364136223846793005ULL,1442695040888963407ULL)) + +/* Alternative (cheaper) multipliers for 128-bit */ + +template +struct cheap_multiplier : public default_multiplier { + // For most types just use the default. +}; + +template <> +struct cheap_multiplier { + static constexpr uint64_t multiplier() { + return 0xda942042e4dd58b5ULL; + } +}; + + +/* + * Each PCG generator is available in four variants, based on how it applies + * the additive constant for its underlying LCG; the variations are: + * + * single stream - all instances use the same fixed constant, thus + * the RNG always somewhere in same sequence + * mcg - adds zero, resulting in a single stream and reduced + * period + * specific stream - the constant can be changed at any time, selecting + * a different random sequence + * unique stream - the constant is based on the memory address of the + * object, thus every RNG has its own unique sequence + * + * This variation is provided though mixin classes which define a function + * value called increment() that returns the nesessary additive constant. + */ + + + +/* + * unique stream + */ + + +template +class unique_stream { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + constexpr itype increment() const { + return itype(reinterpret_cast(this) | 1); + } + + constexpr itype stream() const + { + return increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype) < sizeof(size_t) ? sizeof(itype) + : sizeof(size_t))*8 - 1u; + } + +protected: + constexpr unique_stream() = default; +}; + + +/* + * no stream (mcg) + */ + +template +class no_stream { +protected: + static constexpr bool is_mcg = true; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) 
+ { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype increment() { + return 0; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr no_stream() = default; +}; + + +/* + * single stream/sequence (oneseq) + */ + +template +class oneseq_stream : public default_increment { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype stream() + { + return default_increment::increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr oneseq_stream() = default; +}; + + +/* + * specific stream + */ + +template +class specific_stream { +protected: + static constexpr bool is_mcg = false; + + itype inc_ = default_increment::increment(); + +public: + typedef itype state_type; + typedef itype stream_state; + + constexpr itype increment() const { + return inc_; + } + + itype stream() + { + return inc_ >> 1; + } + + void set_stream(itype specific_seq) + { + inc_ = (specific_seq << 1) | 1; + } + + static constexpr bool can_specify_stream = true; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype)*8) - 1u; + } + +protected: + specific_stream() = default; + + specific_stream(itype specific_seq) + : inc_(itype(specific_seq << 1) | itype(1U)) + { + // Nothing (else) to do. + } +}; + + +/* + * This is where it all comes together. This function joins together three + * mixin classes which define + * - the LCG additive constant (the stream) + * - the LCG multiplier + * - the output function + * in addition, we specify the type of the LCG state, and the result type, + * and whether to use the pre-advance version of the state for the output + * (increasing instruction-level parallelism) or the post-advance version + * (reducing register pressure). + * + * Given the high level of parameterization, the code has to use some + * template-metaprogramming tricks to handle some of the suble variations + * involved. + */ + +template , + typename multiplier_mixin = default_multiplier > +class engine : protected output_mixin, + public stream_mixin, + protected multiplier_mixin { +protected: + itype state_; + + struct can_specify_stream_tag {}; + struct no_specifiable_stream_tag {}; + + using stream_mixin::increment; + using multiplier_mixin::multiplier; + +public: + typedef xtype result_type; + typedef itype state_type; + + static constexpr size_t period_pow2() + { + return sizeof(state_type)*8 - 2*stream_mixin::is_mcg; + } + + // It would be nice to use std::numeric_limits for these, but + // we can't be sure that it'd be defined for the 128-bit types. 
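+    // For example, for pcg32 (result_type == uint32_t) these report
+    // min() == 0 and max() == 0xFFFFFFFF, i.e. the full range of the output
+    // type, which is exactly what pcg_extras::bounded_rand assumes when it
+    // computes its rejection threshold.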
+ + static constexpr result_type min() + { + return result_type(0UL); + } + + static constexpr result_type max() + { + return result_type(~result_type(0UL)); + } + +protected: + itype bump(itype state) + { + return state * multiplier() + increment(); + } + + itype base_generate() + { + return state_ = bump(state_); + } + + itype base_generate0() + { + itype old_state = state_; + state_ = bump(state_); + return old_state; + } + +public: + result_type operator()() + { + if (output_previous) + return this->output(base_generate0()); + else + return this->output(base_generate()); + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + +protected: + static itype advance(itype state, itype delta, + itype cur_mult, itype cur_plus); + + static itype distance(itype cur_state, itype newstate, itype cur_mult, + itype cur_plus, itype mask = ~itype(0U)); + + itype distance(itype newstate, itype mask = itype(~itype(0U))) const + { + return distance(state_, newstate, multiplier(), increment(), mask); + } + +public: + void advance(itype delta) + { + state_ = advance(state_, delta, this->multiplier(), this->increment()); + } + + void backstep(itype delta) + { + advance(-delta); + } + + void discard(itype delta) + { + advance(delta); + } + + bool wrapped() + { + if (stream_mixin::is_mcg) { + // For MCGs, the low order two bits never change. In this + // implementation, we keep them fixed at 3 to make this test + // easier. + return state_ == 3; + } else { + return state_ == 0; + } + } + + engine(itype state = itype(0xcafef00dd15ea5e5ULL)) + : state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + engine(itype state, typename sm::stream_state stream_seed) + : stream_mixin(stream_seed), + state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + !stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + no_specifiable_stream_tag>::type = {}) + : engine(generate_one(std::forward(seedSeq))) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + can_specify_stream_tag>::type = {}) + { + itype seeddata[2]; + generate_to<2>(std::forward(seedSeq), seeddata); + seed(seeddata[1], seeddata[0]); + } + + + template + void seed(Args&&... 
args) + { + new (this) engine(std::forward(args)...); + } + + template + friend bool operator==(const engine&, + const engine&); + + template + friend itype1 operator-(const engine&, + const engine&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const engine&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + engine& rng); +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const engine& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + + +template +std::basic_istream& +operator>>(std::basic_istream& in, + engine& rng) +{ + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + itype multiplier, increment, state; + in >> multiplier >> increment >> state; + + if (!in.fail()) { + bool good = true; + if (multiplier != rng.multiplier()) { + good = false; + } else if (rng.can_specify_stream) { + rng.set_stream(increment >> 1); + } else if (increment != rng.increment()) { + good = false; + } + if (good) { + rng.state_ = state; + } else { + in.clear(std::ios::failbit); + } + } + + in.flags(orig_flags); + return in; +} + + +template +itype engine::advance( + itype state, itype delta, itype cur_mult, itype cur_plus) +{ + // The method used here is based on Brown, "Random Number Generation + // with Arbitrary Stride,", Transactions of the American Nuclear + // Society (Nov. 1994). The algorithm is very similar to fast + // exponentiation. + // + // Even though delta is an unsigned integer, we can pass a + // signed integer to go backwards, it just goes "the long way round". + + constexpr itype ZERO = 0u; // itype may be a non-trivial types, so + constexpr itype ONE = 1u; // we define some ugly constants. + itype acc_mult = 1; + itype acc_plus = 0; + while (delta > ZERO) { + if (delta & ONE) { + acc_mult *= cur_mult; + acc_plus = acc_plus*cur_mult + cur_plus; + } + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + delta >>= 1; + } + return acc_mult * state + acc_plus; +} + +template +itype engine::distance( + itype cur_state, itype newstate, itype cur_mult, itype cur_plus, itype mask) +{ + constexpr itype ONE = 1u; // itype could be weird, so use constant + bool is_mcg = cur_plus == itype(0); + itype the_bit = is_mcg ? itype(4u) : itype(1u); + itype distance = 0u; + while ((cur_state & mask) != (newstate & mask)) { + if ((cur_state & the_bit) != (newstate & the_bit)) { + cur_state = cur_state * cur_mult + cur_plus; + distance |= the_bit; + } + assert((cur_state & the_bit) == (newstate & the_bit)); + the_bit <<= 1; + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + } + return is_mcg ? 
distance >> 2 : distance; +} + +template +itype operator-(const engine& lhs, + const engine& rhs) +{ + static_assert( + std::is_same::value && + std::is_same::value, + "Incomparable generators"); + if (lhs.increment() == rhs.increment()) { + return rhs.distance(lhs.state_); + } else { + constexpr itype ONE = 1u; + itype lhs_diff = lhs.increment() + (lhs.multiplier()-ONE) * lhs.state_; + itype rhs_diff = rhs.increment() + (rhs.multiplier()-ONE) * rhs.state_; + if ((lhs_diff & itype(3u)) != (rhs_diff & itype(3u))) { + rhs_diff = -rhs_diff; + } + return rhs.distance(rhs_diff, lhs_diff, rhs.multiplier(), itype(0u)); + } +} + + +template +bool operator==(const engine& lhs, + const engine& rhs) +{ + return (lhs.multiplier() == rhs.multiplier()) + && (lhs.increment() == rhs.increment()) + && (lhs.state_ == rhs.state_); +} + +template +inline bool operator!=(const engine& lhs, + const engine& rhs) +{ + return !operator==(lhs,rhs); +} + + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using oneseq_base = engine, output_previous, + oneseq_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using unique_base = engine, output_previous, + unique_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using setseq_base = engine, output_previous, + specific_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using mcg_base = engine, output_previous, + no_stream, + multiplier_mixin >; + +/* + * OUTPUT FUNCTIONS. + * + * These are the core of the PCG generation scheme. They specify how to + * turn the base LCG's internal state into the output value of the final + * generator. + * + * They're implemented as mixin classes. + * + * All of the classes have code that is written to allow it to be applied + * at *arbitrary* bit sizes, although in practice they'll only be used at + * standard sizes supported by C++. + */ + +/* + * XSH RS -- high xorshift, followed by a random shift + * + * Fast. A good performer. + */ + +template +struct xsh_rs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t opbits = + sparebits-5 >= 64 ? 5 + : sparebits-4 >= 32 ? 4 + : sparebits-3 >= 16 ? 3 + : sparebits-2 >= 4 ? 2 + : sparebits-1 >= 1 ? 1 + : 0; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t maxrandshift = mask; + constexpr bitcount_t topspare = opbits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = topspare + (xtypebits+maxrandshift)/2; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> xshift; + xtype result = xtype(internal >> (bottomspare - maxrandshift + rshift)); + return result; + } +}; + +/* + * XSH RR -- high xorshift, followed by a random rotate + * + * Fast. A good performer. Slightly better statistically than XSH RS. 
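+ *
+ * As a concrete illustration (a sketch of the fully-folded constants, not
+ * extra code used by this header): for 64-bit state and 32-bit output this
+ * mixin reduces to the familiar pcg32 output step,
+ *
+ *     uint32_t xorshifted = uint32_t(((state >> 18u) ^ state) >> 27u);
+ *     uint32_t rot        = uint32_t(state >> 59u);
+ *     return (xorshifted >> rot) | (xorshifted << ((-rot) & 31u));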
+ */ + +template +struct xsh_rr_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype)*8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t wantedopbits = + xtypebits >= 128 ? 7 + : xtypebits >= 64 ? 6 + : xtypebits >= 32 ? 5 + : xtypebits >= 16 ? 4 + : 3; + constexpr bitcount_t opbits = + sparebits >= wantedopbits ? wantedopbits + : sparebits; + constexpr bitcount_t amplifier = wantedopbits - opbits; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t topspare = opbits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits)/2; + bitcount_t rot = opbits ? bitcount_t(internal >> (bits - opbits)) & mask + : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + xtype result = xtype(internal >> bottomspare); + result = rotr(result, amprot); + return result; + } +}; + +/* + * RXS -- random xorshift + */ + +template +struct rxs_mixin { +static xtype output_rxs(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype)*8); + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t extrashift = (xtypebits - shift)/2; + bitcount_t rshift = shift > 64+8 ? (internal >> (bits - 6)) & 63 + : shift > 32+4 ? (internal >> (bits - 5)) & 31 + : shift > 16+2 ? (internal >> (bits - 4)) & 15 + : shift > 8+1 ? (internal >> (bits - 3)) & 7 + : shift > 4+1 ? (internal >> (bits - 2)) & 3 + : shift > 2+1 ? (internal >> (bits - 1)) & 1 + : 0; + internal ^= internal >> (shift + extrashift - rshift); + xtype result = internal >> rshift; + return result; + } +}; + +/* + * RXS M XS -- random xorshift, mcg multiply, fixed xorshift + * + * The most statistically powerful generator, but all those steps + * make it slower than some of the others. We give it the rottenest jobs. + * + * Because it's usually used in contexts where the state type and the + * result type are the same, it is a permutation and is thus invertable. + * We thus provide a function to invert it. This function is used to + * for the "inside out" generator used by the extended generator. + */ + +/* Defined type-based concepts for the multiplication step. They're actually + * all derived by truncating the 128-bit, which was computed to be a good + * "universal" constant. 
+ */ + +template +struct mcg_multiplier { + // Not defined for an arbitrary type +}; + +template +struct mcg_unmultiplier { + // Not defined for an arbitrary type +}; + +PCG_DEFINE_CONSTANT(uint8_t, mcg, multiplier, 217U) +PCG_DEFINE_CONSTANT(uint8_t, mcg, unmultiplier, 105U) + +PCG_DEFINE_CONSTANT(uint16_t, mcg, multiplier, 62169U) +PCG_DEFINE_CONSTANT(uint16_t, mcg, unmultiplier, 28009U) + +PCG_DEFINE_CONSTANT(uint32_t, mcg, multiplier, 277803737U) +PCG_DEFINE_CONSTANT(uint32_t, mcg, unmultiplier, 2897767785U) + +PCG_DEFINE_CONSTANT(uint64_t, mcg, multiplier, 12605985483714917081ULL) +PCG_DEFINE_CONSTANT(uint64_t, mcg, unmultiplier, 15009553638781119849ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, mcg, multiplier, + PCG_128BIT_CONSTANT(17766728186571221404ULL, 12605985483714917081ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, mcg, unmultiplier, + PCG_128BIT_CONSTANT(14422606686972528997ULL, 15009553638781119849ULL)) + + +template +struct rxs_m_xs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + result ^= result >> ((2U*xtypebits+2U)/3U); + return result; + } + + static itype unoutput(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = bits >= 128 ? 6 + : bits >= 64 ? 5 + : bits >= 32 ? 4 + : bits >= 16 ? 3 + : 2; + constexpr bitcount_t mask = (1 << opbits) - 1; + + internal = unxorshift(internal, bits, (2U*bits+2U)/3U); + + internal *= mcg_unmultiplier::unmultiplier(); + + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal = unxorshift(internal, bits, opbits + rshift); + + return internal; + } +}; + + +/* + * RXS M -- random xorshift, mcg multiply + */ + +template +struct rxs_m_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + return result; + } +}; + + +/* + * DXSM -- double xorshift multiply + * + * This is a new, more powerful output permutation (added in 2019). It's + * a more comprehensive scrambling than RXS M, but runs faster on 128-bit + * types. Although primarily intended for use at large sizes, also works + * at smaller sizes as well. + * + * This permutation is similar to xorshift multiply hash functions, except + * that one of the multipliers is the LCG multiplier (to avoid needing to + * have a second constant) and the other is based on the low-order bits. 
+ * This latter aspect means that the scrambling applied to the high bits + * depends on the low bits, and makes it (to my eye) impractical to back + * out the permutation without having the low-order bits. + */ + +template +struct dxsm_mixin { + inline xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t itypebits = bitcount_t(sizeof(itype) * 8); + static_assert(xtypebits <= itypebits/2, + "Output type must be half the size of the state type."); + + xtype hi = xtype(internal >> (itypebits - xtypebits)); + xtype lo = xtype(internal); + + lo |= 1; + hi ^= hi >> (xtypebits/2); + hi *= xtype(cheap_multiplier::multiplier()); + hi ^= hi >> (3*(xtypebits/4)); + hi *= lo; + return hi; + } +}; + + +/* + * XSL RR -- fixed xorshift (to low bits), random rotate + * + * Useful for 128-bit types that are split across two CPU registers. + */ + +template +struct xsl_rr_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t wantedopbits = xtypebits >= 128 ? 7 + : xtypebits >= 64 ? 6 + : xtypebits >= 32 ? 5 + : xtypebits >= 16 ? 4 + : 3; + constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits + : sparebits; + constexpr bitcount_t amplifier = wantedopbits - opbits; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + bitcount_t rot = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + xtype result = xtype(internal >> bottomspare); + result = rotr(result, amprot); + return result; + } +}; + + +/* + * XSL RR RR -- fixed xorshift (to low bits), random rotate (both parts) + * + * Useful for 128-bit types that are split across two CPU registers. + * If you really want an invertable 128-bit RNG, I guess this is the one. + */ + +template struct halfsize_trait {}; +template <> struct halfsize_trait { typedef uint64_t type; }; +template <> struct halfsize_trait { typedef uint32_t type; }; +template <> struct halfsize_trait { typedef uint16_t type; }; +template <> struct halfsize_trait { typedef uint8_t type; }; + +template +struct xsl_rr_rr_mixin { + typedef typename halfsize_trait::type htype; + + static itype output(itype internal) + { + constexpr bitcount_t htypebits = bitcount_t(sizeof(htype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - htypebits; + constexpr bitcount_t wantedopbits = htypebits >= 128 ? 7 + : htypebits >= 64 ? 6 + : htypebits >= 32 ? 5 + : htypebits >= 16 ? 4 + : 3; + constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits + : sparebits; + constexpr bitcount_t amplifier = wantedopbits - opbits; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t xshift = (topspare + htypebits) / 2; + + bitcount_t rot = + opbits ? 
bitcount_t(internal >> (bits - opbits)) & mask : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + htype lowbits = htype(internal); + lowbits = rotr(lowbits, amprot); + htype highbits = htype(internal >> topspare); + bitcount_t rot2 = lowbits & mask; + bitcount_t amprot2 = (rot2 << amplifier) & mask; + highbits = rotr(highbits, amprot2); + return (itype(highbits) << topspare) ^ itype(lowbits); + } +}; + + +/* + * XSH -- fixed xorshift (to high bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsh_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = 0; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + +/* + * XSL -- fixed xorshift (to low bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsl_mixin { + inline xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + + +/* ---- End of Output Functions ---- */ + + +template +struct inside_out : private baseclass { + inside_out() = delete; + + typedef typename baseclass::result_type result_type; + typedef typename baseclass::state_type state_type; + static_assert(sizeof(result_type) == sizeof(state_type), + "Require a RNG whose output function is a permutation"); + + static bool external_step(result_type& randval, size_t i) + { + state_type state = baseclass::unoutput(randval); + state = state * baseclass::multiplier() + baseclass::increment() + + state_type(i*2); + result_type result = baseclass::output(state); + randval = result; + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + return result == zero; + } + + static bool external_advance(result_type& randval, size_t i, + result_type delta, bool forwards = true) + { + state_type state = baseclass::unoutput(randval); + state_type mult = baseclass::multiplier(); + state_type inc = baseclass::increment() + state_type(i*2); + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + state_type dist_to_zero = baseclass::distance(state, zero, mult, inc); + bool crosses_zero = + forwards ? 
dist_to_zero <= delta + : (-dist_to_zero) <= delta; + if (!forwards) + delta = -delta; + state = baseclass::advance(state, delta, mult, inc); + randval = baseclass::output(state); + return crosses_zero; + } +}; + + +template +class extended : public baseclass { +public: + typedef typename baseclass::state_type state_type; + typedef typename baseclass::result_type result_type; + typedef inside_out insideout; + +private: + static constexpr bitcount_t rtypebits = sizeof(result_type)*8; + static constexpr bitcount_t stypebits = sizeof(state_type)*8; + + static constexpr bitcount_t tick_limit_pow2 = 64U; + + static constexpr size_t table_size = 1UL << table_pow2; + static constexpr size_t table_shift = stypebits - table_pow2; + static constexpr state_type table_mask = + (state_type(1U) << table_pow2) - state_type(1U); + + static constexpr bool may_tick = + (advance_pow2 < stypebits) && (advance_pow2 < tick_limit_pow2); + static constexpr size_t tick_shift = stypebits - advance_pow2; + static constexpr state_type tick_mask = + may_tick ? state_type( + (uint64_t(1) << (advance_pow2*may_tick)) - 1) + // ^-- stupidity to appease GCC warnings + : ~state_type(0U); + + static constexpr bool may_tock = stypebits < tick_limit_pow2; + + result_type data_[table_size]; + + PCG_NOINLINE void advance_table(); + + PCG_NOINLINE void advance_table(state_type delta, bool isForwards = true); + + result_type& get_extended_value() + { + state_type state = this->state_; + if (kdd && baseclass::is_mcg) { + // The low order bits of an MCG are constant, so drop them. + state >>= 2; + } + size_t index = kdd ? state & table_mask + : state >> table_shift; + + if (may_tick) { + bool tick = kdd ? (state & tick_mask) == state_type(0u) + : (state >> tick_shift) == state_type(0u); + if (tick) + advance_table(); + } + if (may_tock) { + bool tock = state == state_type(0u); + if (tock) + advance_table(); + } + return data_[index]; + } + +public: + static constexpr size_t period_pow2() + { + return baseclass::period_pow2() + table_size*extvalclass::period_pow2(); + } + + PCG_ALWAYS_INLINE result_type operator()() + { + result_type rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + return lhs ^ rhs; + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + + void set(result_type wanted) + { + result_type& rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + rhs = lhs ^ wanted; + } + + void advance(state_type distance, bool forwards = true); + + void backstep(state_type distance) + { + advance(distance, false); + } + + extended(const result_type* data) + : baseclass() + { + datainit(data); + } + + extended(const result_type* data, state_type seed) + : baseclass(seed) + { + datainit(data); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + extended(const result_type* data, state_type seed, + typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + datainit(data); + } + + extended() + : baseclass() + { + selfinit(); + } + + extended(state_type seed) + : baseclass(seed) + { + selfinit(); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. 
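+    // (Clarifying note: the SFINAE trigger is the typename bc::stream_state
+    // in the parameter list. Only stream mixins that define a stream_state
+    // typedef, i.e. specific_stream, make this constructor viable, so the
+    // setseq variants get it and the oneseq/mcg/unique variants don't.)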
+ + template + extended(state_type seed, typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + selfinit(); + } + +private: + void selfinit(); + void datainit(const result_type* data); + +public: + + template::value + && !std::is_convertible::value>::type> + extended(SeedSeq&& seedSeq) + : baseclass(seedSeq) + { + generate_to(seedSeq, data_); + } + + template + void seed(Args&&... args) + { + new (this) extended(std::forward(args)...); + } + + template + friend bool operator==(const extended&, + const extended&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const extended&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + extended&); + +}; + + +template +void extended::datainit( + const result_type* data) +{ + for (size_t i = 0; i < table_size; ++i) + data_[i] = data[i]; +} + +template +void extended::selfinit() +{ + // We need to fill the extended table with something, and we have + // very little provided data, so we use the base generator to + // produce values. Although not ideal (use a seed sequence, folks!), + // unexpected correlations are mitigated by + // - using XOR differences rather than the number directly + // - the way the table is accessed, its values *won't* be accessed + // in the same order the were written. + // - any strange correlations would only be apparent if we + // were to backstep the generator so that the base generator + // was generating the same values again + result_type lhs = baseclass::operator()(); + result_type rhs = baseclass::operator()(); + result_type xdiff = lhs - rhs; + for (size_t i = 0; i < table_size; ++i) { + data_[i] = baseclass::operator()() ^ xdiff; + } +} + +template +bool operator==(const extended& lhs, + const extended& rhs) +{ + auto& base_lhs = static_cast(lhs); + auto& base_rhs = static_cast(rhs); + return base_lhs == base_rhs + && std::equal( + std::begin(lhs.data_), std::end(lhs.data_), + std::begin(rhs.data_) + ); +} + +template +inline bool operator!=(const extended& lhs, + const extended& rhs) +{ + return !operator==(lhs, rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const extended& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + for (const auto& datum : rng.data_) + out << space << datum; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, + extended& rng) +{ + extended new_rng; + auto& base_rng = static_cast(new_rng); + in >> base_rng; + + if (in.fail()) + return in; + + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + for (auto& datum : new_rng.data_) { + in >> datum; + if (in.fail()) + goto bail; + } + + rng = new_rng; + +bail: + in.flags(orig_flags); + return in; +} + + + +template +void +extended::advance_table() +{ + bool carry = false; + for (size_t i = 0; i < table_size; ++i) { + if (carry) { + carry = insideout::external_step(data_[i],i+1); + } + bool carry2 = insideout::external_step(data_[i],i+1); + carry = carry || carry2; + } +} + +template +void +extended::advance_table( + state_type delta, bool isForwards) +{ + typedef typename baseclass::state_type base_state_t; + typedef typename extvalclass::state_type ext_state_t; + constexpr 
bitcount_t basebits = sizeof(base_state_t)*8; + constexpr bitcount_t extbits = sizeof(ext_state_t)*8; + static_assert(basebits <= extbits || advance_pow2 > 0, + "Current implementation might overflow its carry"); + + base_state_t carry = 0; + for (size_t i = 0; i < table_size; ++i) { + base_state_t total_delta = carry + delta; + ext_state_t trunc_delta = ext_state_t(total_delta); + if (basebits > extbits) { + carry = total_delta >> extbits; + } else { + carry = 0; + } + carry += + insideout::external_advance(data_[i],i+1, trunc_delta, isForwards); + } +} + +template +void extended::advance( + state_type distance, bool forwards) +{ + static_assert(kdd, + "Efficient advance is too hard for non-kdd extension. " + "For a weak advance, cast to base class"); + state_type zero = + baseclass::is_mcg ? this->state_ & state_type(3U) : state_type(0U); + if (may_tick) { + state_type ticks = distance >> (advance_pow2*may_tick); + // ^-- stupidity to appease GCC + // warnings + state_type adv_mask = + baseclass::is_mcg ? tick_mask << 2 : tick_mask; + state_type next_advance_distance = this->distance(zero, adv_mask); + if (!forwards) + next_advance_distance = (-next_advance_distance) & tick_mask; + if (next_advance_distance < (distance & tick_mask)) { + ++ticks; + } + if (ticks) + advance_table(ticks, forwards); + } + if (forwards) { + if (may_tock && this->distance(zero) <= distance) + advance_table(); + baseclass::advance(distance); + } else { + if (may_tock && -(this->distance(zero)) <= distance) + advance_table(state_type(1U), false); + baseclass::advance(-distance); + } +} + +} // namespace pcg_detail + +namespace pcg_engines { + +using namespace pcg_detail; + +/* Predefined types for XSH RS */ + +typedef oneseq_base oneseq_xsh_rs_16_8; +typedef oneseq_base oneseq_xsh_rs_32_16; +typedef oneseq_base oneseq_xsh_rs_64_32; +typedef oneseq_base oneseq_xsh_rs_128_64; +typedef oneseq_base + cm_oneseq_xsh_rs_128_64; + +typedef unique_base unique_xsh_rs_16_8; +typedef unique_base unique_xsh_rs_32_16; +typedef unique_base unique_xsh_rs_64_32; +typedef unique_base unique_xsh_rs_128_64; +typedef unique_base + cm_unique_xsh_rs_128_64; + +typedef setseq_base setseq_xsh_rs_16_8; +typedef setseq_base setseq_xsh_rs_32_16; +typedef setseq_base setseq_xsh_rs_64_32; +typedef setseq_base setseq_xsh_rs_128_64; +typedef setseq_base + cm_setseq_xsh_rs_128_64; + +typedef mcg_base mcg_xsh_rs_16_8; +typedef mcg_base mcg_xsh_rs_32_16; +typedef mcg_base mcg_xsh_rs_64_32; +typedef mcg_base mcg_xsh_rs_128_64; +typedef mcg_base + cm_mcg_xsh_rs_128_64; + +/* Predefined types for XSH RR */ + +typedef oneseq_base oneseq_xsh_rr_16_8; +typedef oneseq_base oneseq_xsh_rr_32_16; +typedef oneseq_base oneseq_xsh_rr_64_32; +typedef oneseq_base oneseq_xsh_rr_128_64; +typedef oneseq_base + cm_oneseq_xsh_rr_128_64; + +typedef unique_base unique_xsh_rr_16_8; +typedef unique_base unique_xsh_rr_32_16; +typedef unique_base unique_xsh_rr_64_32; +typedef unique_base unique_xsh_rr_128_64; +typedef unique_base + cm_unique_xsh_rr_128_64; + +typedef setseq_base setseq_xsh_rr_16_8; +typedef setseq_base setseq_xsh_rr_32_16; +typedef setseq_base setseq_xsh_rr_64_32; +typedef setseq_base setseq_xsh_rr_128_64; +typedef setseq_base + cm_setseq_xsh_rr_128_64; + +typedef mcg_base mcg_xsh_rr_16_8; +typedef mcg_base mcg_xsh_rr_32_16; +typedef mcg_base mcg_xsh_rr_64_32; +typedef mcg_base mcg_xsh_rr_128_64; +typedef mcg_base + cm_mcg_xsh_rr_128_64; + + +/* Predefined types for RXS M XS */ + +typedef oneseq_base oneseq_rxs_m_xs_8_8; +typedef oneseq_base 
oneseq_rxs_m_xs_16_16; +typedef oneseq_base oneseq_rxs_m_xs_32_32; +typedef oneseq_base oneseq_rxs_m_xs_64_64; +typedef oneseq_base + oneseq_rxs_m_xs_128_128; +typedef oneseq_base + cm_oneseq_rxs_m_xs_128_128; + +typedef unique_base unique_rxs_m_xs_8_8; +typedef unique_base unique_rxs_m_xs_16_16; +typedef unique_base unique_rxs_m_xs_32_32; +typedef unique_base unique_rxs_m_xs_64_64; +typedef unique_base unique_rxs_m_xs_128_128; +typedef unique_base + cm_unique_rxs_m_xs_128_128; + +typedef setseq_base setseq_rxs_m_xs_8_8; +typedef setseq_base setseq_rxs_m_xs_16_16; +typedef setseq_base setseq_rxs_m_xs_32_32; +typedef setseq_base setseq_rxs_m_xs_64_64; +typedef setseq_base setseq_rxs_m_xs_128_128; +typedef setseq_base + cm_setseq_rxs_m_xs_128_128; + + // MCG versions don't make sense here, so aren't defined. + +/* Predefined types for RXS M */ + +typedef oneseq_base oneseq_rxs_m_16_8; +typedef oneseq_base oneseq_rxs_m_32_16; +typedef oneseq_base oneseq_rxs_m_64_32; +typedef oneseq_base oneseq_rxs_m_128_64; +typedef oneseq_base + cm_oneseq_rxs_m_128_64; + +typedef unique_base unique_rxs_m_16_8; +typedef unique_base unique_rxs_m_32_16; +typedef unique_base unique_rxs_m_64_32; +typedef unique_base unique_rxs_m_128_64; +typedef unique_base + cm_unique_rxs_m_128_64; + +typedef setseq_base setseq_rxs_m_16_8; +typedef setseq_base setseq_rxs_m_32_16; +typedef setseq_base setseq_rxs_m_64_32; +typedef setseq_base setseq_rxs_m_128_64; +typedef setseq_base + cm_setseq_rxs_m_128_64; + +typedef mcg_base mcg_rxs_m_16_8; +typedef mcg_base mcg_rxs_m_32_16; +typedef mcg_base mcg_rxs_m_64_32; +typedef mcg_base mcg_rxs_m_128_64; +typedef mcg_base + cm_mcg_rxs_m_128_64; + +/* Predefined types for DXSM */ + +typedef oneseq_base oneseq_dxsm_16_8; +typedef oneseq_base oneseq_dxsm_32_16; +typedef oneseq_base oneseq_dxsm_64_32; +typedef oneseq_base oneseq_dxsm_128_64; +typedef oneseq_base + cm_oneseq_dxsm_128_64; + +typedef unique_base unique_dxsm_16_8; +typedef unique_base unique_dxsm_32_16; +typedef unique_base unique_dxsm_64_32; +typedef unique_base unique_dxsm_128_64; +typedef unique_base + cm_unique_dxsm_128_64; + +typedef setseq_base setseq_dxsm_16_8; +typedef setseq_base setseq_dxsm_32_16; +typedef setseq_base setseq_dxsm_64_32; +typedef setseq_base setseq_dxsm_128_64; +typedef setseq_base + cm_setseq_dxsm_128_64; + +typedef mcg_base mcg_dxsm_16_8; +typedef mcg_base mcg_dxsm_32_16; +typedef mcg_base mcg_dxsm_64_32; +typedef mcg_base mcg_dxsm_128_64; +typedef mcg_base + cm_mcg_dxsm_128_64; + +/* Predefined types for XSL RR (only defined for "large" types) */ + +typedef oneseq_base oneseq_xsl_rr_64_32; +typedef oneseq_base oneseq_xsl_rr_128_64; +typedef oneseq_base + cm_oneseq_xsl_rr_128_64; + +typedef unique_base unique_xsl_rr_64_32; +typedef unique_base unique_xsl_rr_128_64; +typedef unique_base + cm_unique_xsl_rr_128_64; + +typedef setseq_base setseq_xsl_rr_64_32; +typedef setseq_base setseq_xsl_rr_128_64; +typedef setseq_base + cm_setseq_xsl_rr_128_64; + +typedef mcg_base mcg_xsl_rr_64_32; +typedef mcg_base mcg_xsl_rr_128_64; +typedef mcg_base + cm_mcg_xsl_rr_128_64; + + +/* Predefined types for XSL RR RR (only defined for "large" types) */ + +typedef oneseq_base + oneseq_xsl_rr_rr_64_64; +typedef oneseq_base + oneseq_xsl_rr_rr_128_128; +typedef oneseq_base + cm_oneseq_xsl_rr_rr_128_128; + +typedef unique_base + unique_xsl_rr_rr_64_64; +typedef unique_base + unique_xsl_rr_rr_128_128; +typedef unique_base + cm_unique_xsl_rr_rr_128_128; + +typedef setseq_base + setseq_xsl_rr_rr_64_64; +typedef setseq_base + 
setseq_xsl_rr_rr_128_128; +typedef setseq_base + cm_setseq_xsl_rr_rr_128_128; + + // MCG versions don't make sense here, so aren't defined. + +/* Extended generators */ + +template +using ext_std8 = extended; + +template +using ext_std16 = extended; + +template +using ext_std32 = extended; + +template +using ext_std64 = extended; + + +template +using ext_oneseq_rxs_m_xs_32_32 = + ext_std32; + +template +using ext_mcg_xsh_rs_64_32 = + ext_std32; + +template +using ext_oneseq_xsh_rs_64_32 = + ext_std32; + +template +using ext_setseq_xsh_rr_64_32 = + ext_std32; + +template +using ext_mcg_xsl_rr_128_64 = + ext_std64; + +template +using ext_oneseq_xsl_rr_128_64 = + ext_std64; + +template +using ext_setseq_xsl_rr_128_64 = + ext_std64; + +} // namespace pcg_engines + +typedef pcg_engines::setseq_xsh_rr_64_32 pcg32; +typedef pcg_engines::oneseq_xsh_rr_64_32 pcg32_oneseq; +typedef pcg_engines::unique_xsh_rr_64_32 pcg32_unique; +typedef pcg_engines::mcg_xsh_rs_64_32 pcg32_fast; + +typedef pcg_engines::setseq_xsl_rr_128_64 pcg64; +typedef pcg_engines::oneseq_xsl_rr_128_64 pcg64_oneseq; +typedef pcg_engines::unique_xsl_rr_128_64 pcg64_unique; +typedef pcg_engines::mcg_xsl_rr_128_64 pcg64_fast; + +typedef pcg_engines::setseq_rxs_m_xs_8_8 pcg8_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_16_16 pcg16_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_32_32 pcg32_once_insecure; +typedef pcg_engines::setseq_rxs_m_xs_64_64 pcg64_once_insecure; +typedef pcg_engines::setseq_xsl_rr_rr_128_128 pcg128_once_insecure; + +typedef pcg_engines::oneseq_rxs_m_xs_8_8 pcg8_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_16_16 pcg16_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_32_32 pcg32_oneseq_once_insecure; +typedef pcg_engines::oneseq_rxs_m_xs_64_64 pcg64_oneseq_once_insecure; +typedef pcg_engines::oneseq_xsl_rr_rr_128_128 pcg128_oneseq_once_insecure; + + +// These two extended RNGs provide two-dimensionally equidistributed +// 32-bit generators. pcg32_k2_fast occupies the same space as pcg64, +// and can be called twice to generate 64 bits, but does not required +// 128-bit math; on 32-bit systems, it's faster than pcg64 as well. 
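+// A minimal caller-side sketch (not part of this header; it assumes
+// <random> is included and, in this vendored copy, that names are
+// qualified with arrow_vendored::):
+//
+//     arrow_vendored::pcg_extras::seed_seq_from<std::random_device> seed_source;
+//     arrow_vendored::pcg32_k2 rng(seed_source);
+//     uint32_t roll = rng(6) + 1;   // unbiased die roll via bounded_rand
+//
+// The extended types below are otherwise drop-in replacements for
+// pcg32/pcg64; only the internal table (and hence the period and
+// equidistribution) differs.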
+ +typedef pcg_engines::ext_setseq_xsh_rr_64_32<1,16,true> pcg32_k2; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<1,32,true> pcg32_k2_fast; + +// These eight extended RNGs have about as much state as arc4random +// +// - the k variants are k-dimensionally equidistributed +// - the c variants offer better crypographic security +// +// (just how good the cryptographic security is an open question) + +typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,true> pcg32_k64; +typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,true> pcg32_k64_oneseq; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,true> pcg32_k64_fast; + +typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,false> pcg32_c64; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,false> pcg32_c64_oneseq; +typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,false> pcg32_c64_fast; + +typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,true> pcg64_k32; +typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,true> pcg64_k32_oneseq; +typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,true> pcg64_k32_fast; + +typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,false> pcg64_c32; +typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,false> pcg64_c32_oneseq; +typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,false> pcg64_c32_fast; + +// These eight extended RNGs have more state than the Mersenne twister +// +// - the k variants are k-dimensionally equidistributed +// - the c variants offer better crypographic security +// +// (just how good the cryptographic security is an open question) + +typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,true> pcg32_k1024; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,true> pcg32_k1024_fast; + +typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,false> pcg32_c1024; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,false> pcg32_c1024_fast; + +typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,true> pcg64_k1024; +typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,true> pcg64_k1024_fast; + +typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,false> pcg64_c1024; +typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,false> pcg64_c1024_fast; + +// These generators have an insanely huge period (2^524352), and is suitable +// for silly party tricks, such as dumping out 64 KB ZIP files at an arbitrary +// point in the future. [Actually, over the full period of the generator, it +// will produce every 64 KB ZIP file 2^64 times!] + +typedef pcg_engines::ext_setseq_xsh_rr_64_32<14,16,true> pcg32_k16384; +typedef pcg_engines::ext_oneseq_xsh_rs_64_32<14,32,true> pcg32_k16384_fast; + +} // namespace arrow_vendored + +#ifdef _MSC_VER + #pragma warning(default:4146) +#endif + +#endif // PCG_RAND_HPP_INCLUDED diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0181e69e4ef37fc279e4d067fd42d0cedacb9126 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp @@ -0,0 +1,1008 @@ +/* + * PCG Random Number Generation for C++ + * + * Copyright 2014-2021 Melissa O'Neill , + * and the PCG Project contributors. 
+ * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + * Licensed under the Apache License, Version 2.0 (provided in + * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) + * or under the MIT license (provided in LICENSE-MIT.txt and at + * http://opensource.org/licenses/MIT), at your option. This file may not + * be copied, modified, or distributed except according to those terms. + * + * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either + * express or implied. See your chosen license for details. + * + * For additional information about the PCG random number generation scheme, + * visit http://www.pcg-random.org/. + */ + +/* + * This code provides a a C++ class that can provide 128-bit (or higher) + * integers. To produce 2K-bit integers, it uses two K-bit integers, + * placed in a union that allowes the code to also see them as four K/2 bit + * integers (and access them either directly name, or by index). + * + * It may seem like we're reinventing the wheel here, because several + * libraries already exist that support large integers, but most existing + * libraries provide a very generic multiprecision code, but here we're + * operating at a fixed size. Also, most other libraries are fairly + * heavyweight. So we use a direct implementation. Sadly, it's much slower + * than hand-coded assembly or direct CPU support. + */ + +#ifndef PCG_UINT128_HPP_INCLUDED +#define PCG_UINT128_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) // Use MSVC++ intrinsics +#include +#endif + +/* + * We want to lay the type out the same way that a native type would be laid + * out, which means we must know the machine's endian, at compile time. + * This ugliness attempts to do so. + */ + +#ifndef PCG_LITTLE_ENDIAN + #if defined(__BYTE_ORDER__) + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #define PCG_LITTLE_ENDIAN 1 + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #define PCG_LITTLE_ENDIAN 0 + #else + #error __BYTE_ORDER__ does not match a standard endian, pick a side + #endif + #elif __LITTLE_ENDIAN__ || _LITTLE_ENDIAN + #define PCG_LITTLE_ENDIAN 1 + #elif __BIG_ENDIAN__ || _BIG_ENDIAN + #define PCG_LITTLE_ENDIAN 0 + #elif __x86_64 || __x86_64__ || _M_X64 || __i386 || __i386__ || _M_IX86 + #define PCG_LITTLE_ENDIAN 1 + #elif __powerpc__ || __POWERPC__ || __ppc__ || __PPC__ \ + || __m68k__ || __mc68000__ + #define PCG_LITTLE_ENDIAN 0 + #else + #error Unable to determine target endianness + #endif +#endif + +#if INTPTR_MAX == INT64_MAX && !defined(PCG_64BIT_SPECIALIZATIONS) + #define PCG_64BIT_SPECIALIZATIONS 1 +#endif + +namespace arrow_vendored { +namespace pcg_extras { + +// Recent versions of GCC have intrinsics we can use to quickly calculate +// the number of leading and trailing zeros in a number. If possible, we +// use them, otherwise we fall back to old-fashioned bit twiddling to figure +// them out. + +#ifndef PCG_BITCOUNT_T + typedef uint8_t bitcount_t; +#else + typedef PCG_BITCOUNT_T bitcount_t; +#endif + +/* + * Provide some useful helper functions + * * flog2 floor(log2(x)) + * * trailingzeros number of trailing zero bits + */ + +#if defined(__GNUC__) // Any GNU-compatible compiler supporting C++11 has + // some useful intrinsics we can use. 
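+// A few worked values (editorial examples, checkable by hand):
+//   flog2(1) == 0, flog2(4) == 2, flog2(5) == 2, flog2(0x80000000u) == 31;
+//   trailingzeros(8) == 3, trailingzeros(7) == 0.
+// Zero is not a valid argument to either function: __builtin_clz and
+// __builtin_ctz are undefined for v == 0.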
+ +inline bitcount_t flog2(uint32_t v) +{ + return 31 - __builtin_clz(v); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + return __builtin_ctz(v); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return 63 - __builtin_clzl(v); +#elif UINT64_MAX == ULLONG_MAX + return 63 - __builtin_clzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return __builtin_ctzl(v); +#elif UINT64_MAX == ULLONG_MAX + return __builtin_ctzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +#elif defined(_MSC_VER) // Use MSVC++ intrinsics + +#pragma intrinsic(_BitScanReverse, _BitScanForward) +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) +#pragma intrinsic(_BitScanReverse64, _BitScanForward64) +#endif + +inline bitcount_t flog2(uint32_t v) +{ + unsigned long i; + _BitScanReverse(&i, v); + return bitcount_t(i); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + unsigned long i; + _BitScanForward(&i, v); + return bitcount_t(i); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanReverse64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return high ? 32+flog2(high) : flog2(low); +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanForward64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return low ? trailingzeros(low) : trailingzeros(high)+32; +#endif +} + +#else // Otherwise, we fall back to bit twiddling + // implementations + +inline bitcount_t flog2(uint32_t v) +{ + // Based on code by Eric Cole and Mark Dickinson, which appears at + // https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn + + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + + v |= v >> 1; // first round down to one less than a power of 2 + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + + return multiplyDeBruijnBitPos[(uint32_t)(v * 0x07C4ACDDU) >> 27]; +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + + return multiplyDeBruijnBitPos[((uint32_t)((v & -v) * 0x077CB531U)) >> 27]; +} + +inline bitcount_t flog2(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return high ? 32+flog2(high) : flog2(low); +} + +inline bitcount_t trailingzeros(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return low ? trailingzeros(low) : trailingzeros(high)+32; +} + +#endif + +inline bitcount_t flog2(uint8_t v) +{ + return flog2(uint32_t(v)); +} + +inline bitcount_t flog2(uint16_t v) +{ + return flog2(uint32_t(v)); +} + +#if __SIZEOF_INT128__ +inline bitcount_t flog2(__uint128_t v) +{ + uint64_t high = uint64_t(v >> 64); + uint64_t low = uint64_t(v); + + return high ? 
64+flog2(high) : flog2(low);
+}
+#endif
+
+inline bitcount_t trailingzeros(uint8_t v)
+{
+    return trailingzeros(uint32_t(v));
+}
+
+inline bitcount_t trailingzeros(uint16_t v)
+{
+    return trailingzeros(uint32_t(v));
+}
+
+#if __SIZEOF_INT128__
+inline bitcount_t trailingzeros(__uint128_t v)
+{
+    uint64_t high = uint64_t(v >> 64);
+    uint64_t low = uint64_t(v);
+    return low ? trailingzeros(low) : trailingzeros(high)+64;
+}
+#endif
+
+template <typename UInt>
+inline bitcount_t clog2(UInt v)
+{
+    return flog2(v) + ((v & (-v)) != v);
+}
+
+template <typename UInt>
+inline UInt addwithcarry(UInt x, UInt y, bool carryin, bool* carryout)
+{
+    UInt half_result = y + carryin;
+    UInt result = x + half_result;
+    *carryout = (half_result < y) || (result < x);
+    return result;
+}
+
+template <typename UInt>
+inline UInt subwithcarry(UInt x, UInt y, bool carryin, bool* carryout)
+{
+    UInt half_result = y + carryin;
+    UInt result = x - half_result;
+    *carryout = (half_result < y) || (result > x);
+    return result;
+}
+
+
+template <typename UInt, typename UIntX2>
+class uint_x4 {
+// private:
+    static constexpr unsigned int UINT_BITS = sizeof(UInt) * CHAR_BIT;
+public:
+    union {
+#if PCG_LITTLE_ENDIAN
+        struct {
+            UInt v0, v1, v2, v3;
+        } w;
+        struct {
+            UIntX2 v01, v23;
+        } d;
+#else
+        struct {
+            UInt v3, v2, v1, v0;
+        } w;
+        struct {
+            UIntX2 v23, v01;
+        } d;
+#endif
+        // For the array access versions, the code that uses the array
+        // must handle endian itself.  Yuck.
+        UInt wa[4];
+    };
+
+public:
+    uint_x4() = default;
+
+    constexpr uint_x4(UInt v3, UInt v2, UInt v1, UInt v0)
+#if PCG_LITTLE_ENDIAN
+       : w{v0, v1, v2, v3}
+#else
+       : w{v3, v2, v1, v0}
+#endif
+    {
+        // Nothing (else) to do
+    }
+
+    constexpr uint_x4(UIntX2 v23, UIntX2 v01)
+#if PCG_LITTLE_ENDIAN
+       : d{v01,v23}
+#else
+       : d{v23,v01}
+#endif
+    {
+        // Nothing (else) to do
+    }
+
+    constexpr uint_x4(UIntX2 v01)
+#if PCG_LITTLE_ENDIAN
+       : d{v01, UIntX2(0)}
+#else
+       : d{UIntX2(0),v01}
+#endif
+    {
+        // Nothing (else) to do
+    }
+
+    template<class Integral,
+             typename std::enable_if<(std::is_integral<Integral>::value
+                                      && sizeof(Integral) <= sizeof(UIntX2))
+                                     >::type* = nullptr>
+    constexpr uint_x4(Integral v01)
+#if PCG_LITTLE_ENDIAN
+       : d{UIntX2(v01), UIntX2(0)}
+#else
+       : d{UIntX2(0), UIntX2(v01)}
+#endif
+    {
+        // Nothing (else) to do
+    }
+
+    explicit constexpr operator UIntX2() const
+    {
+        return d.v01;
+    }
+
+    template<class Integral,
+             typename std::enable_if<(std::is_integral<Integral>::value
+                                      && sizeof(Integral) <= sizeof(UIntX2))
+                                     >::type* = nullptr>
+    explicit constexpr operator Integral() const
+    {
+        return Integral(d.v01);
+    }
+
+    explicit constexpr operator bool() const
+    {
+        return d.v01 || d.v23;
+    }
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator*(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator*(const uint_x4<U,V>&, V);
+
+    template<typename U, typename V>
+    friend std::pair< uint_x4<U,V>,uint_x4<U,V> >
+        divmod(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator+(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator-(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator<<(const uint_x4<U,V>&, const bitcount_t shift);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator>>(const uint_x4<U,V>&, const bitcount_t shift);
+
+#if PCG_64BIT_SPECIALIZATIONS
+    template<typename U>
+    friend uint_x4<U,uint64_t> operator<<(const uint_x4<U,uint64_t>&,
+                                          const bitcount_t shift);
+
+    template<typename U>
+    friend uint_x4<U,uint64_t> operator>>(const uint_x4<U,uint64_t>&,
+                                          const bitcount_t shift);
+#endif
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator&(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator|(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator^(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bool operator==(const uint_x4<U,V>&, const uint_x4<U,V>&);
+    template<typename U, typename V>
+    friend bool operator!=(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bool operator<(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bool operator<=(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bool operator>(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bool operator>=(const uint_x4<U,V>&, const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator~(const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend uint_x4<U,V> operator-(const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bitcount_t flog2(const uint_x4<U,V>&);
+
+    template<typename U, typename V>
+    friend bitcount_t trailingzeros(const uint_x4<U,V>&);
+
+#if PCG_64BIT_SPECIALIZATIONS
+    template<typename U>
+    friend bitcount_t flog2(const uint_x4<U,uint64_t>&);
+
+    template<typename U>
+    friend bitcount_t trailingzeros(const uint_x4<U,uint64_t>&);
+#endif
+
+    uint_x4& operator*=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this * rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator*=(UIntX2 rhs)
+    {
+        uint_x4 result = *this * rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator/=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this / rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator%=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this % rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator+=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this + rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator-=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this - rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator&=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this & rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator|=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this | rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator^=(const uint_x4& rhs)
+    {
+        uint_x4 result = *this ^ rhs;
+        return *this = result;
+    }
+
+    uint_x4& operator>>=(bitcount_t shift)
+    {
+        uint_x4 result = *this >> shift;
+        return *this = result;
+    }
+
+    uint_x4& operator<<=(bitcount_t shift)
+    {
+        uint_x4 result = *this << shift;
+        return *this = result;
+    }
+
+};
+
+template <typename UInt, typename UIntX2>
+bitcount_t flog2(const uint_x4<UInt,UIntX2>& v)
+{
+#if PCG_LITTLE_ENDIAN
+    for (uint8_t i = 4; i !=0; /* dec in loop */) {
+        --i;
+#else
+    for (uint8_t i = 0; i < 4; ++i) {
+#endif
+        if (v.wa[i] == 0)
+             continue;
+        return flog2(v.wa[i]) + uint_x4<UInt,UIntX2>::UINT_BITS*i;
+    }
+    abort();
+}
+
+template <typename UInt, typename UIntX2>
+bitcount_t trailingzeros(const uint_x4<UInt,UIntX2>& v)
+{
+#if PCG_LITTLE_ENDIAN
+    for (uint8_t i = 0; i < 4; ++i) {
+#else
+    for (uint8_t i = 4; i !=0; /* dec in loop */) {
+        --i;
+#endif
+        if (v.wa[i] != 0)
+            return trailingzeros(v.wa[i]) + uint_x4<UInt,UIntX2>::UINT_BITS*i;
+    }
+    return uint_x4<UInt,UIntX2>::UINT_BITS*4;
+}
+
+#if PCG_64BIT_SPECIALIZATIONS
+template <typename UInt>
+bitcount_t flog2(const uint_x4<UInt,uint64_t>& v)
+{
+    return v.d.v23 > 0 ? flog2(v.d.v23) + uint_x4<UInt,uint64_t>::UINT_BITS*2
+                       : flog2(v.d.v01);
+}
+
+template <typename UInt>
+bitcount_t trailingzeros(const uint_x4<UInt,uint64_t>& v)
+{
+    return v.d.v01 == 0 ? trailingzeros(v.d.v23) + uint_x4<UInt,uint64_t>::UINT_BITS*2
+                        : trailingzeros(v.d.v01);
+}
+#endif
+
+template <typename UInt, typename UIntX2>
+std::pair< uint_x4<UInt,UIntX2>, uint_x4<UInt,UIntX2> >
+    divmod(const uint_x4<UInt,UIntX2>& orig_dividend,
+           const uint_x4<UInt,UIntX2>& divisor)
+{
+    // If the dividend is less than the divisor, the answer is always zero.
+    // This takes care of boundary cases like 0/x (which would otherwise be
+    // problematic because we can't take the log of zero).  (The boundary case
+    // of division by zero is undefined.)
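+    // (What follows is, in sketch, plain shift-and-subtract long division:
+    // each round subtracts a shifted copy of the divisor ("factor") from the
+    // dividend and adds the matching power of two ("qfactor") to the
+    // quotient; logdiff is chosen below so that the first subtraction cannot
+    // overshoot.)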
+    if (orig_dividend < divisor)
+        return { uint_x4<UInt,UIntX2>(UIntX2(0)), orig_dividend };
+
+    auto dividend = orig_dividend;
+
+    auto log2_divisor  = flog2(divisor);
+    auto log2_dividend = flog2(dividend);
+    // assert(log2_dividend >= log2_divisor);
+    bitcount_t logdiff = log2_dividend - log2_divisor;
+
+    constexpr uint_x4<UInt,UIntX2> ONE(UIntX2(1));
+    if (logdiff == 0)
+        return { ONE, dividend - divisor };
+
+    // Now we change the log difference to
+    //     floor(log2(dividend)) - ceil(log2(divisor))
+    // to ensure that we *underestimate* the result.
+    logdiff -= 1;
+
+    uint_x4<UInt,UIntX2> quotient(UIntX2(0));
+
+    auto qfactor = ONE << logdiff;
+    auto factor  = divisor << logdiff;
+
+    do {
+        dividend -= factor;
+        quotient += qfactor;
+        while (dividend < factor) {
+            factor  >>= 1;
+            qfactor >>= 1;
+        }
+    } while (dividend >= divisor);
+
+    return { quotient, dividend };
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator/(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).first;
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator%(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).second;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(b.w.v0);
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(b.w.v0);
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout);
+
+    r.d.v23 += a.d.v01 * b.d.v23 + a.d.v23 * b.d.v01;
+
+    return r;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               UIntX2 b01)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(UInt(b01));
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(UInt(b01));
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b01 >> UINT_BITS);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
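+    // The remaining partial product, a1*b1, lands entirely in the upper two
+    // words; together with the final r.d.v23 adjustment below (the truncated
+    // contribution of a's upper half), it completes the schoolbook multiply
+    // modulo 2^(4*UINT_BITS).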
+    UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b01 >> UINT_BITS);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout);
+
+    r.d.v23 += a.d.v23 * b01;
+
+    return r;
+}
+
+#if PCG_64BIT_SPECIALIZATIONS
+#if defined(_MSC_VER)
+#pragma intrinsic(_umul128)
+#endif
+
+#if defined(_MSC_VER) || __SIZEOF_INT128__
+template <typename UInt>
+uint_x4<UInt,uint64_t> operator*(const uint_x4<UInt,uint64_t>& a,
+                                 const uint_x4<UInt,uint64_t>& b)
+{
+#if defined(_MSC_VER)
+    uint64_t hi;
+    uint64_t lo = _umul128(a.d.v01, b.d.v01, &hi);
+#else
+    __uint128_t r = __uint128_t(a.d.v01) * __uint128_t(b.d.v01);
+    uint64_t lo = uint64_t(r);
+    uint64_t hi = r >> 64;
+#endif
+    hi += a.d.v23 * b.d.v01 + a.d.v01 * b.d.v23;
+    return {hi, lo};
+}
+#endif
+#endif
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator+(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+
+    bool carryin = false;
+    bool carryout;
+    r.w.v0 = addwithcarry(a.w.v0, b.w.v0, carryin, &carryout);
+    carryin = carryout;
+    r.w.v1 = addwithcarry(a.w.v1, b.w.v1, carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(a.w.v2, b.w.v2, carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(a.w.v3, b.w.v3, carryin, &carryout);
+
+    return r;
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator-(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+
+    bool carryin = false;
+    bool carryout;
+    r.w.v0 = subwithcarry(a.w.v0, b.w.v0, carryin, &carryout);
+    carryin = carryout;
+    r.w.v1 = subwithcarry(a.w.v1, b.w.v1, carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = subwithcarry(a.w.v2, b.w.v2, carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = subwithcarry(a.w.v3, b.w.v3, carryin, &carryout);
+
+    return r;
+}
+
+#if PCG_64BIT_SPECIALIZATIONS
+template <typename UInt>
+uint_x4<UInt,uint64_t> operator+(const uint_x4<UInt,uint64_t>& a,
+                                 const uint_x4<UInt,uint64_t>& b)
+{
+    uint_x4<UInt,uint64_t> r = {uint64_t(0u), uint64_t(0u)};
+
+    bool carryin = false;
+    bool carryout;
+    r.d.v01 = addwithcarry(a.d.v01, b.d.v01, carryin, &carryout);
+    carryin = carryout;
+    r.d.v23 = addwithcarry(a.d.v23, b.d.v23, carryin, &carryout);
+
+    return r;
+}
+
+template <typename UInt>
+uint_x4<UInt,uint64_t> operator-(const uint_x4<UInt,uint64_t>& a,
+                                 const uint_x4<UInt,uint64_t>& b)
+{
+    uint_x4<UInt,uint64_t> r = {uint64_t(0u), uint64_t(0u)};
+
+    bool carryin = false;
+    bool carryout;
+    r.d.v01 = subwithcarry(a.d.v01, b.d.v01, carryin, &carryout);
+    carryin = carryout;
+    r.d.v23 = subwithcarry(a.d.v23, b.d.v23, carryin, &carryout);
+
+    return r;
+}
+#endif
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator&(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    return uint_x4<UInt,UIntX2>(a.d.v23 & b.d.v23, a.d.v01 & b.d.v01);
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator|(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    return uint_x4<UInt,UIntX2>(a.d.v23 | b.d.v23, a.d.v01 | b.d.v01);
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator^(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    return uint_x4<UInt,UIntX2>(a.d.v23 ^ b.d.v23, a.d.v01 ^ b.d.v01);
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator~(const uint_x4<UInt,UIntX2>& v)
+{
+    return uint_x4<UInt,UIntX2>(~v.d.v23, ~v.d.v01);
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator-(const uint_x4<UInt,UIntX2>& v)
+{
+    return uint_x4<UInt,UIntX2>(0UL,0UL) - v;
+}
+
+template <typename UInt, typename UIntX2>
+bool operator==(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return (a.d.v01 == b.d.v01) && (a.d.v23 == b.d.v23);
+}
+
+template <typename UInt, typename UIntX2>
+bool operator!=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return !operator==(a,b);
+}
+
+
+template <typename UInt, typename UIntX2>
+bool operator<(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return (a.d.v23 < b.d.v23)
+           || ((a.d.v23 == b.d.v23) && (a.d.v01 < b.d.v01));
+}
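+// As a usage sketch (illustrative only): with the instantiation
+// uint_x4<uint32_t, uint64_t> acting as a 128-bit unsigned integer, the
+// operators above compose as expected:
+//
+//     typedef uint_x4<uint32_t, uint64_t> u128;
+//     u128 x(0x0123456789abcdefULL);   // low 64 bits set
+//     u128 y = (x << 64) | u128(1u);   // 2^64 * x + 1
+//     bool lt = x < y;                 // compares d.v23 first, then d.v01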
+template <typename UInt, typename UIntX2>
+bool operator>(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return operator<(b,a);
+}
+
+template <typename UInt, typename UIntX2>
+bool operator<=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return !(operator<(b,a));
+}
+
+template <typename UInt, typename UIntX2>
+bool operator>=(const uint_x4<UInt,UIntX2>& a, const uint_x4<UInt,UIntX2>& b)
+{
+    return !(operator<(a,b));
+}
+
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator<<(const uint_x4<UInt,UIntX2>& v,
+                                const bitcount_t shift)
+{
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    const bitcount_t bits    = uint_x4<UInt,UIntX2>::UINT_BITS;
+    const bitcount_t bitmask = bits - 1;
+    const bitcount_t shiftdiv = shift / bits;
+    const bitcount_t shiftmod = shift & bitmask;
+
+    if (shiftmod) {
+        UInt carryover = 0;
+#if PCG_LITTLE_ENDIAN
+        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
+#else
+        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
+            --out, --in;
+#endif
+            r.wa[out] = (v.wa[in] << shiftmod) | carryover;
+            carryover = (v.wa[in] >> (bits - shiftmod));
+        }
+    } else {
+#if PCG_LITTLE_ENDIAN
+        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
+#else
+        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
+            --out, --in;
+#endif
+            r.wa[out] = v.wa[in];
+        }
+    }
+
+    return r;
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator>>(const uint_x4<UInt,UIntX2>& v,
+                                const bitcount_t shift)
+{
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    const bitcount_t bits    = uint_x4<UInt,UIntX2>::UINT_BITS;
+    const bitcount_t bitmask = bits - 1;
+    const bitcount_t shiftdiv = shift / bits;
+    const bitcount_t shiftmod = shift & bitmask;
+
+    if (shiftmod) {
+        UInt carryover = 0;
+#if PCG_LITTLE_ENDIAN
+        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
+            --out, --in;
+#else
+        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
+#endif
+            r.wa[out] = (v.wa[in] >> shiftmod) | carryover;
+            carryover = (v.wa[in] << (bits - shiftmod));
+        }
+    } else {
+#if PCG_LITTLE_ENDIAN
+        for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) {
+            --out, --in;
+#else
+        for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) {
+#endif
+            r.wa[out] = v.wa[in];
+        }
+    }
+
+    return r;
+}
+
+#if PCG_64BIT_SPECIALIZATIONS
+template <typename UInt>
+uint_x4<UInt,uint64_t> operator<<(const uint_x4<UInt,uint64_t>& v,
+                                  const bitcount_t shift)
+{
+    constexpr bitcount_t bits2 = uint_x4<UInt,uint64_t>::UINT_BITS * 2;
+
+    if (shift >= bits2) {
+        return {v.d.v01 << (shift-bits2), uint64_t(0u)};
+    } else {
+        return {shift ? (v.d.v23 << shift) | (v.d.v01 >> (bits2-shift))
+                      : v.d.v23,
+                v.d.v01 << shift};
+    }
+}
+
+template <typename UInt>
+uint_x4<UInt,uint64_t> operator>>(const uint_x4<UInt,uint64_t>& v,
+                                  const bitcount_t shift)
+{
+    constexpr bitcount_t bits2 = uint_x4<UInt,uint64_t>::UINT_BITS * 2;
+
+    if (shift >= bits2) {
+        return {uint64_t(0u), v.d.v23 >> (shift-bits2)};
+    } else {
+        return {v.d.v23 >> shift,
+                shift ? (v.d.v01 >> shift) | (v.d.v23 << (bits2-shift))
+                      : v.d.v01};
+    }
+}
+#endif
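+
+// Division and modulus route through divmod() above; as a sketch
+// (illustrative only):
+//
+//     typedef uint_x4<uint32_t, uint64_t> u128;
+//     u128 n = u128(1u) << 100;        // 2^100
+//     u128 q = n / u128(10u);          // divmod(n, d).first
+//     u128 r = n % u128(10u);          // divmod(n, d).second
+//     // q * u128(10u) + r == n holds for any nonzero divisor.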
+
+} // namespace pcg_extras
+} // namespace arrow_vendored
+
+#endif // PCG_UINT128_HPP_INCLUDED
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d039064d6ab3f56a2215ae50d1b0286db61ddfe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h
@@ -0,0 +1,83 @@
+/* Debugging assertions and traps
+ * Portable Snippets - https://github.com/nemequ/portable-snippets
+ * Created by Evan Nemerson
+ *
+ * To the extent possible under law, the authors have waived all
+ * copyright and related or neighboring rights to this code.  For
+ * details, see the Creative Commons Zero 1.0 Universal license at
+ * https://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#if !defined(PSNIP_DEBUG_TRAP_H)
+#define PSNIP_DEBUG_TRAP_H
+
+#if !defined(PSNIP_NDEBUG) && defined(NDEBUG) && !defined(PSNIP_DEBUG)
+#  define PSNIP_NDEBUG 1
+#endif
+
+#if defined(__has_builtin) && !defined(__ibmxl__)
+#  if __has_builtin(__builtin_debugtrap)
+#    define psnip_trap() __builtin_debugtrap()
+#  elif __has_builtin(__debugbreak)
+#    define psnip_trap() __debugbreak()
+#  endif
+#endif
+#if !defined(psnip_trap)
+#  if defined(_MSC_VER) || defined(__INTEL_COMPILER)
+#    define psnip_trap() __debugbreak()
+#  elif defined(__ARMCC_VERSION)
+#    define psnip_trap() __breakpoint(42)
+#  elif defined(__ibmxl__) || defined(__xlC__)
+#    include <builtins.h>
+#    define psnip_trap() __trap(42)
+#  elif defined(__DMC__) && defined(_M_IX86)
+     static inline void psnip_trap(void) { __asm int 3h; }
+#  elif defined(__i386__) || defined(__x86_64__)
+     static inline void psnip_trap(void) { __asm__ __volatile__("int $03"); }
+#  elif defined(__thumb__)
+     static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xde01"); }
+#  elif defined(__aarch64__)
+     static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xd4200000"); }
+#  elif defined(__arm__)
+     static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xe7f001f0"); }
+#  elif defined (__alpha__) && !defined(__osf__)
+     static inline void psnip_trap(void) { __asm__ __volatile__("bpt"); }
+#  elif defined(_54_)
+     static inline void psnip_trap(void) { __asm__ __volatile__("ESTOP"); }
+#  elif defined(_55_)
+     static inline void psnip_trap(void) { __asm__ __volatile__(";\n .if (.MNEMONIC)\n ESTOP_1\n .else\n ESTOP_1()\n .endif\n NOP"); }
+#  elif defined(_64P_)
+     static inline void psnip_trap(void) { __asm__ __volatile__("SWBP 0"); }
+#  elif defined(_6x_)
+     static inline void psnip_trap(void) { __asm__ __volatile__("NOP\n .word 0x10000000"); }
+#  elif defined(__STDC_HOSTED__) && (__STDC_HOSTED__ == 0) && defined(__GNUC__)
+#    define psnip_trap() __builtin_trap()
+#  else
+#    include <signal.h>
+#    if defined(SIGTRAP)
+#      define psnip_trap() raise(SIGTRAP)
+#    else
+#      define psnip_trap() raise(SIGABRT)
+#    endif
+#  endif
+#endif
+
+#if defined(HEDLEY_LIKELY)
+#  define PSNIP_DBG_LIKELY(expr) HEDLEY_LIKELY(expr)
+#elif defined(__GNUC__) && (__GNUC__ >= 3)
+#  define PSNIP_DBG_LIKELY(expr) __builtin_expect(!!(expr), 1)
+#else
+#  define PSNIP_DBG_LIKELY(expr) (!!(expr))
+#endif
+
+#if !defined(PSNIP_NDEBUG) || (PSNIP_NDEBUG == 0)
+#  define psnip_dbg_assert(expr) do { \
+    if (!PSNIP_DBG_LIKELY(expr)) { \
+      psnip_trap(); \
+    } \
+  } while (0)
+#else
+#  define psnip_dbg_assert(expr)
+#endif
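+
+/* Usage sketch (illustrative only): psnip_trap() breaks into the debugger
+ * unconditionally, while psnip_dbg_assert() traps on failure and compiles
+ * away entirely when PSNIP_NDEBUG is in effect:
+ *
+ *     int divide(int a, int b) {
+ *         psnip_dbg_assert(b != 0);
+ *         return a / b;
+ *     }
+ */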
+
+#endif /* !defined(PSNIP_DEBUG_TRAP_H) */
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f6426ac7657192f6e0aa92bc25a828f106b4f7c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h
@@ -0,0 +1,1072 @@
+/* Overflow-safe math functions
+ * Portable Snippets - https://github.com/nemequ/portable-snippets
+ * Created by Evan Nemerson
+ *
+ * To the extent possible under law, the authors have waived all
+ * copyright and related or neighboring rights to this code.  For
+ * details, see the Creative Commons Zero 1.0 Universal license at
+ * https://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#if !defined(PSNIP_SAFE_H)
+#define PSNIP_SAFE_H
+
+#if !defined(PSNIP_SAFE_FORCE_PORTABLE)
+#  if defined(__has_builtin)
+#    if __has_builtin(__builtin_add_overflow) && !defined(__ibmxl__)
+#      define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW
+#    endif
+#  elif defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__INTEL_COMPILER)
+#    define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW
+#  endif
+#  if defined(__has_include)
+#    if __has_include(<intsafe.h>)
+#      define PSNIP_SAFE_HAVE_INTSAFE_H
+#    endif
+#  elif defined(_WIN32)
+#    define PSNIP_SAFE_HAVE_INTSAFE_H
+#  endif
+#endif /* !defined(PSNIP_SAFE_FORCE_PORTABLE) */
+
+#if defined(__GNUC__)
+#  define PSNIP_SAFE_LIKELY(expr)   __builtin_expect(!!(expr), 1)
+#  define PSNIP_SAFE_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
+#else
+#  define PSNIP_SAFE_LIKELY(expr)   !!(expr)
+#  define PSNIP_SAFE_UNLIKELY(expr) !!(expr)
+#endif /* defined(__GNUC__) */
+
+#if !defined(PSNIP_SAFE_STATIC_INLINE)
+#  if defined(__GNUC__)
+#    define PSNIP_SAFE__COMPILER_ATTRIBUTES __attribute__((__unused__))
+#  else
+#    define PSNIP_SAFE__COMPILER_ATTRIBUTES
+#  endif
+
+#  if defined(HEDLEY_INLINE)
+#    define PSNIP_SAFE__INLINE HEDLEY_INLINE
+#  elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#    define PSNIP_SAFE__INLINE inline
+#  elif defined(__GNUC_STDC_INLINE__)
+#    define PSNIP_SAFE__INLINE __inline__
+#  elif defined(_MSC_VER) && _MSC_VER >= 1200
+#    define PSNIP_SAFE__INLINE __inline
+#  else
+#    define PSNIP_SAFE__INLINE
+#  endif
+
+#  define PSNIP_SAFE__FUNCTION PSNIP_SAFE__COMPILER_ATTRIBUTES static PSNIP_SAFE__INLINE
+#endif
+
+// !defined(__cplusplus) added for Solaris support
+#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#  define psnip_safe_bool _Bool
+#else
+#  define psnip_safe_bool int
+#endif
+
+#if !defined(PSNIP_SAFE_NO_FIXED)
+/* For maximum portability include the exact-int module from
+   portable snippets. */
+#  if \
+    !defined(psnip_int64_t) || !defined(psnip_uint64_t) || \
+    !defined(psnip_int32_t) || !defined(psnip_uint32_t) || \
+    !defined(psnip_int16_t) || !defined(psnip_uint16_t) || \
+    !defined(psnip_int8_t)  || !defined(psnip_uint8_t)
+#    include <stdint.h>
+#    if !defined(psnip_int64_t)
+#      define psnip_int64_t int64_t
+#    endif
+#    if !defined(psnip_uint64_t)
+#      define psnip_uint64_t uint64_t
+#    endif
+#    if !defined(psnip_int32_t)
+#      define psnip_int32_t int32_t
+#    endif
+#    if !defined(psnip_uint32_t)
+#      define psnip_uint32_t uint32_t
+#    endif
+#    if !defined(psnip_int16_t)
+#      define psnip_int16_t int16_t
+#    endif
+#    if !defined(psnip_uint16_t)
+#      define psnip_uint16_t uint16_t
+#    endif
+#    if !defined(psnip_int8_t)
+#      define psnip_int8_t int8_t
+#    endif
+#    if !defined(psnip_uint8_t)
+#      define psnip_uint8_t uint8_t
+#    endif
+#  endif
+#endif /* !defined(PSNIP_SAFE_NO_FIXED) */
+#include <limits.h>
+#include <stdlib.h>
+
+#if !defined(PSNIP_SAFE_SIZE_MAX)
+#  if defined(__SIZE_MAX__)
+#    define PSNIP_SAFE_SIZE_MAX __SIZE_MAX__
+#  elif defined(PSNIP_EXACT_INT_HAVE_STDINT)
+#    include <stdint.h>
+#  endif
+#endif
+
+#if defined(PSNIP_SAFE_SIZE_MAX)
+#  define PSNIP_SAFE__SIZE_MAX_RT PSNIP_SAFE_SIZE_MAX
+#else
+#  define PSNIP_SAFE__SIZE_MAX_RT (~((size_t) 0))
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_INTSAFE_H)
+/* In VS 10, stdint.h and intsafe.h both define (U)INTN_MIN/MAX, which
+   triggers warning C4005 (level 1).
+ */
+#  if defined(_MSC_VER) && (_MSC_VER == 1600)
+#    pragma warning(push)
+#    pragma warning(disable:4005)
+#  endif
+#  include <intsafe.h>
+#  if defined(_MSC_VER) && (_MSC_VER == 1600)
+#    pragma warning(pop)
+#  endif
+#endif /* defined(PSNIP_SAFE_HAVE_INTSAFE_H) */
+
+/* If there is a type larger than the one we're concerned with it's
+ * likely much faster to simply promote the operands, perform the
+ * requested operation, verify that the result falls within the
+ * original type, then cast the result back to the original type. */
+
+#if !defined(PSNIP_SAFE_NO_PROMOTIONS)
+
+#define PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, op_name, op) \
+  PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \
+  psnip_safe_larger_##name##_##op_name (T a, T b) { \
+    return ((psnip_safe_##name##_larger) a) op ((psnip_safe_##name##_larger) b); \
+  }
+
+#define PSNIP_SAFE_DEFINE_LARGER_UNARY_OP(T, name, op_name, op) \
+  PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \
+  psnip_safe_larger_##name##_##op_name (T value) { \
+    return (op ((psnip_safe_##name##_larger) value)); \
+  }
+
+#define PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(T, name) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %) \
+  PSNIP_SAFE_DEFINE_LARGER_UNARY_OP (T, name, neg, -)
+
+#define PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(T, name) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \
+  PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %)
+
+#define PSNIP_SAFE_IS_LARGER(ORIG_MAX, DEST_MAX) ((DEST_MAX / ORIG_MAX) >= ORIG_MAX)
+
+#if defined(__GNUC__) && ((__GNUC__ >= 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__SIZEOF_INT128__) && !defined(__ibmxl__)
+#define PSNIP_SAFE_HAVE_128
+typedef __int128 psnip_safe_int128_t;
+typedef unsigned __int128 psnip_safe_uint128_t;
+#endif /* defined(__GNUC__) */
+
+#if !defined(PSNIP_SAFE_NO_FIXED)
+#define PSNIP_SAFE_HAVE_LARGER_INT8
+#define PSNIP_SAFE_HAVE_LARGER_UINT8
+typedef psnip_int16_t psnip_safe_int8_larger;
+typedef psnip_uint16_t psnip_safe_uint8_larger;
+
+#define PSNIP_SAFE_HAVE_LARGER_INT16
+#define PSNIP_SAFE_HAVE_LARGER_UINT16
+typedef psnip_int32_t psnip_safe_int16_larger;
+typedef psnip_uint32_t psnip_safe_uint16_larger;
+
+#define PSNIP_SAFE_HAVE_LARGER_INT32
+#define PSNIP_SAFE_HAVE_LARGER_UINT32
+typedef psnip_int64_t psnip_safe_int32_larger;
+typedef psnip_uint64_t psnip_safe_uint32_larger;
+
+#if defined(PSNIP_SAFE_HAVE_128)
+#define PSNIP_SAFE_HAVE_LARGER_INT64
+#define PSNIP_SAFE_HAVE_LARGER_UINT64
+typedef psnip_safe_int128_t psnip_safe_int64_larger;
+typedef psnip_safe_uint128_t psnip_safe_uint64_larger;
+#endif /* defined(PSNIP_SAFE_HAVE_128) */
+#endif /* !defined(PSNIP_SAFE_NO_FIXED) */
+
+#define PSNIP_SAFE_HAVE_LARGER_SCHAR
+#if PSNIP_SAFE_IS_LARGER(SCHAR_MAX, SHRT_MAX)
+typedef short psnip_safe_schar_larger;
+#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, INT_MAX)
+typedef int psnip_safe_schar_larger;
+#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LONG_MAX)
+typedef long psnip_safe_schar_larger;
+#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LLONG_MAX)
+typedef long long psnip_safe_schar_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fff)
+typedef psnip_int16_t psnip_safe_schar_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffLL)
+typedef psnip_int32_t
psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SCHAR_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_schar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SCHAR +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UCHAR +#if PSNIP_SAFE_IS_LARGER(UCHAR_MAX, USHRT_MAX) +typedef unsigned short psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, UINT_MAX) +typedef unsigned int psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffU) +typedef psnip_uint16_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UCHAR_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uchar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UCHAR +#endif + +#if CHAR_MIN == 0 && defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_uchar_larger psnip_safe_char_larger; +#elif CHAR_MIN < 0 && defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_schar_larger psnip_safe_char_larger; +#endif + +#define PSNIP_SAFE_HAVE_LARGER_SHRT +#if PSNIP_SAFE_IS_LARGER(SHRT_MAX, INT_MAX) +typedef int psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LONG_MAX) +typedef long psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LLONG_MAX) +typedef long long psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SHRT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_short_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_USHRT +#if PSNIP_SAFE_IS_LARGER(USHRT_MAX, UINT_MAX) +typedef unsigned int psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (USHRT_MAX <= 0xffffffffffffffffULL) +typedef 
psnip_safe_uint128_t psnip_safe_ushort_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_USHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_INT +#if PSNIP_SAFE_IS_LARGER(INT_MAX, LONG_MAX) +typedef long psnip_safe_int_larger; +#elif PSNIP_SAFE_IS_LARGER(INT_MAX, LLONG_MAX) +typedef long long psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (INT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_int_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_INT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UINT +#if PSNIP_SAFE_IS_LARGER(UINT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uint_larger; +#elif PSNIP_SAFE_IS_LARGER(UINT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UINT_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uint_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UINT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LONG +#if PSNIP_SAFE_IS_LARGER(LONG_MAX, LLONG_MAX) +typedef long long psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LONG_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_long_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_LONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_ULONG +#if PSNIP_SAFE_IS_LARGER(ULONG_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULONG_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_ulong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_ULONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LLONG +#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t 
psnip_safe_llong_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffffffffffLL)
+typedef psnip_int64_t psnip_safe_llong_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LLONG_MAX <= 0x7fffffffffffffffLL)
+typedef psnip_safe_int128_t psnip_safe_llong_larger;
+#else
+#undef PSNIP_SAFE_HAVE_LARGER_LLONG
+#endif
+
+#define PSNIP_SAFE_HAVE_LARGER_ULLONG
+#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffff)
+typedef psnip_uint16_t psnip_safe_ullong_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffUL)
+typedef psnip_uint32_t psnip_safe_ullong_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffffffffffULL)
+typedef psnip_uint64_t psnip_safe_ullong_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULLONG_MAX <= 0xffffffffffffffffULL)
+typedef psnip_safe_uint128_t psnip_safe_ullong_larger;
+#else
+#undef PSNIP_SAFE_HAVE_LARGER_ULLONG
+#endif
+
+#if defined(PSNIP_SAFE_SIZE_MAX)
+#define PSNIP_SAFE_HAVE_LARGER_SIZE
+#if PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, USHRT_MAX)
+typedef unsigned short psnip_safe_size_larger;
+#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, UINT_MAX)
+typedef unsigned int psnip_safe_size_larger;
+#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULONG_MAX)
+typedef unsigned long psnip_safe_size_larger;
+#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULLONG_MAX)
+typedef unsigned long long psnip_safe_size_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffff)
+typedef psnip_uint16_t psnip_safe_size_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffUL)
+typedef psnip_uint32_t psnip_safe_size_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffffffffffULL)
+typedef psnip_uint64_t psnip_safe_size_larger;
+#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (PSNIP_SAFE_SIZE_MAX <= 0xffffffffffffffffULL)
+typedef psnip_safe_uint128_t psnip_safe_size_larger;
+#else
+#undef PSNIP_SAFE_HAVE_LARGER_SIZE
+#endif
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_SCHAR)
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(signed char, schar)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_UCHAR)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned char, uchar)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_CHAR)
+#if CHAR_MIN == 0
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(char, char)
+#else
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(char, char)
+#endif
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_SHRT)
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(short, short)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_USHRT)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned short, ushort)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_INT)
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(int, int)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_UINT)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned int, uint)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_LONG)
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long, long)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_ULONG)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long, ulong)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_LLONG)
+PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long long, llong)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_ULLONG)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long long, ullong)
+#endif
+
+#if defined(PSNIP_SAFE_HAVE_LARGER_SIZE)
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(size_t, size) +#endif + +#if !defined(PSNIP_SAFE_NO_FIXED) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int8_t, int8) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint8_t, uint8) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int16_t, int16) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint16_t, uint16) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int32_t, int32) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint32_t, uint32) +#if defined(PSNIP_SAFE_HAVE_128) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int64_t, int64) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint64_t, uint64) +#endif +#endif + +#endif /* !defined(PSNIP_SAFE_NO_PROMOTIONS) */ + +#define PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(T, name, op_name) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + return !__builtin_##op_name##_overflow(a, b, res); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(T, name, op_name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r >= min) && (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(T, name, op_name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_ADD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + psnip_safe_bool r = !( ((b > 0) && (a > (max - b))) || \ + ((b < 0) && (a < (min - b))) ); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a + b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_ADD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + *res = (T) (a + b); \ + return !PSNIP_SAFE_UNLIKELY((b > 0) && (a > (max - b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_SUB(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + psnip_safe_bool r = !((b > 0 && a < (min + b)) || \ + (b < 0 && a > (max + b))); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a - b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_SUB(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + *res = a - b; \ + return !PSNIP_SAFE_UNLIKELY(b > a); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MUL(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + psnip_safe_bool r = 1; \ + if (a > 0) { \ + if (b > 0) { \ + if (a > (max / b)) { \ + r = 0; \ + } \ + } else { \ + if (b < (min / a)) { \ + r = 0; \ + } \ + } \ + } else { \ + if (b > 0) { \ + if (a < (min / b)) { \ + r = 0; \ + } \ + } else { \ + if ( (a != 0) && (b < (max / a))) { \ + r = 0; \ + } \ + } \ + } \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a * b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MUL(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + *res = (T) (a * b); \ + return !PSNIP_SAFE_UNLIKELY((a > 0) && (b > 0) && (a > (max / b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_DIV(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b 
== 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a / b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_DIV(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a / b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MOD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a % b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MOD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a % b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_NEG(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_neg (T* res, T value) { \ + psnip_safe_bool r = value != min; \ + *res = PSNIP_SAFE_LIKELY(r) ? -value : max; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_INTSAFE(T, name, op, isf) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op (T* res, T a, T b) { \ + return isf(a, b, res) == S_OK; \ + } + +#if CHAR_MIN == 0 +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, add, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, sub, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, mul, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(char, char, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(char, char, CHAR_MAX) +#else /* CHAR_MIN != 0 */ +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, add, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, sub, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, mul, CHAR_MIN, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(char, char, CHAR_MIN, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(char, char, CHAR_MIN, CHAR_MAX) +#endif + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) 
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, add, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, sub, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, mul, SCHAR_MIN, SCHAR_MAX)
+#else
+PSNIP_SAFE_DEFINE_SIGNED_ADD(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_SUB(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_MUL(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+#endif
+PSNIP_SAFE_DEFINE_SIGNED_DIV(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_MOD(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_NEG(signed char, schar, SCHAR_MIN, SCHAR_MAX)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, mul)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_UCHAR)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, add, UCHAR_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, sub, UCHAR_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, mul, UCHAR_MAX)
+#else
+PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned char, uchar, UCHAR_MAX)
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned char, uchar, UCHAR_MAX)
+PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned char, uchar, UCHAR_MAX)
+#endif
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned char, uchar, UCHAR_MAX)
+PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned char, uchar, UCHAR_MAX)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, mul)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_SHRT)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, add, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, sub, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, mul, SHRT_MIN, SHRT_MAX)
+#else
+PSNIP_SAFE_DEFINE_SIGNED_ADD(short, short, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_SUB(short, short, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_MUL(short, short, SHRT_MIN, SHRT_MAX)
+#endif
+PSNIP_SAFE_DEFINE_SIGNED_DIV(short, short, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_MOD(short, short, SHRT_MIN, SHRT_MAX)
+PSNIP_SAFE_DEFINE_SIGNED_NEG(short, short, SHRT_MIN, SHRT_MAX)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, mul)
+#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H)
+PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, add, UShortAdd)
+PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, sub, UShortSub)
+PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, mul, UShortMult)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_USHRT)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, add, USHRT_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, sub, USHRT_MAX)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, mul, USHRT_MAX)
+#else
+PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned short, ushort, USHRT_MAX)
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned short, ushort, USHRT_MAX)
+PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned short, ushort, USHRT_MAX)
+#endif
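+
+/* Usage sketch (illustrative only): each expansion above produces functions
+ * of the form psnip_safe_<name>_<op>(&result, a, b), returning a
+ * psnip_safe_bool that is nonzero on success and zero on overflow, e.g.:
+ *
+ *     int sum;
+ *     if (!psnip_safe_int_add(&sum, a, b))
+ *         handle_overflow();  // hypothetical caller-provided handler
+ */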
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned short, ushort, USHRT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, add, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, sub, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, mul, INT_MIN, INT_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(int, int, INT_MIN, INT_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(int, int, INT_MIN, INT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, add, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, sub, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, mul, UINT_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned int, uint, UINT_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned int, uint, UINT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, add, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, sub, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, mul, LONG_MIN, LONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long, long, LONG_MIN, LONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long, long, LONG_MIN, LONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, add, ULongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, sub, ULongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, mul, ULongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, add, ULONG_MAX) 
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, sub, ULONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, mul, ULONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long, ulong, ULONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long, ulong, ULONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LLONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, add, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, sub, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, mul, LLONG_MIN, LLONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long long, llong, LLONG_MIN, LLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long long, llong, LLONG_MIN, LLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, add, ULongLongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, sub, ULongLongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, mul, ULongLongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULLONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, add, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, sub, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, mul, ULLONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long long, ullong, ULLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long long, ullong, ULLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, add, SizeTAdd) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, sub, SizeTSub) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, mul, SizeTMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SIZE) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, add, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, sub, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, mul, PSNIP_SAFE__SIZE_MAX_RT) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) 
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) + +#if !defined(PSNIP_SAFE_NO_FIXED) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT8) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, add, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, sub, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, mul, (-0x7fLL-1), 0x7f) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT8) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, add, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, sub, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, mul, 0xff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint8_t, uint8, 0xff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint8_t, uint8, 0xff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT16) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, add, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, sub, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, mul, (-32767-1), 0x7fff) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int16_t, int16, (-32767-1), 0x7fff) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int16_t, int16, (-32767-1), 0x7fff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, add, UShortAdd) 
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, sub, UShortSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, mul, UShortMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT16) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, add, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, sub, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, mul, 0xffff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint16_t, uint16, 0xffff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint16_t, uint16, 0xffff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT32) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, add, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, sub, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, mul, (-0x7fffffffLL-1), 0x7fffffffLL) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT32) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, add, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, sub, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, mul, 0xffffffffUL) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint32_t, uint32, 0xffffffffUL) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint32_t, uint32, 0xffffffffUL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT64) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, add, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, 
sub, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, mul, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#else
+PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#endif
+PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, mul)
+#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, add, ULongLongAdd)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, sub, ULongLongSub)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, mul, ULongLongMult)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT64)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, add, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, sub, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, mul, 0xffffffffffffffffULL)
+#else
+PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+#endif
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+
+#endif /* !defined(PSNIP_SAFE_NO_FIXED) */
+
+#define PSNIP_SAFE_C11_GENERIC_SELECTION(res, op) \
+  _Generic((*res), \
+           char: psnip_safe_char_##op, \
+           unsigned char: psnip_safe_uchar_##op, \
+           short: psnip_safe_short_##op, \
+           unsigned short: psnip_safe_ushort_##op, \
+           int: psnip_safe_int_##op, \
+           unsigned int: psnip_safe_uint_##op, \
+           long: psnip_safe_long_##op, \
+           unsigned long: psnip_safe_ulong_##op, \
+           long long: psnip_safe_llong_##op, \
+           unsigned long long: psnip_safe_ullong_##op)
+
+#define PSNIP_SAFE_C11_GENERIC_BINARY_OP(op, res, a, b) \
+  PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, a, b)
+#define PSNIP_SAFE_C11_GENERIC_UNARY_OP(op, res, v) \
+  PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, v)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+#define psnip_safe_add(res, a, b) !__builtin_add_overflow(a, b, res)
+#define psnip_safe_sub(res, a, b) !__builtin_sub_overflow(a, b, res)
+#define psnip_safe_mul(res, a, b) !__builtin_mul_overflow(a, b, res)
+/* There are no __builtin_div_overflow/__builtin_mod_overflow builtins,
+ * so div and mod fall back to the C11 generic selections above. */
+#define psnip_safe_div(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(div, res, a, b)
+#define psnip_safe_mod(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mod, res, a, b)
+#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v)
+
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* There are no fixed-length or size selections because they cause an
+ * error about _Generic specifying two compatible types.
Hopefully + * this doesn't cause problems on exotic platforms, but if it does + * please let me know and I'll try to figure something out. */ + +#define psnip_safe_add(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(add, res, a, b) +#define psnip_safe_sub(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(sub, res, a, b) +#define psnip_safe_mul(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mul, res, a, b) +#define psnip_safe_div(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(div, res, a, b) +#define psnip_safe_mod(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mod, res, a, b) +#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v) +#endif + +#if !defined(PSNIP_SAFE_HAVE_BUILTINS) && (defined(PSNIP_SAFE_EMULATE_NATIVE) || defined(PSNIP_BUILTIN_EMULATE_NATIVE)) +# define __builtin_sadd_overflow(a, b, res) (!psnip_safe_int_add(res, a, b)) +# define __builtin_saddl_overflow(a, b, res) (!psnip_safe_long_add(res, a, b)) +# define __builtin_saddll_overflow(a, b, res) (!psnip_safe_llong_add(res, a, b)) +# define __builtin_uadd_overflow(a, b, res) (!psnip_safe_uint_add(res, a, b)) +# define __builtin_uaddl_overflow(a, b, res) (!psnip_safe_ulong_add(res, a, b)) +# define __builtin_uaddll_overflow(a, b, res) (!psnip_safe_ullong_add(res, a, b)) + +# define __builtin_ssub_overflow(a, b, res) (!psnip_safe_int_sub(res, a, b)) +# define __builtin_ssubl_overflow(a, b, res) (!psnip_safe_long_sub(res, a, b)) +# define __builtin_ssubll_overflow(a, b, res) (!psnip_safe_llong_sub(res, a, b)) +# define __builtin_usub_overflow(a, b, res) (!psnip_safe_uint_sub(res, a, b)) +# define __builtin_usubl_overflow(a, b, res) (!psnip_safe_ulong_sub(res, a, b)) +# define __builtin_usubll_overflow(a, b, res) (!psnip_safe_ullong_sub(res, a, b)) + +# define __builtin_smul_overflow(a, b, res) (!psnip_safe_int_mul(res, a, b)) +# define __builtin_smull_overflow(a, b, res) (!psnip_safe_long_mul(res, a, b)) +# define __builtin_smulll_overflow(a, b, res) (!psnip_safe_llong_mul(res, a, b)) +# define __builtin_umul_overflow(a, b, res) (!psnip_safe_uint_mul(res, a, b)) +# define __builtin_umull_overflow(a, b, res) (!psnip_safe_ulong_mul(res, a, b)) +# define __builtin_umulll_overflow(a, b, res) (!psnip_safe_ullong_mul(res, a, b)) +#endif + +#endif /* !defined(PSNIP_SAFE_H) */ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h new file mode 100644 index 0000000000000000000000000000000000000000..764a4440ee4973dd6506c3c5d467e5c5a260e428 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/strptime.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+#pragma once
+
+#include <time.h>
+
+#include "arrow/util/visibility.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A less featureful implementation of strptime() for platforms lacking
+// a standard implementation (e.g. Windows).
+ARROW_EXPORT char* arrow_strptime(const char* __restrict, const char* __restrict,
+                                  struct tm* __restrict);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h
new file mode 100644
index 0000000000000000000000000000000000000000..a33cdf8610dd6aa3b22bc24eef8ea3f871b8be9f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash.h
@@ -0,0 +1,18 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "arrow/vendored/xxhash/xxhash.h"
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h
new file mode 100644
index 0000000000000000000000000000000000000000..a18e8c762daaaaa0db8974232a876315ed6ca6f2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h
@@ -0,0 +1,6773 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Header File
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other materials provided with the
+ *      distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +/*! + * @mainpage xxHash + * + * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed + * limits. + * + * It is proposed in four flavors, in three families: + * 1. @ref XXH32_family + * - Classic 32-bit hash function. Simple, compact, and runs on almost all + * 32-bit and 64-bit systems. + * 2. @ref XXH64_family + * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most + * 64-bit systems (but _not_ 32-bit systems). + * 3. @ref XXH3_family + * - Modern 64-bit and 128-bit hash function family which features improved + * strength and performance across the board, especially on smaller data. + * It benefits greatly from SIMD and 64-bit without requiring it. + * + * Benchmarks + * --- + * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. + * The open source benchmark program is compiled with clang v10.0 using -O3 flag. + * + * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity | + * | -------------------- | ------- | ----: | ---------------: | ------------------: | + * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 | + * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 | + * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 | + * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 | + * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 | + * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 | + * | RAM sequential read | | N/A | 28.0 GB/s | N/A | + * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 | + * | City64 | | 64 | 22.0 GB/s | 76.6 | + * | T1ha2 | | 64 | 22.0 GB/s | 99.0 | + * | City128 | | 128 | 21.7 GB/s | 57.7 | + * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 | + * | XXH64() | | 64 | 19.4 GB/s | 71.0 | + * | SpookyHash | | 64 | 19.3 GB/s | 53.2 | + * | Mum | | 64 | 18.0 GB/s | 67.0 | + * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 | + * | XXH32() | | 32 | 9.7 GB/s | 71.9 | + * | City32 | | 32 | 9.1 GB/s | 66.0 | + * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 | + * | Murmur3 | | 32 | 3.9 GB/s | 56.1 | + * | SipHash* | | 64 | 3.0 GB/s | 43.2 | + * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 | + * | HighwayHash | | 64 | 1.4 GB/s | 6.0 | + * | FNV64 | | 64 | 1.2 GB/s | 62.7 | + * | Blake2* | | 256 | 1.1 GB/s | 5.1 | + * | SHA1* | | 160 | 0.8 GB/s | 5.6 | + * | MD5* | | 128 | 0.6 GB/s | 7.8 | + * @note + * - Hashes which require a specific ISA extension are noted. SSE2 is also noted, + * even though it is mandatory on x64. + * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic + * by modern standards. + * - Small data velocity is a rough average of algorithm's efficiency for small + * data. For more accurate information, see the wiki. + * - More benchmarks and strength tests are found on the wiki: + * https://github.com/Cyan4973/xxHash/wiki + * + * Usage + * ------ + * All xxHash variants use a similar API. Changing the algorithm is a trivial + * substitution. + * + * @pre + * For functions which take an input and length parameter, the following + * requirements are assumed: + * - The range from [`input`, `input + length`) is valid, readable memory. + * - The only exception is if the `length` is `0`, `input` may be `NULL`. + * - For C++, the objects must have the *TriviallyCopyable* property, as the + * functions access bytes directly as if it was an array of `unsigned char`. 
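+ *
+ * As a minimal sketch of the `length == 0` precondition above (the helper
+ * name is illustrative, not part of the library):
+ * @code{.c}
+ *   #include <stddef.h>
+ *   #include "xxhash.h"
+ *
+ *   // With length == 0, a NULL input is explicitly permitted.
+ *   XXH64_hash_t hash_empty(void)
+ *   {
+ *       return XXH64(NULL, 0, 0);
+ *   }
+ * @endcode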
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ *   #include <string.h>
+ *   #include "xxhash.h"
+ *
+ *   // Example for a function which hashes a null terminated string with XXH32().
+ *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ *   {
+ *       // NULL pointers are only valid if the length is zero
+ *       size_t length = (string == NULL) ? 0 : strlen(string);
+ *       return XXH32(string, length, seed);
+ *   }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ *   #include <stdio.h>
+ *   #include <assert.h>
+ *   #include "xxhash.h"
+ *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ *   XXH64_hash_t hashFile(FILE* f)
+ *   {
+ *       // Allocate a state struct. Do not just use malloc() or new.
+ *       XXH3_state_t* state = XXH3_createState();
+ *       assert(state != NULL && "Out of memory!");
+ *       // Reset the state to start a new hashing session.
+ *       XXH3_64bits_reset(state);
+ *       char buffer[4096];
+ *       size_t count;
+ *       // Read the file in chunks
+ *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ *           // Run update() as many times as necessary to process the data
+ *           XXH3_64bits_update(state, buffer, count);
+ *       }
+ *       // Retrieve the finalized hash. This will not change the state.
+ *       XXH64_hash_t result = XXH3_64bits_digest(state);
+ *       // Free the state. Do not use free().
+ *       XXH3_freeState(state);
+ *       return result;
+ *   }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ *  INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #include "xxhash.h"
+ * @endcode
+ */
+#  define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #define XXH_IMPLEMENTATION
+ *   #include "xxhash.h"
+ * @endcode
+ */
+#  define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
+ * Use these build macros to inline xxhash into the target unit.
+ * Inlining improves performance on small inputs, especially when the length is
+ * expressed as a compile-time constant:
+ *
+ *   https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ *
+ * It also keeps xxHash symbols private to the unit, so they are not exported.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_INLINE_ALL
+ *   #include "xxhash.h"
+ * @endcode
+ * Do not compile and link xxhash.o as a separate object, as it is not useful.
+ */
+#  define XXH_INLINE_ALL
+#  undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif + + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/*! @brief Marks a global symbol. 
*/ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) +/* XXH3_128bits */ +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) +# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define 
XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+*  Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+#  ifdef XXH_EXPORT
+#    define XXH_PUBLIC_API __declspec(dllexport)
+#  elif XXH_IMPORT
+#    define XXH_PUBLIC_API __declspec(dllimport)
+#  endif
+# else
+#  define XXH_PUBLIC_API   /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF  __attribute__((const))
+# define XXH_PUREF   __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF  /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    8
+#define XXH_VERSION_RELEASE  2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Common basic types
+******************************/
+#include <stddef.h>   /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+    XXH_OK = 0, /*!< OK */
+    XXH_ERROR   /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+*  32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint32_t XXH32_hash_t;
+
+#else
+#   include <limits.h>
+#   if UINT_MAX == 0xFFFFFFFFUL
+      typedef unsigned int XXH32_hash_t;
+#   elif ULONG_MAX == 0xFFFFFFFFUL
+      typedef unsigned long XXH32_hash_t;
+#   else
+#     error "unsupported platform: need a 32-bit type"
+#   endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ *   and 64-bit systems, and offers true 64/128 bit hash results.
+ * + * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families + * @see @ref XXH32_impl for implementation details + * @{ + */ + +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * See @ref single_shot_example "Single Shot Example" for an example. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +#ifndef XXH_NO_STREAM +/*! + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * @see streaming_example at the top of @ref xxhash.h for an example. + */ + +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; + +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. 
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *  @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+    unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*!
@cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it has been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+*  64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#  include <stdint.h>
+   typedef uint64_t XXH64_hash_t;
+#else
+#  include <limits.h>
+#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+     /* LP64 ABI says uint64_t is unsigned long */
+     typedef unsigned long XXH64_hash_t;
+#  else
+     /* the following type must have a width of 64-bit */
+     typedef unsigned long long XXH64_hash_t;
+#  endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ *   and offers true 64/128 bit hash results.
+ *   It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit hash. + * + * @see + * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/*! + * @brief The opaque state struct for the XXH64 streaming API. + * + * @see XXH64_state_s for details. + */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! + * @brief Allocates an @ref XXH64_state_t. + * + * Must be freed with XXH64_freeState(). + * @return An allocated XXH64_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t. + * + * Must be allocated with XXH64_createState(). + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH64_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash64 value from that state. 
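+ *
+ * As an illustration of the reset/update/digest cycle described above, a
+ * minimal sketch (the helper name is invented for this example; allocation
+ * failure handling is omitted):
+ * @code{.c}
+ *   XXH64_hash_t hash_two_parts(const void* p1, size_t n1,
+ *                               const void* p2, size_t n2)
+ *   {
+ *       XXH64_state_t* const state = XXH64_createState();
+ *       XXH64_reset(state, 0);                         // seed = 0
+ *       XXH64_update(state, p1, n1);                   // feed data in chunks
+ *       XXH64_update(state, p2, n2);
+ *       XXH64_hash_t const hash = XXH64_digest(state); // state is unaffected
+ *       XXH64_freeState(state);
+ *       return hash;
+ *   }
+ * @endcode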
+ */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ +/******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); + +#ifndef XXH_NO_XXH3 + +/*! + * @} + * ************************************************************************ + * @defgroup XXH3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. + * + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generate exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/*! + * @brief 64-bit unseeded variant of XXH3. 
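+ *
+ * A minimal one-shot sketch (the buffer and its length are illustrative):
+ * @code{.c}
+ *   const char msg[] = "hello";
+ *   XXH64_hash_t const hash = XXH3_64bits(msg, sizeof(msg) - 1);
+ * @endcode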
+ * + * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms + * @see + * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants + * @see + * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief 64-bit seeded variant of XXH3 + * + * This variant generates a custom secret on the fly based on default secret + * altered using the `seed` value. + * + * While this operation is decently fast, note that it's not completely free. + * + * @note + * seed == 0 produces the same results as @ref XXH3_64bits(). + * + * @param input The data to hash + * @param length The length + * @param seed The 64-bit seed to alter the state. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ +#define XXH3_SECRET_SIZE_MIN 136 + +/*! + * @brief 64-bit variant of XXH3 with a custom "secret". + * + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing "XXH3_generateSecret()" instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); + + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + */ + +/*! + * @brief The state struct for the XXH3 streaming API. + * + * @see XXH3_state_s for details. + */ +typedef struct XXH3_state_s XXH3_state_t; +XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH3_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. 
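+ *
+ * Copying a state lets a shared prefix be hashed only once; a sketch
+ * (identifiers are illustrative, error handling omitted):
+ * @code{.c}
+ *   XXH3_state_t* const base = XXH3_createState();
+ *   XXH3_state_t* const fork = XXH3_createState();
+ *   XXH3_64bits_reset(base);
+ *   XXH3_64bits_update(base, prefix, prefix_len);  // common prefix
+ *   XXH3_copyState(fork, base);                    // duplicate the progress
+ *   XXH3_64bits_update(base, a, a_len);            // hash of prefix + a
+ *   XXH3_64bits_update(fork, b, b_len);            // hash of prefix + b
+ * @endcode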
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed     The 64-bit seed to alter the state.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *  @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+*  XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
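+ *
+ * A sketch of reading the two 64-bit halves (variable names are
+ * illustrative):
+ * @code{.c}
+ *   XXH128_hash_t const h128 = XXH3_128bits(data, len);
+ *   XXH64_hash_t const lo = h128.low64;   // value & 0xFFFFFFFFFFFFFFFF
+ *   XXH64_hash_t const hi = h128.high64;  // value >> 64
+ * @endcode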
+ * + * Stored in little endian order, although the fields themselves are in native + * endianness. + */ +typedef struct { + XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */ + XXH64_hash_t high64; /*!< `value >> 64` */ +} XXH128_hash_t; + +/*! + * @brief Unseeded 128-bit variant of XXH3 + * + * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead + * for shorter inputs. + * + * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms + * @see + * XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants + * @see + * XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len); +/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); +/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + * + * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits(). + * Use already declared XXH3_createState() and XXH3_freeState(). + * + * All reset and streaming functions have the same meaning as their 64-bit counterparts. + */ + +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update(). + * Digest will be equivalent to `XXH3_128bits()`. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); + +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update(). + * Digest will be equivalent to `XXH3_128bits_withSeed()`. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the state. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); +/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); + +/*! + * @brief Consumes a block of @p input to an @ref XXH3_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t. + * + * @note + * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated XXH3 128-bit hash value from that state. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ + +/* The following helper functions make it possible to compare XXH128_hash_t values. + * Since XXH128_hash_t is a structure, this capability is not offered by the language. + * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL. */ + +/*! + * XXH128_isEqual(): + * Return: 1 if `h1` and `h2` are equal, 0 if they are not. + */ +XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); + +/*! + * @brief Compares two @ref XXH128_hash_t. + * This comparator is compatible with stdlib's `qsort()`/`bsearch()`. + * + * @return: >0 if *h128_1 > *h128_2 + * =0 if *h128_1 == *h128_2 + * <0 if *h128_1 < *h128_2 + */ +XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2); + + +/******* Canonical representation *******/ +typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; + + +/*! + * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t. + * + * @param dst The @ref XXH128_canonical_t pointer to write to. + * @param hash The @ref XXH128_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash); + +/*! + * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t. + * + * @param src The @ref XXH128_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src); + + +#endif /* !XXH_NO_XXH3 */ +#endif /* XXH_NO_LONG_LONG */ + +/*! + * @} + */ +#endif /* XXHASH_H_5627135585666179 */ + + + +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) +#define XXHASH_H_STATIC_13879238742 +/* **************************************************************************** + * This section contains declarations which are not guaranteed to remain stable. + * They may change in future versions, becoming incompatible with a different + * version of the library. + * These declarations should only be used with static linking. + * Never use them in association with dynamic linking! + ***************************************************************************** */ + +/* + * These definitions are only present to allow static allocation + * of XXH states, on stack or in a struct, for example.
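+ * An illustrative sketch of such a static allocation (using the public + * streaming API declared above; not normative usage): + * + * #define XXH_STATIC_LINKING_ONLY + * #include "xxhash.h" + * + * XXH64_hash_t hash_on_stack(const void* p, size_t n) + * { + * XXH3_state_t state; // statically allocated: no malloc() involved + * XXH3_INITSTATE(&state); // see XXH3_INITSTATE() below + * XXH3_64bits_reset(&state); + * XXH3_64bits_update(&state, p, n); + * return XXH3_64bits_digest(&state); + * } + *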
+ * Never **ever** access their members directly. + */ + +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. + * @see XXH64_state_s, XXH3_state_s + */ +struct XXH32_state_s { + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read or write to it. */ +}; /* typedef'd to XXH32_state_t */ + + +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ +struct XXH64_state_s { + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways */ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ +}; /* typedef'd to XXH64_state_t */ + +#ifndef XXH_NO_XXH3 + +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ +# include <stdalign.h> +# define XXH_ALIGN(n) alignas(n) +#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ +/* In C++ alignas() is a keyword */ +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) +#else +# define XXH_ALIGN(n) /* disabled */ +#endif + +/* Old GCC versions only accept the attribute after the type in structures. */ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ + && defined(__GNUC__) +# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) +#else +# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type +#endif + +/*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64bits_update(), XXH3_128bits_update(). + */ +#define XXH3_INTERNALBUFFER_SIZE 256 + +/*! + * @internal + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ +#define XXH3_SECRET_DEFAULT_SIZE 192 + +/*! + * @internal + * @brief Structure for XXH3 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type. + * Never use this definition in combination with a dynamic library. + * This allows fields to safely be changed in the future. + * + * @note ** This structure has a strict alignment requirement of 64 bytes!! ** + * Do not allocate this with `malloc()` or `new`, + * it will not be sufficiently aligned. + * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation. + * + * Typedef'd to @ref XXH3_state_t. + * Never access the members of this struct directly. + * + * @see XXH3_INITSTATE() for stack initialization. + * @see XXH3_createState(), XXH3_freeState(). + * @see XXH32_state_s, XXH64_state_s + */ +struct XXH3_state_s { + XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); + /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */ + XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); + /*!< Used to store a custom secret generated from a seed. */ + XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); + /*!< The internal buffer. @see XXH32_state_s::mem32 */ + XXH32_hash_t bufferedSize; + /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */ + XXH32_hash_t useSeed; + /*!< Reserved field. Needed for padding on 64-bit. */ + size_t nbStripesSoFar; + /*!< Number of stripes processed. */ + XXH64_hash_t totalLen; + /*!< Total length hashed. 64-bit even on 32-bit targets. */ + size_t nbStripesPerBlock; + /*!< Number of stripes per block. */ + size_t secretLimit; + /*!< Size of @ref customSecret or @ref extSecret */ + XXH64_hash_t seed; + /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */ + XXH64_hash_t reserved64; + /*!< Reserved field. */ + const unsigned char* extSecret; + /*!< Reference to an external secret for the _withSecret variants, NULL + * for other variants. */ + /* note: there may be some padding at the end due to alignment on 64 bytes */ +}; /* typedef'd to XXH3_state_t */ + +#undef XXH_ALIGN_MEMBER + +/*! + * @brief Initializes a stack-allocated `XXH3_state_s`. + * + * When the @ref XXH3_state_t structure is merely emplaced on the stack, + * it should be initialized with XXH3_INITSTATE() or a memset() + * in case its first reset uses XXH3_NNbits_reset_withSeed(). + * This init can be omitted if the first reset uses default or _withSecret mode. + * This operation isn't necessary when the state is created with XXH3_createState(). + * Note that this doesn't prepare the state for a streaming operation, + * it's still necessary to use XXH3_NNbits_reset*() afterwards. + */ +#define XXH3_INITSTATE(XXH3_state_ptr) \ + do { \ + XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \ + tmp_xxh3_state_ptr->seed = 0; \ + tmp_xxh3_state_ptr->extSecret = NULL; \ + } while(0) + + +/*! + * Simple alias to the pre-selected XXH3_128bits variant. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); + + +/* === Experimental API === */ +/* Symbols defined below must be considered tied to a specific library version. */ + +/*! + * XXH3_generateSecret(): + * + * Derive a high-entropy secret from any user-defined content, named customSeed. + * The generated secret can be used in combination with `*_withSecret()` functions. + * The `_withSecret()` variants are useful to provide a higher level of protection + * than a 64-bit seed, as it becomes much more difficult for an external actor to + * guess how to impact the calculation logic.
+ * + * The function accepts as input a custom seed of any length and any content, + * and derives from it a high-entropy secret of length @p secretSize into an + * already allocated buffer @p secretBuffer. + * + * The generated secret can then be used with any `*_withSecret()` variant. + * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(), + * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret() + * are part of this list. They all accept a `secret` parameter + * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN) + * _and_ feature very high entropy (consist of random-looking bytes). + * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can + * be employed to ensure proper quality. + * + * @p customSeed can be anything. It can have any size, even small ones, + * and its content can be anything, even "poor entropy" sources such as a bunch + * of zeroes. The resulting `secret` will nonetheless provide all required qualities. + * + * @pre + * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN + * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + * + * Example code: + * @code{.c} + * #include <stdio.h> + * #include <string.h> + * #include <stdlib.h> + * #define XXH_STATIC_LINKING_ONLY // expose unstable API + * #include "xxhash.h" + * // Hashes argv[2] using the entropy from argv[1]. + * int main(int argc, char* argv[]) + * { + * char secret[XXH3_SECRET_SIZE_MIN]; + * if (argc != 3) { return 1; } + * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1])); + * XXH64_hash_t h = XXH3_64bits_withSecret( + * argv[2], strlen(argv[2]), + * secret, sizeof(secret) + * ); + * printf("%016llx\n", (unsigned long long) h); + * } + * @endcode + */ +XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize); + +/*! + * @brief Generate the same secret as the _withSeed() variants. + * + * The generated secret can be used in combination with + * `*_withSecret()` and `_withSecretandSeed()` variants. + * + * Example C++ `std::string` hash class: + * @code{.cpp} + * #include <string> + * #define XXH_STATIC_LINKING_ONLY // expose unstable API + * #include "xxhash.h" + * // Slow, seeds each time + * class HashSlow { + * XXH64_hash_t seed; + * public: + * HashSlow(XXH64_hash_t s) : seed{s} {} + * size_t operator()(const std::string& x) const { + * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)}; + * } + * }; + * // Fast, caches the seeded secret for future uses. + * class HashFast { + * unsigned char secret[XXH3_SECRET_SIZE_MIN]; + * public: + * HashFast(XXH64_hash_t s) { + * XXH3_generateSecret_fromSeed(secret, s); + * } + * size_t operator()(const std::string& x) const { + * return size_t{ + * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret)) + * }; + * } + * }; + * @endcode + * @param secretBuffer A writable buffer of @ref XXH3_SECRET_SIZE_MIN bytes + * @param seed The 64-bit seed to derive the secret from. + */ +XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed); + +/*! + * These variants generate hash values using either + * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes) + * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX). + * + * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`. + * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB). + * `_withSecret()` has to generate the masks on the fly for "small" keys, + * which requires more instructions than the _withSeed() variants. + * Therefore, the _withSecretandSeed() variant combines the best of both worlds. + * + * When @p secret has been generated by XXH3_generateSecret_fromSeed(), + * this variant produces *exactly* the same results as the `_withSeed()` variant, + * hence offering only a pure speed benefit on "large" input, + * by skipping the need to regenerate the secret for every large input. + * + * Another usage scenario is to hash the secret to a 64-bit hash value, + * for example with XXH3_64bits(), which then becomes the seed, + * and then employ both the seed and the secret in _withSecretandSeed(). + * On top of speed, an added benefit is that each bit in the secret + * has a 50% chance to swap each bit in the output, via its impact on the seed. + * + * This is not guaranteed when using the secret directly in "small data" scenarios, + * because only portions of the secret are employed for small data. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed); +/*! @copydoc XXH3_64bits_withSecretandSeed() */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#ifndef XXH_NO_STREAM +/*! @copydoc XXH3_64bits_withSecretandSeed() */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +/*! @copydoc XXH3_64bits_withSecretandSeed() */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, + XXH_NOESCAPE const void* secret, size_t secretSize, + XXH64_hash_t seed64); +#endif /* !XXH_NO_STREAM */ + +#endif /* !XXH_NO_XXH3 */ +#endif /* XXH_NO_LONG_LONG */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# define XXH_IMPLEMENTATION +#endif + +#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + + +/* ======================================================================== */ +/* ======================================================================== */ +/* ======================================================================== */ + + +/*-********************************************************************** + * xxHash implementation + *-********************************************************************** + * xxHash's implementation used to be hosted inside xxhash.c. + * + * However, inlining requires the implementation to be visible to the compiler, + * and hence to be included alongside the header. + * Previously, the implementation was hosted inside xxhash.c, + * which was then #included when inlining was activated. + * This construction created issues with a few build and install systems, + * as it required xxhash.c to be stored in the /include directory. + * + * The xxHash implementation is now directly integrated within xxhash.h. + * As a consequence, xxhash.c is no longer needed in /include. + * + * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined, + * xxhash.h only exposes the prototypes and public symbols, + * while xxhash.c can be built into an object file xxhash.o + * which can then be linked into the final binary. + ************************************************************************/ + +#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ + || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) +# define XXH_IMPLEM_13a8737387 + +/* ************************************* +* Tuning parameters +***************************************/ + +/*! + * @defgroup tuning Tuning parameters + * @{ + * + * Various macros to control xxHash's behavior. + */ +#ifdef XXH_DOXYGEN +/*! + * @brief Define this to disable 64-bit code. + * + * Useful if you are only using the @ref XXH32_family and have a strict C90 compiler. + */ +# define XXH_NO_LONG_LONG +# undef XXH_NO_LONG_LONG /* don't actually */ +/*! + * @brief Controls how unaligned memory is accessed. + * + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. + * + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * + * The below switch allows selection of a different access method + * in the search for improved performance. + * + * @par Possible options: + * + * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` + * @par + * Use `memcpy()`. Safe and portable. Note that most modern compilers will + * eliminate the function call and treat it as an unaligned access. + * + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))` + * @par + * Depends on compiler extensions and is therefore not portable. + * This method is safe _if_ your compiler supports it, + * and *generally* as fast or faster than `memcpy`. + * + * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast + * @par + * Casts directly and dereferences. This method doesn't depend on the + * compiler, but it violates the C standard as it directly dereferences an + * unaligned pointer. It can generate buggy code on targets which do not + * support unaligned memory accesses, but in some circumstances, it's the + * only known way to get the most performance. + * + * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift + * @par + * Also portable. This can generate the best code on old compilers which don't + * inline small `memcpy()` calls, and it might also be faster on big-endian + * systems which lack a native byteswap instruction. However, some compilers + * will emit literal byteshifts even if the target supports unaligned access. + * + * + * @warning + * Methods 1 and 2 rely on implementation-defined behavior. Use these with + * care, as what works on one compiler/platform/optimization level may cause + * another to read garbage data or even crash. + * + * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * + * Prefer these methods in priority order (0 > 3 > 1 > 2) + */ +# define XXH_FORCE_MEMORY_ACCESS 0 + +/*! + * @def XXH_SIZE_OPT + * @brief Controls how much xxHash optimizes for size. + * + * xxHash, when compiled, tends to result in a rather large binary size. This + * is mostly due to heavy usage of forced inlining and constant folding of the + * @ref XXH3_family to increase performance. + * + * However, some developers prefer size over speed. This option can + * significantly reduce the size of the generated code.
When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed + * comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies the + * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, + * and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single-shot functions just use the + * streaming API. + */ +# define XXH_SIZE_OPT 0 + +/*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips, + * which are platforms known to offer good unaligned memory access performance. + * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ +# define XXH_FORCE_ALIGN_CHECK 0 + +/*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control over whether to inline or not. + * + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. + */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. This + * happens when xxHash is compiled into a global symbol. Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers + * that are *sometimes* force-inlined on -Og, and it is impossible to automatically + * detect this optimization level.
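+ * + * @note + * As an illustrative reminder (standard usage, stated here for context): the + * inlined code paths are enabled by defining @ref XXH_INLINE_ALL before + * including the header: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode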
+ */ +# define XXH3_INLINE_SECRET 0 + +/*! + * @def XXH32_ENDJMP + * @brief Whether to use a jump for `XXH32_finalize`. + * + * For performance, `XXH32_finalize` uses multiple branches in the finalizer. + * This is generally preferable for performance, + * but depending on the exact architecture, a jmp may be preferable. + * + * This setting can only possibly make a difference for very small inputs. + */ +# define XXH32_ENDJMP 0 + +/*! + * @internal + * @brief Redefines old internal names. + * + * For compatibility with code that uses xxHash's internals before the names + * were changed to improve namespacing. There is no other reason to use this. + */ +# define XXH_OLD_NAMES +# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ + +/*! + * @def XXH_NO_STREAM + * @brief Disables the streaming API. + * + * When xxHash is not inlined and the streaming functions are not used, disabling + * the streaming functions can improve code size significantly, especially with + * the @ref XXH3_family which tends to make constant folded copies of itself. + */ +# define XXH_NO_STREAM +# undef XXH_NO_STREAM /* don't actually */ +#endif /* XXH_DOXYGEN */ +/*! + * @} + */ + +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ + /* prefer __packed__ structures (method 1) for GCC + * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy + * which for some reason does unaligned loads. */ +# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED)) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +#ifndef XXH_SIZE_OPT + /* default to 1 for -Os or -Oz */ +# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__) +# define XXH_SIZE_OPT 1 +# else +# define XXH_SIZE_OPT 0 +# endif +#endif + +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ + /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */ +# if XXH_SIZE_OPT >= 1 || \ + defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + +#ifndef XXH_NO_INLINE_HINTS +# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# define XXH_NO_INLINE_HINTS 1 +# else +# define XXH_NO_INLINE_HINTS 0 +# endif +#endif + +#ifndef XXH3_INLINE_SECRET +# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \ + || !defined(XXH_INLINE_ALL) +# define XXH3_INLINE_SECRET 0 +# else +# define XXH3_INLINE_SECRET 1 +# endif +#endif + +#ifndef XXH32_ENDJMP +/* generally preferable for performance */ +# define XXH32_ENDJMP 0 +#endif + +/*! + * @defgroup impl Implementation + * @{ + */ + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +#if defined(XXH_NO_STREAM) +/* nothing */ +#elif defined(XXH_NO_STDLIB) + +/* When requesting to disable any mention of stdlib, + * the library loses the ability to invoke malloc() / free(). + * In practice, it means that functions like `XXH*_createState()` + * will always fail, and return NULL. + * This flag is useful in situations where + * xxhash.h is integrated into some kernel, embedded or limited environment + * without access to dynamic allocation.
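+ * Callers must then rely on statically allocated states (exposed via + * XXH_STATIC_LINKING_ONLY). An illustrative sketch, where `buf` / `len` + * are hypothetical: + * + * XXH32_state_t state; // stack allocated: no malloc() involved + * XXH32_reset(&state, 0); + * XXH32_update(&state, buf, len); + * XXH32_hash_t h = XXH32_digest(&state); + *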
+ */ + +static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; } +static void XXH_free(void* p) { (void)p; } + +#else + +/* + * Modify the local functions below should you wish to use + * different memory routines for malloc() and free() + */ +#include <stdlib.h> + +/*! + * @internal + * @brief Modify this function to use a different routine than malloc(). + */ +static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); } + +/*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ +static void XXH_free(void* p) { free(p); } + +#endif /* XXH_NO_STDLIB */ + +#include <string.h> + +/*! + * @internal + * @brief Modify this function to use a different routine than memcpy(). + */ +static void* XXH_memcpy(void* dest, const void* src, size_t size) +{ + return memcpy(dest,src,size); +} + +#include <limits.h> /* ULLONG_MAX */ + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio warning fix */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + +#if XXH_NO_INLINE_HINTS /* disable inlining hints */ +# if defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __attribute__((unused)) +# else +# define XXH_FORCE_INLINE static +# endif +# define XXH_NO_INLINE static +/* enable inlining hints */ +#elif defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) +#elif defined(_MSC_VER) /* Visual Studio */ +# define XXH_FORCE_INLINE static __forceinline +# define XXH_NO_INLINE static __declspec(noinline) +#elif defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static +#else +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static +#endif + +#if XXH3_INLINE_SECRET +# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE +#else +# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE +#endif + + +/* ************************************* +* Debug +***************************************/ +/*! + * @ingroup tuning + * @def XXH_DEBUGLEVEL + * @brief Sets the debugging level. + * + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. + */ +#ifndef XXH_DEBUGLEVEL +# ifdef DEBUGLEVEL /* backwards compat */ +# define XXH_DEBUGLEVEL DEBUGLEVEL +# else +# define XXH_DEBUGLEVEL 0 +# endif +#endif + +#if (XXH_DEBUGLEVEL>=1) +# include <assert.h> /* note: can still be disabled with NDEBUG */ +# define XXH_ASSERT(c) assert(c) +#else +# if defined(__INTEL_COMPILER) +# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c)) +# else +# define XXH_ASSERT(c) XXH_ASSUME(c) +# endif +#endif + +/* note: use after variable declarations */ +#ifndef XXH_STATIC_ASSERT +# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0) +# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# else +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) +# endif +# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) +#endif + +/*!
+ * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (e.g. eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. + * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar(). + */ +#if defined(__GNUC__) || defined(__clang__) +# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var)) +#else +# define XXH_COMPILER_GUARD(var) ((void)0) +#endif + +/* Specifically for NEON vectors, which use the "w" constraint on + * Clang. */ +#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) +# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var)) +#else +# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) +#endif + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; +#endif +typedef XXH32_hash_t xxh_u32; + +#ifdef XXH_OLD_NAMES +# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif + +/* *** Memory access *** */ + +/*! + * @internal + * @fn xxh_u32 XXH_read32(const void* ptr) + * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit native endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32(const void* ptr) + * @brief Reads an unaligned 32-bit little endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readBE32(const void* ptr) + * @brief Reads an unaligned 32-bit big endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit big endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) + * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is + * always @ref XXH_alignment::XXH_unaligned. + * + * @param ptr The pointer to read from. + * @param align Whether @p ptr is aligned. + * @pre + * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte + * aligned. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32.
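+ * (Illustrative note: like the other methods, this one is normally selected + * externally, e.g. by compiling with `-DXXH_FORCE_MEMORY_ACCESS=3`.)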
+ */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianness *** */ + +/*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + + + +/* + * C23 and future versions have standard "unreachable()". 
+ * Once it has been implemented reliably we can add it as an + * additional case: + * + * ``` + * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) + * # include <stddef.h> + * # ifdef unreachable + * # define XXH_UNREACHABLE() unreachable() + * # endif + * #endif + * ``` + * + * Note C++23 also has std::unreachable() which can be detected + * as follows: + * ``` + * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L) + * # include <utility> + * # define XXH_UNREACHABLE() std::unreachable() + * #endif + * ``` + * NB: `__cpp_lib_unreachable` is defined in the `<version>` header. + * We don't use that as including `<utility>` in `extern "C"` blocks + * doesn't work on GCC 12. + */ + +#if XXH_HAS_BUILTIN(__builtin_unreachable) +# define XXH_UNREACHABLE() __builtin_unreachable() + +#elif defined(_MSC_VER) +# define XXH_UNREACHABLE() __assume(0) + +#else +# define XXH_UNREACHABLE() +#endif + +#if XXH_HAS_BUILTIN(__builtin_assume) +# define XXH_ASSUME(c) __builtin_assume(c) +#else +# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); } +#endif + +/*! + * @internal + * @def XXH_rotl32(x,r) + * @brief 32-bit rotate left. + * + * @param x The 32-bit integer to be rotated. + * @param r The number of bits to rotate. + * @pre + * @p r > 0 && @p r < 32 + * @note + * @p x and @p r may be evaluated multiple times. + * @return The rotated result. + */ +#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ + && XXH_HAS_BUILTIN(__builtin_rotateleft64) +# define XXH_rotl32 __builtin_rotateleft32 +# define XXH_rotl64 __builtin_rotateleft64 +/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ +#elif defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) +#endif + +/*! + * @internal + * @fn xxh_u32 XXH_swap32(xxh_u32 x) + * @brief A 32-bit byteswap. + * + * @param x The 32-bit integer to byteswap. + * @return @p x, byteswapped. + */ +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static xxh_u32 XXH_swap32 (xxh_u32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* *************************** +* Memory reads +*****************************/ + +/*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. + */ +typedef enum { + XXH_aligned, /*!< Aligned */ + XXH_unaligned /*!< Possibly unaligned */ +} XXH_alignment; + +/* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. + * + * This is ideal for older compilers which don't inline memcpy. + */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u32)bytePtr[1] << 8) + | ((xxh_u32)bytePtr[2] << 16) + | ((xxh_u32)bytePtr[3] << 24); +} + +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] + | ((xxh_u32)bytePtr[2] << 8) + | ((xxh_u32)bytePtr[1] << 16) + | ((xxh_u32)bytePtr[0] << 24); +} + +#else +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ?
XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +/*! @ingroup public */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +/*! + * @} + * @defgroup XXH32_impl XXH32 implementation + * @ingroup impl + * + * Details on the XXH32 implementation. + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +/*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. 
+ * + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 hash) +{ + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(hash); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(hash); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); + } + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. 
+ * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + xxh_u32 h32; + + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=16) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; +} + + +/*! 
@ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + + do { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); + } else { + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! + * @ingroup XXH32_family + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ +/*! + * @} + * @ingroup impl + * @{ + */ +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. 
Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64(xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ +/*! + * @} + * @defgroup XXH64_impl XXH64 implementation + * @ingroup impl + * + * Details on the XXH64 implementation. 
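+ *
+ * Editorial note (not upstream): the public one-shot entry point mirrors
+ * XXH32, e.g.
+ * @code
+ *   XXH64_hash_t const h = XXH64(buf, bufSize, seed);
+ * @endcode
+ *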
+ * @{ + */ +/* #define rather that static const, to be used as initializers */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ +#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif + +/*! @copydoc XXH32_round */ +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; + return acc; +} + +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; +} + +/*! @copydoc XXH32_avalanche */ +static xxh_u64 XXH64_avalanche(xxh_u64 hash) +{ + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). + */ +static XXH_PUREF xxh_u64 +XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ + if (ptr==NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + hash ^= k1; + hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + } + if (len >= 4) { + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + } + while (len > 0) { + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; + --len; + } + return XXH64_avalanche(hash); +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. 
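+ * @see XXH32_endian_align() for the 32-bit equivalent.
+ *
+ * Editorial sketch (not upstream): callers derive @p align from the pointer
+ * value, e.g.
+ * @code
+ *   XXH_alignment const a =
+ *       ((((size_t)input) & 7) == 0) ? XXH_aligned : XXH_unaligned;
+ * @endcode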
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+    xxh_u64 h64;
+    if (input==NULL) XXH_ASSERT(len == 0);
+
+    if (len>=32) {
+        const xxh_u8* const bEnd = input + len;
+        const xxh_u8* const limit = bEnd - 31;
+        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+        xxh_u64 v2 = seed + XXH_PRIME64_2;
+        xxh_u64 v3 = seed + 0;
+        xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+        } while (input<limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
+            + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + XXH_PRIME64_5;
+    }
+
+    h64 += (xxh_u64) len;
+
+    return XXH64_finalize(h64, input, len, align);
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_state_t state;
+    XXH64_reset(&state, seed);
+    XXH64_update(&state, (const xxh_u8*)input, len);
+    return XXH64_digest(&state);
+#else
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+    }   }
+
+    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/******* Hash Streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+    XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+    XXH_ASSERT(statePtr != NULL);
+    memset(statePtr, 0, sizeof(*statePtr));
+    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+    statePtr->v[1] = seed + XXH_PRIME64_2;
+    statePtr->v[2] = seed + 0;
+    statePtr->v[3] = seed - XXH_PRIME64_1;
+    return XXH_OK;
+}
+
+/*!
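+ * Editorial sketch (not part of upstream xxHash): feeding the input in
+ * arbitrary chunks yields the same digest as the one-shot XXH64():
+ * @code
+ *   XXH64_state_t st;
+ *   (void)XXH64_reset(&st, seed);
+ *   (void)XXH64_update(&st, buf, bufSize/2);
+ *   (void)XXH64_update(&st, (const char*)buf + bufSize/2, bufSize - bufSize/2);
+ *   XXH64_hash_t const h = XXH64_digest(&st);   // == XXH64(buf, bufSize, seed)
+ * @endcode
+ *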
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */
+#  define XXH_RESTRICT   /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
+#  define XXH_RESTRICT   restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+   || (defined (__clang__)) \
+   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+#  define XXH_RESTRICT   __restrict
+#else
+#  define XXH_RESTRICT   /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
+  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+  || defined(__clang__)
+#    define XXH_likely(x) __builtin_expect(x, 1)
+#    define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+#    define XXH_likely(x) (x)
+#    define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+#  ifdef __has_include
+#    define XXH_HAS_INCLUDE(x) __has_include(x)
+#  else
+#    define XXH_HAS_INCLUDE(x) 0
+#  endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  if defined(__ARM_FEATURE_SVE)
+#    include <arm_sve.h>
+#  endif
+#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+   || (defined(_M_ARM) && _M_ARM >= 7) \
+   || defined(_M_ARM64) || defined(_M_ARM64EC) \
+   || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+#    define inline __inline__  /* circumvent a clang bug */
+#    include <arm_neon.h>
+#    undef inline
+#  elif defined(__AVX2__)
+#    include <immintrin.h>
+#  elif defined(__SSE2__)
+#    include <emmintrin.h>
+#  endif
+#endif
+
+#if defined(_MSC_VER)
+#  include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ *   xxh_u64 x;
+ *   x ^= (x >> 47); // good
+ *   x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ *   x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ *   // note: funnel shifts are not usually cheap.
+ *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ *   x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ *    32 bits in the shift.
+ *  - The shift result will always fit in the lower 32 bits, and therefore,
+ *    we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ *  - Usable unaligned access
+ *  - A 32-bit or 64-bit ALU
+ *      - If 32-bit, a decent ADC instruction
+ *  - A 32 or 64-bit multiply with a 64-bit result
+ *  - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * internal macro XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
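+ *
+ * Editorial example (not upstream): an AVX2 build would be expected to use
+ * @code
+ *   #define XXH_VECTOR    XXH_AVX2
+ *   #define XXH_ACC_ALIGN 32
+ * @endcode
+ * matching the auto-detected table below.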
+ */
+#  define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+#  define XXH_SCALAR 0
+#  define XXH_SSE2   1
+#  define XXH_AVX2   2
+#  define XXH_AVX512 3
+#  define XXH_NEON   4
+#  define XXH_VSX    5
+#  define XXH_SVE    6
+#endif
+
+#ifndef XXH_VECTOR    /* can be defined on command line */
+#  if defined(__ARM_FEATURE_SVE)
+#    define XXH_VECTOR XXH_SVE
+#  elif ( \
+        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+   ) && ( \
+        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+   )
+#    define XXH_VECTOR XXH_NEON
+#  elif defined(__AVX512F__)
+#    define XXH_VECTOR XXH_AVX512
+#  elif defined(__AVX2__)
+#    define XXH_VECTOR XXH_AVX2
+#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+#    define XXH_VECTOR XXH_SSE2
+#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+     || (defined(__s390x__) && defined(__VEC__)) \
+     && defined(__GNUC__) /* TODO: IBM XL */
+#    define XXH_VECTOR XXH_VSX
+#  else
+#    define XXH_VECTOR XXH_SCALAR
+#  endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+#  ifdef _MSC_VER
+#    pragma warning(once : 4606)
+#  else
+#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+#  endif
+#  undef XXH_VECTOR
+#  define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+#  if defined(XXH_X86DISPATCH)
+#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
+#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
+#     define XXH_ACC_ALIGN 8
+#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
+#     define XXH_ACC_ALIGN 32
+#  elif XXH_VECTOR == XXH_NEON  /* neon */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_VSX   /* vsx */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
+#     define XXH_ACC_ALIGN 64
+#  elif XXH_VECTOR == XXH_SVE   /* sve */
+#     define XXH_ACC_ALIGN 64
+#  endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+#  define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_ALIASING __attribute__((may_alias))
+#else
+#  define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ *   -O2 -mavx2 -march=haswell
+ * or
+ *   -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + +#if XXH_VECTOR == XXH_NEON + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 + * optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif + +/*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. 
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
+ *  |:----------------------|:--------------------|----------:|-----------:|------:|
+ *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
+ *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
+ *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
+ *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
+ * meaning it effectively becomes a worse 4.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+#   define XXH3_NEON_LANES 6
+#  else
+#   define XXH3_NEON_LANES XXH_ACC_NB
+#  endif
+# endif
+#endif  /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+#  pragma push_macro("bool")
+#  pragma push_macro("vector")
+#  pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+#  undef bool
+#  undef vector
+#  undef pixel
+
+#  if defined(__s390x__)
+#    include <s390intrin.h>
+#  else
+#    include <altivec.h>
+#  endif
+
+/* Restore the original macro values, if applicable. */
+#  pragma pop_macro("pixel")
+#  pragma pop_macro("vector")
+#  pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+#  if defined(__BIG_ENDIAN__) \
+     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#    define XXH_VSX_BE 1
+#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+#    warning "-maltivec=be is not recommended. Please use native endianness."
+#    define XXH_VSX_BE 1
+#  else
+#    define XXH_VSX_BE 0
+#  endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+#    define XXH_vec_revb vec_revb
+#  else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+    return vec_perm(val, val, vByteSwap);
+}
+#  endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+    xxh_u64x2 ret;
+    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+    ret = XXH_vec_revb(ret);
+# endif
+    return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on the version.
+ */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+#  define XXH_vec_mulo vec_mulo
+#  define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+#  define XXH_vec_mulo __builtin_altivec_vmulouw
+#  define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
+    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
+    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
+    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
+    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
+    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
+    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+    acc = svadd_u64_x(mask, acc, mul);                               \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
+#else
+#  if XXH_SIZE_OPT >= 1
+#    define XXH_PREFETCH(ptr) (void)(ptr)
+#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+#  else
+#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
+#  endif
+#endif  /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE <
XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. 
+ * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. 
*/ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len ; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
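+ *
+ * Editorial worked example (not upstream): in XXH3_len_1to3_64b below, the
+ * two-byte input { 0x61, 0x62 } ("ab") packs to
+ * @code
+ *   combined = (0x62 << 24) | (0x61 << 16) | (2 << 8) | 0x62   // 0x62610262
+ * @endcode
+ * which is then XORed against a seed-adjusted mix of two 32-bit secret
+ * samples and avalanched.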
+ */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. 
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
+  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
+    /*
+     * UGLY HACK:
+     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+     * slower code.
+     *
+     * By forcing seed64 into a register, we disrupt the cost model and
+     * cause it to scalarize. See `XXH32_round()`
+     *
+     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+     * GCC 9.2, despite both emitting scalar code.
+     *
+     * GCC generates much better scalar code than Clang for the rest of XXH3,
+     * which is why finding a more optimal codepath is of interest.
+     */
+    XXH_COMPILER_GUARD(seed64);
+#endif
+    {   xxh_u64 const input_lo = XXH_readLE64(input);
+        xxh_u64 const input_hi = XXH_readLE64(input+8);
+        return XXH3_mul128_fold64(
+            input_lo ^ (XXH_readLE64(secret)   + seed64),
+            input_hi ^ (XXH_readLE64(secret+8) - seed64)
+        );
+    }
+}
+
+/* For mid range keys, XXH3 uses a Mum-hash variant. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                     XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(16 < len && len <= 128);
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+        /* Smaller and cleaner, but slightly slower.
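+         * Editorial note (not upstream): each iteration of this loop mixes
+         * one 16-byte block at offset 16*i from the front of the input with
+         * the mirrored block ending 16*i bytes before the end, so inputs up
+         * to 128 bytes are fully covered by overlapping front/back pairs.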
+         */
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+        } while (i-- != 0);
+#else
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc += XXH3_mix16B(input+48, secret+96, seed);
+                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
+                }
+                acc += XXH3_mix16B(input+32, secret+64, seed);
+                acc += XXH3_mix16B(input+len-48, secret+80, seed);
+            }
+            acc += XXH3_mix16B(input+16, secret+32, seed);
+            acc += XXH3_mix16B(input+len-32, secret+48, seed);
+        }
+        acc += XXH3_mix16B(input+0, secret+0, seed);
+        acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+        return XXH3_avalanche(acc);
+    }
+}
+
+#define XXH3_MIDSIZE_MAX 240
+
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                      XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    #define XXH3_MIDSIZE_STARTOFFSET 3
+    #define XXH3_MIDSIZE_LASTOFFSET  17
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+        xxh_u64 acc_end;
+        unsigned int const nbRounds = (unsigned int)len / 16;
+        unsigned int i;
+        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+        for (i=0; i<8; i++) {
+            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+        }
+        /* last bytes */
+        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+        XXH_ASSERT(nbRounds >= 8);
+        acc = XXH3_avalanche(acc);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
+         * Everywhere else, it uses scalar code.
+         *
+         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+         * would still be slower than UMAAL (see XXH_mult64to128).
+         *
+         * Unfortunately, Clang doesn't handle the long multiplies properly and
+         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+         * scalarized into an ugly mess of VMOV.32 instructions.
+         *
+         * This mess is difficult to avoid without turning autovectorization
+         * off completely, but they are usually relatively minor and/or not
+         * worth it to fix.
+         *
+         * This loop is the easiest to fix, as unlike XXH32, this pragma
+         * _actually works_ because it is a loop vectorization instead of an
+         * SLP vectorization.
+         */
+        #pragma clang loop vectorize(disable)
+#endif
+        for (i=8 ; i < nbRounds; i++) {
+            /*
+             * Prevents clang from unrolling the acc loop and interleaving with this one.
+             */
+            XXH_COMPILER_GUARD(acc);
+            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+        }
+        return XXH3_avalanche(acc + acc_end);
+    }
+}
+
+
+/* =======     Long Keys     ======= */
+
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+
+#ifdef XXH_OLD_NAMES
+#  define STRIPE_LEN XXH_STRIPE_LEN
+#  define ACC_NB XXH_ACC_NB
+#endif
+
+#ifndef XXH_PREFETCH_DIST
+#  ifdef __clang__
+#    define XXH_PREFETCH_DIST 320
+#  else
+#    if (XXH_VECTOR == XXH_AVX512)
+#      define XXH_PREFETCH_DIST 512
+#    else
+#      define XXH_PREFETCH_DIST 384
+#    endif
+#  endif  /* __clang__ */
+#endif  /* XXH_PREFETCH_DIST */
+
+/*
+ * These macros are to generate an XXH3_accumulate() function.
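+ * (Editorial example, not upstream: XXH3_ACCUMULATE_TEMPLATE(avx2) defines
+ * XXH3_accumulate_avx2(), which calls XXH3_accumulate_512_avx2().)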
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
+void                                                        \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
+                       const xxh_u8* XXH_RESTRICT input,    \
+                       const xxh_u8* XXH_RESTRICT secret,   \
+                       size_t nbStripes)                    \
+{                                                           \
+    size_t n;                                               \
+    for (n = 0; n < nbStripes; n++ ) {                      \
+        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
+        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
+        XXH3_accumulate_512_##name(                         \
+                 acc,                                       \
+                 in,                                        \
+                 secret + n*XXH_SECRET_CONSUME_RATE);       \
+    }                                                       \
+}
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+    XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+    typedef int64_t xxh_i64;
+#else
+    /* the following type must have a width of 64-bit */
+    typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
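+ *
+ * Editorial scalar model (not upstream; one 64-bit lane per iteration,
+ * mirroring the SIMD code below):
+ * @code
+ *   data_val = XXH_readLE64(input  + 8*lane);
+ *   data_key = data_val ^ XXH_readLE64(secret + 8*lane);
+ *   acc[lane ^ 1] += data_val;                          // swapped input
+ *   acc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF,  // 32x32 -> 64
+ *                               data_key >> 32);
+ * @endcode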
+ */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
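+ *
+ * Editorial scalar model (not upstream; per 64-bit lane):
+ * @code
+ *   acc64  = acc[lane];
+ *   acc64 ^= acc64 >> 47;                        // xorshift
+ *   acc64 ^= XXH_readLE64(secret + 8*lane);      // mix in secret
+ *   acc[lane] = acc64 * XXH_PRIME32_1;           // scramble multiply
+ * @endcode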
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/
+        const __m256i* const xsecret = (const __m256i *) secret;
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* data_vec    = xinput[i]; */
+            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
+            /* key_vec     = xsecret[i]; */
+            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
+            /* data_key    = data_vec ^ key_vec; */
+            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
+            /* data_key_lo = data_key >> 32; */
+            __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
+            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
+            /* xacc[i] += swap(data_vec); */
+            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
+            /* xacc[i] += product; */
+            xacc[i] = _mm256_add_epi64(product, sum);
+    }   }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 31) == 0);
+    {   __m256i* const xacc = (__m256i*) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+        const __m256i* const xsecret = (const __m256i *) secret;
+        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m256i const acc_vec     = xacc[i];
+            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
+            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
+            /* xacc[i] ^= xsecret; */
+            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
+            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
+            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
+            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
+            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+    (void)(&XXH_writeLE64);
+    XXH_PREFETCH(customSecret);
+    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+
+        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
+              __m256i*       dest = (      __m256i*) customSecret;
+
+# if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+         *   - not extract the secret from sse registers in the internal loop
+         *   - use fewer common registers, and avoid pushing these registers onto the stack
+         */
+        XXH_COMPILER_GUARD(dest);
+# endif
+        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dest & 31) == 0);
+
+        /* GCC -O2 needs the loop unrolled manually */
+        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+        dest[5]
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
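+         *
+         * A note on the shuffle below (an explanatory sketch): _mm_mul_epu32
+         * reads only dwords 0 and 2 of each operand, so building data_key_hi
+         * with _MM_SHUFFLE(0, 3, 0, 1) merely has to drop the high halves
+         * into those even slots; for this purpose it is interchangeable with
+         * _mm_srli_epi64(data_key, 32).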
*/
+        const __m128i* const xsecret = (const __m128i *) secret;
+        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m128i const acc_vec     = xacc[i];
+            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
+            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
+            /* xacc[i] ^= xsecret[i]; */
+            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
+            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
+            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
+            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+    (void)(&XXH_writeLE64);
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+# else
+        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+# endif
+        int i;
+
+        const void* const src16 = XXH3_kSecret;
+        __m128i* dst16 = (__m128i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
+         *   - not extract the secret from sse registers in the internal loop
+         *   - use fewer common registers, and avoid pushing these registers onto the stack
+         */
+        XXH_COMPILER_GUARD(dst16);
+# endif
+        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dst16 & 15) == 0);
+
+        for (i=0; i < nbRounds; ++i) {
+            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+    }   }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_NEON)
+
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+                 void const* XXH_RESTRICT secret, size_t lane);
+
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret, size_t lane);
+
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can have up to 15% speedup depending on the
+ * CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers, instead of the other platforms, which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the most optimal amount of lanes for Cortexes is 6,
+ * there need to be *three* versions of the accumulate operation used
+ * to cover the remaining 2 lanes.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
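+ *
+ * Sketch of the resulting partitioning with XXH3_NEON_LANES == 6, the Cortex
+ * sweet spot mentioned above (XXH_ACC_NB == 8 total lanes):
+ *
+ *   lanes 0..3 : NEON, two uint64x2_t vectors in the 4-lane fast path
+ *   lanes 4..5 : NEON, one uint64x2_t vector via the 2-lane tail path
+ *   lanes 6..7 : XXH3_scalarRound(), dual-issued alongside the NEON work
+ *
+ * where each lane performs the same math as the scalar path:
+ *
+ *   data_key     = readLE64(input + 8*lane) ^ readLE64(secret + 8*lane);
+ *   acc[lane^1] += readLE64(input + 8*lane);              // swapped addition
+ *   acc[lane]   += (data_key & 0xFFFFFFFF) * (data_key >> 32);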
+ */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; + + size_t i; +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. */ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. 
+ * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); + } + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); + } + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. 
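+         * (kPrimeLo below feeds vmlal_u32 with the low 32-bit halves, while
+         * kPrimeHi keeps the prime pre-positioned in the odd 32-bit slots so a
+         * plain vmulq_u32 yields the `hi(x) * prime << 32` term directly; the
+         * expanded comment inside the loop walks through this trick.)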
*/ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif + } + } +} +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + xacc[i] = acc_vec; + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = 
svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + +/* scalar variants - universal */ + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
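+ *
+ * In one line, the round computes (everything mod 2^64):
+ *
+ *   acc[lane] = ((acc[lane] ^ (acc[lane] >> 47)) ^ readLE64(secret + 8*lane)) * XXH_PRIME32_1;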
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret,
+                         size_t lane)
+{
+    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
+    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
+    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+    XXH_ASSERT(lane < XXH_ACC_NB);
+    {
+        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+        xxh_u64 acc64 = xacc[lane];
+        acc64 = XXH_xorshift64(acc64, 47);
+        acc64 ^= key64;
+        acc64 *= XXH_PRIME32_1;
+        xacc[lane] = acc64;
+    }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    size_t i;
+    for (i=0; i < XXH_ACC_NB; i++) {
+        XXH3_scalarScrambleRound(acc, secret, i);
+    }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    /*
+     * We need a separate pointer for the hack below,
+     * which requires a non-const pointer.
+     * Any decent compiler will optimize this out otherwise.
+     */
+    const xxh_u8* kSecretPtr = XXH3_kSecret;
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+    /*
+     * UGLY HACK:
+     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+     * placed sequentially, in order, at the top of the unrolled loop.
+     *
+     * While MOVK is great for generating constants (2 cycles for a 64-bit
+     * constant compared to 4 cycles for LDR), it fights for bandwidth with
+     * the arithmetic instructions.
+     *
+     *   I   L   S
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  MOVK
+     *  ADD
+     *  SUB      STR
+     *           STR
+     * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that kSecretPtr has been changed), the pipelines are used more
+     * efficiently:
+     *   I   L   S
+     *      LDR
+     *  ADD LDR
+     *  SUB      STR
+     *           STR
+     *
+     * See XXH3_NEON_LANES for details on the pipeline.
+     *
+     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+     *   without hack: 2654.4 MB/s
+     *   with hack:    3202.9 MB/s
+     */
+    XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+        int i;
+        for (i=0; i < nbRounds; i++) {
+            /*
+             * The asm hack causes the compiler to assume that kSecretPtr aliases with
+             * customSecret, and on aarch64, this prevented LDP from merging two
+             * loads together for free. Putting the loads together before the stores
+             * properly generates LDP.
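+             *
+             * Net effect of the loop, spelled out (a sketch of what each
+             * 16-byte chunk of customSecret receives):
+             *
+             *   writeLE64(customSecret + 16*i,     readLE64(kSecret + 16*i)     + seed64);
+             *   writeLE64(customSecret + 16*i + 8, readLE64(kSecret + 16*i + 8) - seed64);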
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + XXH3_accumulate_512(acc, p, 
secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+    }   }
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+    return XXH3_mul128_fold64(
+               acc[0] ^ XXH_readLE64(secret),
+               acc[1] ^ XXH_readLE64(secret+8) );
+}
+
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+    xxh_u64 result64 = start;
+    size_t i = 0;
+
+    for (i = 0; i < 4; i++) {
+        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+        /*
+         * UGLY HACK:
+         * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+         * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+         * XXH3_64bits, len == 256, Snapdragon 835:
+         *   without hack: 2063.7 MB/s
+         *   with hack:    2560.7 MB/s
+         */
+        XXH_COMPILER_GUARD(result64);
+#endif
+    }
+
+    return XXH3_avalanche(result64);
+}
+
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+                           const void* XXH_RESTRICT secret, size_t secretSize,
+                           XXH3_f_accumulate f_acc,
+                           XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12, where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64;
+    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the vector loop.
+ */ +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ +#if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); +#endif + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) +{ + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed) +{ + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (length <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize); +} + + +/* === XXH3 streaming === */ +#ifndef XXH_NO_STREAM +/* + * Malloc's a pointer that is always aligned to align. + * + * This must be freed with `XXH_alignedFree()`. + * + * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte + * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2 + * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON. + * + * This underalignment previously caused a rather obvious crash which went + * completely unnoticed due to XXH3_createState() not actually being tested. + * Credit to RedSpah for noticing this bug. + * + * The alignment is done manually: Functions like posix_memalign or _mm_malloc + * are avoided: To maintain portability, we would have to write a fallback + * like this anyways, and besides, testing for the existence of library + * functions without relying on external build tools is impossible. + * + * The method is simple: Overallocate, manually align, and store the offset + * to the original behind the returned pointer. + * + * Align must be a power of 2 and 8 <= align <= 128. + */ +static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align) +{ + XXH_ASSERT(align <= 128 && align >= 8); /* range check */ + XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ + XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ + { /* Overallocate to make room for manual realignment and an offset byte */ + xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); + if (base != NULL) { + /* + * Get the offset needed to align this pointer. + * + * Even if the returned pointer is aligned, there will always be + * at least one byte to store the offset to the original pointer. + */ + size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ + /* Add the offset for the now-aligned pointer */ + xxh_u8* ptr = base + offset; + + XXH_ASSERT((size_t)ptr % align == 0); + + /* Store the offset immediately before the returned pointer. */ + ptr[-1] = (xxh_u8)offset; + return ptr; + } + return NULL; + } +} +/* + * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass + * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. + */ +static void XXH_alignedFree(void* p) +{ + if (p != NULL) { + xxh_u8* ptr = (xxh_u8*)p; + /* Get the offset byte we added in XXH_malloc. */ + xxh_u8 offset = ptr[-1]; + /* Free the original malloc'd pointer */ + xxh_u8* base = ptr - offset; + XXH_free(base); + } +} +/*! @ingroup XXH3_family */ +/*! + * @brief Allocate an @ref XXH3_state_t. + * + * Must be freed with XXH3_freeState(). + * @return An allocated XXH3_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) +{ + XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state==NULL) return NULL; + XXH3_INITSTATE(state); + return state; +} + +/*! 
@ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * Must be allocated with XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void +XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) +{ + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; +} + +/*! + * @internal + * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). + * + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. 
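+ *
+ * The stripe/secret geometry it maintains, sketched from the definitions
+ * elsewhere in this file (secretLimit == secretSize - XXH_STRIPE_LEN):
+ *
+ *   stripe n of a block : f_acc reads XXH_STRIPE_LEN input bytes against
+ *                         secret + n * XXH_SECRET_CONSUME_RATE
+ *   end of each block   : f_scramble(acc, secret + secretLimit)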
+ *
+ * @param acc                Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr  In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock  Number of stripes in a block
+ * @param input              Input pointer
+ * @param nbStripes          Number of stripes to process
+ * @param secret             Secret pointer
+ * @param secretLimit        Offset of the last block in @p secret
+ * @param f_acc              Pointer to an XXH3_accumulate implementation
+ * @param f_scramble         Pointer to an XXH3_scrambleAcc implementation
+ * @return                   Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+                    XXH3_f_accumulate f_acc,
+                    XXH3_f_scrambleAcc f_scramble)
+{
+    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+    /* Process full blocks */
+    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+        /* Process the initial partial block... */
+        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+        do {
+            /* Accumulate and scramble */
+            f_acc(acc, input, initialSecret, nbStripesThisIter);
+            f_scramble(acc, secret + secretLimit);
+            input += nbStripesThisIter * XXH_STRIPE_LEN;
+            nbStripes -= nbStripesThisIter;
+            /* Then continue the loop with the full block size */
+            nbStripesThisIter = nbStripesPerBlock;
+            initialSecret = secret;
+        } while (nbStripes >= nbStripesPerBlock);
+        *nbStripesSoFarPtr = 0;
+    }
+    /* Process a partial block */
+    if (nbStripes > 0) {
+        f_acc(acc, input, initialSecret, nbStripes);
+        input += nbStripes * XXH_STRIPE_LEN;
+        *nbStripesSoFarPtr += nbStripes;
+    }
+    /* Return end pointer */
+    return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+#   define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+            const xxh_u8* XXH_RESTRICT input, size_t len,
+            XXH3_f_accumulate f_acc,
+            XXH3_f_scrambleAcc f_scramble)
+{
+    if (input==NULL) {
+        XXH_ASSERT(len == 0);
+        return XXH_OK;
+    }
+
+    XXH_ASSERT(state != NULL);
+    {   const xxh_u8* const bEnd = input + len;
+        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+        /* For some reason, gcc and MSVC seem to suffer greatly
+         * when operating on the accumulators directly in the state.
+         * Operating on stack space seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + + } + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
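+     *
+     * For orientation, the streaming round trip these internals serve looks
+     * like this (a sketch; NULL/error checks omitted):
+     *
+     *   XXH3_state_t* st = XXH3_createState();
+     *   XXH3_64bits_reset(st);
+     *   XXH3_64bits_update(st, buf, bufSize);     // repeat as data arrives
+     *   XXH64_hash_t h = XXH3_64bits_digest(st);  // state remains usable
+     *   XXH3_freeState(st);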
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + } else { /* bufferedSize < XXH_STRIPE_LEN */ + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
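+     * Worked example of the packing below (an illustration): len = 2 with
+     * input = { 0xAB, 0xCD } gives combinedl = 0xCDAB02CD, i.e. bytes
+     * { 0xCD, 0x02, 0xAB, 0xCD } from least to most significant, matching
+     * the len = 2 layout listed below.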
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= PRIME_MX2; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. 
+             *
+             * Uses some properties of addition and multiplication to remove the mask:
+             *
+             * Let:
+             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+             *    c = XXH_PRIME32_2
+             *
+             *    a + (b * c)
+             * Inverse Property: x + y - x == y
+             *    a + (b * (1 + c - 1))
+             * Distributive Property: x * (y + z) == (x * y) + (x * z)
+             *    a + (b * 1) + (b * (c - 1))
+             * Identity Property: x * 1 == x
+             *    a + b + (b * (c - 1))
+             *
+             * Substitute a, b, and c:
+             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+             *
+             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+             */
+            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+        }
+        /* m128 ^= XXH_swap64(m128 >> 64); */
+        m128.low64  ^= XXH_swap64(m128.high64);
+
+        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+            h128.high64 += m128.high64 * XXH_PRIME64_2;
+
+            h128.low64   = XXH3_avalanche(h128.low64);
+            h128.high64  = XXH3_avalanche(h128.high64);
+            return h128;
+    }   }
+}
+
+/*
+ * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+    XXH_ASSERT(len <= 16);
+    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+        {   XXH128_hash_t h128;
+            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
+            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
+            h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+            h128.high64 = XXH64_avalanche( seed ^ bitfliph);
+            return h128;
+    }   }
+}
+
+/*
+ * A bit slower than XXH3_mix16B, but handles multiply by zero better.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
+              const xxh_u8* secret, XXH64_hash_t seed)
+{
+    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
+    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
+    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+    return acc;
+}
+
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                      XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(16 < len && len <= 128);
+
+    {   XXH128_hash_t acc;
+        acc.low64 = len * XXH_PRIME64_1;
+        acc.high64 = 0;
+
+#if XXH_SIZE_OPT >= 1
+    {
+        /* Smaller, but slightly slower.
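+         * A single backwards loop stands in for the unrolled ladder in the
+         * #else branch below: i = (len - 1) / 32 starts at the innermost
+         * 32-byte pair and the loop works outward, ending with the outermost
+         * pair (input, input + len - 16), so it issues the same
+         * XXH128_mix32B calls in the same order.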
+         */
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+        } while (i-- != 0);
+    }
+#else
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+                }
+                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+            }
+            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+        }
+        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                       XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    {   XXH128_hash_t acc;
+        unsigned i;
+        acc.low64 = len * XXH_PRIME64_1;
+        acc.high64 = 0;
+        /*
+         * We set `i` to offset + 32, so that the unchanged `len` can be
+         * used as the loop's upper bound. This reaches a sweet spot
+         * where both x86 and aarch64 get simple address generation (agen)
+         * and good codegen for the loop.
+         */
+        for (i = 32; i < 160; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input  + i - 32,
+                                input  + i - 16,
+                                secret + i - 32,
+                                seed);
+        }
+        acc.low64 = XXH3_avalanche(acc.low64);
+        acc.high64 = XXH3_avalanche(acc.high64);
+        /*
+         * NB: `i <= len` will duplicate the last 32 bytes if
+         * len % 32 was zero. This is an unfortunate necessity to keep
+         * the hash result stable.
+         */
+        for (i=160; i <= len; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input + i - 32,
+                                input + i - 16,
+                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+                                seed);
+        }
+        /* last bytes */
+        acc = XXH128_mix32B(acc,
+                            input + len - 16,
+                            input + len - 32,
+                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+                            (XXH64_hash_t)0 - seed);
+
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                            XXH3_f_accumulate f_acc,
+                            XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    {   XXH128_hash_t h128;
+        h128.low64  = XXH3_mergeAccs(acc,
+                                     secret + XXH_SECRET_MERGEACCS_START,
+                                     (xxh_u64)len * XXH_PRIME64_1);
+        h128.high64 = XXH3_mergeAccs(acc,
+                                     secret + secretSize
+                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                     ~((xxh_u64)len * XXH_PRIME64_2));
+        return h128;
+    }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
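+ * (A guess, not a measured claim here: keeping the large vectorized
+ * long-input loop out of line keeps the short-length fast paths in the
+ * callers compact.)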
+ */
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
+                           XXH64_hash_t seed64,
+                           const void* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64; (void)secret; (void)secretLen;
+    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
+                                       XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's important for performance to pass @p secretLen (when it's static)
+ * to the compiler, so that it can properly optimize the vectorized loop.
+ *
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
+                              XXH64_hash_t seed64,
+                              const void* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)seed64;
+    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
+                                       XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
+                                     XXH64_hash_t seed64,
+                                     XXH3_f_accumulate f_acc,
+                                     XXH3_f_scrambleAcc f_scramble,
+                                     XXH3_f_initCustomSecret f_initSec)
+{
+    if (seed64 == 0)
+        return XXH3_hashLong_128b_internal(input, len,
+                                           XXH3_kSecret, sizeof(XXH3_kSecret),
+                                           f_acc, f_scramble);
+    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+        f_initSec(secret, seed64);
+        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
+                                           f_acc, f_scramble);
+    }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed(const void* input, size_t len,
+                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
+{
+    (void)secret; (void)secretLen;
+    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
+                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
+                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_128bits_internal(const void* input, size_t len,
+                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+                      XXH3_hashLong128_f f_hl128)
+{
+    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+    /*
+     * If any action needs to be taken when the `secret` conditions are not
+     * respected, it should be done here.
+     * For now, it's a contract pre-condition.
+     * Adding a check and a branch here would cost performance at every hash.
+     */
+    if (len <= 16)
+        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+    if (len <= 128)
+        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+    if (len <= XXH3_MIDSIZE_MAX)
+        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+    return f_hl128(input, len, seed64, secret, secretLen);
+}
+
+
+/* === Public XXH128 API === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
+{
+    return XXH3_128bits_internal(input, len, 0,
+                                 XXH3_kSecret, sizeof(XXH3_kSecret),
+                                 XXH3_hashLong_128b_default);
+}
+
+/*!
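+ * A minimal usage sketch (buffer names are illustrative, not part of this API):
+ * @code
+ *   XXH128_hash_t const h = XXH3_128bits_withSecret(myData, myDataSize, mySecret, mySecretSize);
+ * @endcode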
+ * @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+    return XXH3_128bits_internal(input, len, 0,
+                                 (const xxh_u8*)secret, secretSize,
+                                 XXH3_hashLong_128b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+    return XXH3_128bits_internal(input, len, seed,
+                                 XXH3_kSecret, sizeof(XXH3_kSecret),
+                                 XXH3_hashLong_128b_withSeed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+    if (len <= XXH3_MIDSIZE_MAX)
+        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+    return XXH3_128bits_withSeed(input, len, seed);
+}
+
+
+/* === XXH3 128-bit streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * All initialization and update functions are identical to the 64-bit streaming variant.
+ * The only difference is the finalization routine.
+ */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+    return XXH3_64bits_reset(statePtr);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+    return XXH3_64bits_reset_withSeed(statePtr, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+    return XXH3_64bits_update(state, input, len);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+    const unsigned char* const secret = (state->extSecret == NULL) ?
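+                 /* use the externally-installed secret when one was provided
+                  * via reset_withSecret(); otherwise fall back to the state's
+                  * internal (default or seed-derived) secret */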
+                 state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+        {   XXH128_hash_t h128;
+            h128.low64  = XXH3_mergeAccs(acc,
+                                         secret + XXH_SECRET_MERGEACCS_START,
+                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
+            h128.high64 = XXH3_mergeAccs(acc,
+                                         secret + state->secretLimit + XXH_STRIPE_LEN
+                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+            return h128;
+        }
+    }
+    /* len <= XXH3_MIDSIZE_MAX : short code */
+    if (state->seed)
+        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                   secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+#include <string.h>   /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+    /* note : XXH128_hash_t is compact, it has no padding byte */
+    return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1  > *h128_2
+ *           <0 if *h128_1  < *h128_2
+ *           =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+    /* note : bets that, in most cases, hash values are different */
+    if (hcmp) return hcmp;
+    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) {
+        hash.high64 = XXH_swap64(hash.high64);
+        hash.low64  = XXH_swap64(hash.low64);
+    }
+    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+    XXH128_hash_t h;
+    h.high64 = XXH_readBE64(src);
+    h.low64  = XXH_readBE64(src->digest + 8);
+    return h;
+}
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+    XXH_writeLE64( dst,          XXH_readLE64(dst)          ^ h128.low64 );
+    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*!
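+ * A minimal usage sketch (identifiers are illustrative; "seed material" is 13 bytes):
+ * @code
+ *   unsigned char customSecret[XXH3_SECRET_SIZE_MIN];
+ *   XXH_errorcode const rc = XXH3_generateSecret(customSecret, sizeof(customSecret), "seed material", 13);
+ * @endcode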
+ * @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(secretBuffer != NULL);
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+    /* production mode; asserts are disabled */
+    if (secretBuffer == NULL) return XXH_ERROR;
+    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+    if (customSeedSize == 0) {
+        customSeed = XXH3_kSecret;
+        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+    }
+#if (XXH_DEBUGLEVEL >= 1)
+    XXH_ASSERT(customSeed != NULL);
+#else
+    if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+    {   size_t pos = 0;
+        while (pos < secretSize) {
+            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+            pos += toCopy;
+    }   }
+
+    {   size_t const nbSeg16 = secretSize / 16;
+        size_t n;
+        XXH128_canonical_t scrambler;
+        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+        for (n=0; n