diff --git a/.gitattributes b/.gitattributes
index dfb7f5822deaf9f7fde762e1765cac442045be7c..986a27685dd3936dc4eada38d32c6fff1ed169aa 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -182,3 +182,5 @@ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 filter
 env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1500 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1500 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ffff81f355f1ddcdc19516746c61b8021477de4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h
@@ -0,0 +1,323 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "arrow/adapters/orc/options.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/memory_pool.h"
+#include "arrow/record_batch.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace adapters {
+namespace orc {
+
+/// \brief Information about an ORC stripe
+struct StripeInformation {
+  /// \brief Offset of the stripe from the start of the file, in bytes
+  int64_t offset;
+  /// \brief Length of the stripe, in bytes
+  int64_t length;
+  /// \brief Number of rows in the stripe
+  int64_t num_rows;
+  /// \brief Index of the first row of the stripe
+  int64_t first_row_id;
+};
+
+/// \class ORCFileReader
+/// \brief Read an Arrow Table or RecordBatch from an ORC file.
+class ARROW_EXPORT ORCFileReader {
+ public:
+  ~ORCFileReader();
+
+  /// \brief Creates a new ORC reader
+  ///
+  /// \param[in] file the data source
+  /// \param[in] pool a MemoryPool to use for buffer allocations
+  /// \return the returned reader object
+  static Result<std::unique_ptr<ORCFileReader>> Open(
+      const std::shared_ptr<io::RandomAccessFile>& file, MemoryPool* pool);
+
+  /// \brief Return the schema read from the ORC file
+  ///
+  /// \return the returned Schema object
+  Result<std::shared_ptr<Schema>> ReadSchema();
+
+  /// \brief Read the file as a Table
+  ///
+  /// The table will be composed of one record batch per stripe.
+  ///
+  /// \return the returned Table
+  Result<std::shared_ptr<Table>> Read();
+
+  /// \brief Read the file as a Table
+  ///
+  /// The table will be composed of one record batch per stripe.
+  ///
+  /// \param[in] schema the Table schema
+  /// \return the returned Table
+  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema);
+
+  /// \brief Read the file as a Table
+  ///
+  /// The table will be composed of one record batch per stripe.
+  ///
+  /// \param[in] include_indices the selected field indices to read
+  /// \return the returned Table
+  Result<std::shared_ptr<Table>> Read(const std::vector<int>& include_indices);
+
+  /// \brief Read the file as a Table
+  ///
+  /// The table will be composed of one record batch per stripe.
+  ///
+  /// \param[in] include_names the selected field names to read
+  /// \return the returned Table
+  Result<std::shared_ptr<Table>> Read(const std::vector<std::string>& include_names);
+
+  /// \brief Read the file as a Table
+  ///
+  /// The table will be composed of one record batch per stripe.
+  ///
+  /// \param[in] schema the Table schema
+  /// \param[in] include_indices the selected field indices to read
+  /// \return the returned Table
+  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema,
+                                      const std::vector<int>& include_indices);
+
+  /// \brief Read a single stripe as a RecordBatch
+  ///
+  /// \param[in] stripe the stripe index
+  /// \return the returned RecordBatch
+  Result<std::shared_ptr<RecordBatch>> ReadStripe(int64_t stripe);
+
+  /// \brief Read a single stripe as a RecordBatch
+  ///
+  /// \param[in] stripe the stripe index
+  /// \param[in] include_indices the selected field indices to read
+  /// \return the returned RecordBatch
+  Result<std::shared_ptr<RecordBatch>> ReadStripe(
+      int64_t stripe, const std::vector<int>& include_indices);
+
+  /// \brief Read a single stripe as a RecordBatch
+  ///
+  /// \param[in] stripe the stripe index
+  /// \param[in] include_names the selected field names to read
+  /// \return the returned RecordBatch
+  Result<std::shared_ptr<RecordBatch>> ReadStripe(
+      int64_t stripe, const std::vector<std::string>& include_names);
+
+  /// \brief Seek to the designated row. Invoking NextStripeReader() after a
+  /// seek will return a stripe reader starting from the designated row.
+  ///
+  /// \param[in] row_number the row number to seek to
+  Status Seek(int64_t row_number);
+
+  /// \brief Get a stripe level record batch iterator.
+  ///
+  /// Each record batch will have up to `batch_size` rows.
+  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
+  /// which may cause OOM issues by loading the whole stripe into memory.
+  ///
+  /// Note this will only read rows for the current stripe, not the entire
+  /// file.
+  ///
+  /// \param[in] batch_size the maximum number of rows in each record batch
+  /// \return the returned stripe reader
+  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(int64_t batch_size);
+
+  /// \brief Get a stripe level record batch iterator.
+  ///
+  /// Each record batch will have up to `batch_size` rows.
+  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
+  /// which may cause OOM issues by loading the whole stripe into memory.
+  ///
+  /// Note this will only read rows for the current stripe, not the entire
+  /// file.
+  ///
+  /// \param[in] batch_size the maximum number of rows in each record batch
+  /// \param[in] include_indices the selected field indices to read
+  /// \return the stripe reader
+  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(
+      int64_t batch_size, const std::vector<int>& include_indices);
+
+  /// \brief Get a record batch iterator for the entire file.
+  ///
+  /// Each record batch will have up to `batch_size` rows.
+  ///
+  /// \param[in] batch_size the maximum number of rows in each record batch
+  /// \param[in] include_names the selected field names to read, if not empty
+  /// (otherwise all fields are read)
+  /// \return the record batch iterator
+  Result<std::shared_ptr<RecordBatchReader>> GetRecordBatchReader(
+      int64_t batch_size, const std::vector<std::string>& include_names);
+
+  /// \brief The number of stripes in the file
+  int64_t NumberOfStripes();
+
+  /// \brief The number of rows in the file
+  int64_t NumberOfRows();
+
+  /// \brief StripeInformation for each stripe.
+  StripeInformation GetStripeInformation(int64_t stripe);
+
+  /// \brief Get the format version of the file.
+  /// Currently known values are 0.11 and 0.12.
+  ///
+  /// \return The FileVersion of the ORC file.
+  FileVersion GetFileVersion();
+
+  /// \brief Get the software instance and version that wrote this file.
+  ///
+  /// \return a user-facing string that specifies the software version
+  std::string GetSoftwareVersion();
+
+  /// \brief Get the compression kind of the file.
+  ///
+  /// \return The kind of compression in the ORC file.
+  Result<Compression::type> GetCompression();
+
+  /// \brief Get the buffer size for the compression.
+  ///
+  /// \return Number of bytes to buffer for the compression codec.
+  int64_t GetCompressionSize();
+
+  /// \brief Get the number of rows per entry in the row index.
+  /// \return the number of rows per entry in the row index, or 0 if there
+  /// is no row index.
+  int64_t GetRowIndexStride();
+
+  /// \brief Get ID of writer that generated the file.
+  ///
+  /// \return UNKNOWN_WRITER if the writer ID is undefined
+  WriterId GetWriterId();
+
+  /// \brief Get the writer id value when getWriterId() returns an unknown writer.
+  ///
+  /// \return the integer value of the writer ID.
+  int32_t GetWriterIdValue();
+
+  /// \brief Get the version of the writer.
+  ///
+  /// \return the version of the writer.
+  WriterVersion GetWriterVersion();
+
+  /// \brief Get the number of stripe statistics in the file.
+  ///
+  /// \return the number of stripe statistics
+  int64_t GetNumberOfStripeStatistics();
+
+  /// \brief Get the length of the data stripes in the file.
+  ///
+  /// \return the number of bytes in stripes
+  int64_t GetContentLength();
+
+  /// \brief Get the length of the file stripe statistics.
+  ///
+  /// \return the number of compressed bytes in the file stripe statistics
+  int64_t GetStripeStatisticsLength();
+
+  /// \brief Get the length of the file footer.
+  ///
+  /// \return the number of compressed bytes in the file footer
+  int64_t GetFileFooterLength();
+
+  /// \brief Get the length of the file postscript.
+  ///
+  /// \return the number of bytes in the file postscript
+  int64_t GetFilePostscriptLength();
+
+  /// \brief Get the total length of the file.
+  ///
+  /// \return the number of bytes in the file
+  int64_t GetFileLength();
+
+  /// \brief Get the serialized file tail.
+  /// Useful if another reader of the same file wants to avoid re-reading
+  /// the file tail. See ReadOptions.SetSerializedFileTail().
+  ///
+  /// \return a string of bytes with the file tail
+  std::string GetSerializedFileTail();
+
+  /// \brief Return the metadata read from the ORC file
+  ///
+  /// \return A KeyValueMetadata object containing the ORC metadata
+  Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata();
+
+ private:
+  class Impl;
+  std::unique_ptr<Impl> impl_;
+  ORCFileReader();
+};
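// A minimal usage sketch for the reader API above (an illustrative aside, not
// part of the vendored header). `path` is an assumed input; error handling is
// folded into arrow::Result.

#include "arrow/adapters/orc/adapter.h"
#include "arrow/io/file.h"
#include "arrow/result.h"

arrow::Result<std::shared_ptr<arrow::Table>> ReadOrcFile(const std::string& path) {
  // Open the data source and hand it to the ORC reader.
  ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open(path));
  ARROW_ASSIGN_OR_RAISE(auto reader, arrow::adapters::orc::ORCFileReader::Open(
                                         input, arrow::default_memory_pool()));
  // Eagerly materialize the whole file (one record batch per stripe).
  return reader->Read();
}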
+
+/// \class ORCFileWriter
+/// \brief Write an Arrow Table or RecordBatch to an ORC file.
+class ARROW_EXPORT ORCFileWriter {
+ public:
+  ~ORCFileWriter();
+  /// \brief Creates a new ORC writer.
+  ///
+  /// \param[in] output_stream a pointer to the io::OutputStream to write into
+  /// \param[in] write_options the ORC writer options for Arrow
+  /// \return the returned writer object
+  static Result<std::unique_ptr<ORCFileWriter>> Open(
+      io::OutputStream* output_stream,
+      const WriteOptions& write_options = WriteOptions());
+
+  /// \brief Write a table. This can be called multiple times.
+  ///
+  /// Tables passed in subsequent calls must match the schema of the table that was
+  /// written first.
+  ///
+  /// \param[in] table the Arrow table from which data is extracted.
+  /// \return Status
+  Status Write(const Table& table);
+
+  /// \brief Write a RecordBatch. This can be called multiple times.
+  ///
+  /// RecordBatches passed in subsequent calls must match the schema of the
+  /// RecordBatch that was written first.
+  ///
+  /// \param[in] record_batch the Arrow RecordBatch from which data is extracted.
+  /// \return Status
+  Status Write(const RecordBatch& record_batch);
+
+  /// \brief Close an ORC writer (orc::Writer)
+  ///
+  /// \return Status
+  Status Close();
+
+ private:
+  class Impl;
+  std::unique_ptr<Impl> impl_;
+
+ private:
+  ORCFileWriter();
+};
+
+}  // namespace orc
+}  // namespace adapters
+}  // namespace arrow
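// Companion sketch for ORCFileWriter (illustrative, with an assumed `path`):
// write a Table, then Close() to finalize the ORC footer.

#include "arrow/adapters/orc/adapter.h"
#include "arrow/io/file.h"
#include "arrow/result.h"

arrow::Status WriteOrcFile(const arrow::Table& table, const std::string& path) {
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::FileOutputStream::Open(path));
  ARROW_ASSIGN_OR_RAISE(auto writer,
                        arrow::adapters::orc::ORCFileWriter::Open(sink.get()));
  ARROW_RETURN_NOT_OK(writer->Write(table));  // may be called multiple times
  return writer->Close();
}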
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a300da678db98c24949203be7ab471a57502640
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+namespace adapters {
+
+namespace orc {
+
+enum class WriterId : int32_t {
+  kOrcJava = 0,
+  kOrcCpp = 1,
+  kPresto = 2,
+  kScritchleyGo = 3,
+  kTrino = 4,
+  kUnknown = INT32_MAX
+};
+
+enum class WriterVersion : int32_t {
+  kOriginal = 0,
+  kHive8732 = 1,
+  kHive4243 = 2,
+  kHive12055 = 3,
+  kHive13083 = 4,
+  kOrc101 = 5,
+  kOrc135 = 6,
+  kOrc517 = 7,
+  kOrc203 = 8,
+  kOrc14 = 9,
+  kMax = INT32_MAX
+};
+
+enum class CompressionStrategy : int32_t { kSpeed = 0, kCompression };
+
+class ARROW_EXPORT FileVersion {
+ private:
+  int32_t major_version_;
+  int32_t minor_version_;
+
+ public:
+  static const FileVersion& v_0_11();
+  static const FileVersion& v_0_12();
+
+  FileVersion(int32_t major, int32_t minor)
+      : major_version_(major), minor_version_(minor) {}
+
+  /**
+   * Get major version
+   */
+  int32_t major_version() const { return this->major_version_; }
+
+  /**
+   * Get minor version
+   */
+  int32_t minor_version() const { return this->minor_version_; }
+
+  bool operator==(const FileVersion& right) const {
+    return this->major_version() == right.major_version() &&
+           this->minor_version() == right.minor_version();
+  }
+
+  bool operator!=(const FileVersion& right) const { return !(*this == right); }
+
+  std::string ToString() const;
+};
+
+/// Options for the ORC Writer
+struct ARROW_EXPORT WriteOptions {
+  /// Number of rows the ORC writer writes at a time, default 1024
+  int64_t batch_size = 1024;
+  /// Which ORC file version to use, default FileVersion(0, 12)
+  FileVersion file_version = FileVersion(0, 12);
+  /// Size of each ORC stripe in bytes, default 64 MiB
+  int64_t stripe_size = 64 * 1024 * 1024;
+  /// The compression codec of the ORC file; there is no compression by default
+  Compression::type compression = Compression::UNCOMPRESSED;
+  /// The size of each compression block in bytes, default 64 KiB
+  int64_t compression_block_size = 64 * 1024;
+  /// The compression strategy, i.e. speed vs. size reduction, default
+  /// CompressionStrategy::kSpeed
+  CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
+  /// The number of rows per entry in the row index, default 10000
+  int64_t row_index_stride = 10000;
+  /// The padding tolerance, default 0.0
+  double padding_tolerance = 0.0;
+  /// The dictionary key size threshold: 0 to disable dictionary encoding,
+  /// 1 to always enable dictionary encoding, default 0.0
+  double dictionary_key_size_threshold = 0.0;
+  /// The array of columns that use the bloom filter, default empty
+  std::vector<int64_t> bloom_filter_columns;
+  /// The upper limit of the false-positive rate of the bloom filter, default 0.05
+  double bloom_filter_fpp = 0.05;
+};
+
+}  // namespace orc
+}  // namespace adapters
+}  // namespace arrow
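// Hedged example of filling in WriteOptions; the particular values are
// illustrative choices, not recommendations from this header, and ZSTD is
// only usable when the linked Arrow build enables that codec.

#include "arrow/adapters/orc/options.h"
#include "arrow/util/compression.h"

arrow::adapters::orc::WriteOptions MakeWriteOptions() {
  arrow::adapters::orc::WriteOptions options;
  options.compression = arrow::Compression::ZSTD;  // default is UNCOMPRESSED
  options.stripe_size = 128 * 1024 * 1024;         // 128 MiB stripes
  options.bloom_filter_columns = {0, 2};           // bloom filters on columns 0 and 2
  options.bloom_filter_fpp = 0.01;                 // tighter false-positive bound
  return options;
}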
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d093eddf6b598150ddb55da0e84699a5b7ef4b8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "tensorflow/core/framework/op.h"
+
+#include "arrow/type.h"
+
+// These utilities are supposed to be included in TensorFlow operators
+// that need to be compiled separately from Arrow because of ABI issues.
+// They therefore need to be header-only.
+
+namespace arrow {
+
+namespace adapters {
+
+namespace tensorflow {
+
+Status GetArrowType(::tensorflow::DataType dtype, std::shared_ptr<DataType>* out) {
+  switch (dtype) {
+    case ::tensorflow::DT_BOOL:
+      *out = arrow::boolean();
+      break;
+    case ::tensorflow::DT_FLOAT:
+      *out = arrow::float32();
+      break;
+    case ::tensorflow::DT_DOUBLE:
+      *out = arrow::float64();
+      break;
+    case ::tensorflow::DT_HALF:
+      *out = arrow::float16();
+      break;
+    case ::tensorflow::DT_INT8:
+      *out = arrow::int8();
+      break;
+    case ::tensorflow::DT_INT16:
+      *out = arrow::int16();
+      break;
+    case ::tensorflow::DT_INT32:
+      *out = arrow::int32();
+      break;
+    case ::tensorflow::DT_INT64:
+      *out = arrow::int64();
+      break;
+    case ::tensorflow::DT_UINT8:
+      *out = arrow::uint8();
+      break;
+    case ::tensorflow::DT_UINT16:
+      *out = arrow::uint16();
+      break;
+    case ::tensorflow::DT_UINT32:
+      *out = arrow::uint32();
+      break;
+    case ::tensorflow::DT_UINT64:
+      *out = arrow::uint64();
+      break;
+    default:
+      return Status::TypeError("TensorFlow data type is not supported");
+  }
+  return Status::OK();
+}
+
+Status GetTensorFlowType(std::shared_ptr<DataType> dtype, ::tensorflow::DataType* out) {
+  switch (dtype->id()) {
+    case Type::BOOL:
+      *out = ::tensorflow::DT_BOOL;
+      break;
+    case Type::UINT8:
+      *out = ::tensorflow::DT_UINT8;
+      break;
+    case Type::INT8:
+      *out = ::tensorflow::DT_INT8;
+      break;
+    case Type::UINT16:
+      *out = ::tensorflow::DT_UINT16;
+      break;
+    case Type::INT16:
+      *out = ::tensorflow::DT_INT16;
+      break;
+    case Type::UINT32:
+      *out = ::tensorflow::DT_UINT32;
+      break;
+    case Type::INT32:
+      *out = ::tensorflow::DT_INT32;
+      break;
+    case Type::UINT64:
+      *out = ::tensorflow::DT_UINT64;
+      break;
+    case Type::INT64:
+      *out = ::tensorflow::DT_INT64;
+      break;
+    case Type::HALF_FLOAT:
+      *out = ::tensorflow::DT_HALF;
+      break;
+    case Type::FLOAT:
+      *out = ::tensorflow::DT_FLOAT;
+      break;
+    case Type::DOUBLE:
+      *out = ::tensorflow::DT_DOUBLE;
+      break;
+    default:
+      return Status::TypeError("Arrow data type is not supported");
+  }
+  return arrow::Status::OK();
+}
+
+}  // namespace tensorflow
+
+}  // namespace adapters
+
+}  // namespace arrow
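// Usage sketch for the two helpers above (assumes a TensorFlow build
// environment; the round trip shown is illustrative).

arrow::Status RoundTripInt32() {
  std::shared_ptr<arrow::DataType> arrow_type;
  ARROW_RETURN_NOT_OK(
      arrow::adapters::tensorflow::GetArrowType(::tensorflow::DT_INT32, &arrow_type));
  // arrow_type now equals arrow::int32().
  ::tensorflow::DataType tf_type;
  return arrow::adapters::tensorflow::GetTensorFlowType(arrow_type, &tf_type);
}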
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..6abe866b5f6f65e3e1f1bb69728a0c53adf1943f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h
@@ -0,0 +1,233 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+/// \file abi.h Arrow C Data Interface
+///
+/// The Arrow C Data interface defines a very small, stable set
+/// of C definitions which can be easily copied into any project's
+/// source code and vendored to be used for columnar data interchange
+/// in the Arrow format. For non-C/C++ languages and runtimes,
+/// it should be almost as easy to translate the C definitions into
+/// the corresponding C FFI declarations.
+///
+/// Applications and libraries can therefore work with Arrow memory
+/// without necessarily using the Arrow libraries or reinventing
+/// the wheel. Developers can choose between tight integration
+/// with the Arrow software project or minimal integration with
+/// the Arrow format only.
+
+#pragma once
+
+#include <stdint.h>
+
+// Spec and documentation: https://arrow.apache.org/docs/format/CDataInterface.html
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef ARROW_C_DATA_INTERFACE
+#define ARROW_C_DATA_INTERFACE
+
+#define ARROW_FLAG_DICTIONARY_ORDERED 1
+#define ARROW_FLAG_NULLABLE 2
+#define ARROW_FLAG_MAP_KEYS_SORTED 4
+
+struct ArrowSchema {
+  // Array type description
+  const char* format;
+  const char* name;
+  const char* metadata;
+  int64_t flags;
+  int64_t n_children;
+  struct ArrowSchema** children;
+  struct ArrowSchema* dictionary;
+
+  // Release callback
+  void (*release)(struct ArrowSchema*);
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+struct ArrowArray {
+  // Array data description
+  int64_t length;
+  int64_t null_count;
+  int64_t offset;
+  int64_t n_buffers;
+  int64_t n_children;
+  const void** buffers;
+  struct ArrowArray** children;
+  struct ArrowArray* dictionary;
+
+  // Release callback
+  void (*release)(struct ArrowArray*);
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DATA_INTERFACE
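// Producer-side sketch for the structs above (illustrative, not part of
// abi.h): exporting an int32 schema whose format string is a static literal,
// so the release callback only has to mark the struct as released.

#include "arrow/c/abi.h"

static void ReleaseStaticSchema(struct ArrowSchema* schema) {
  // Nothing was heap-allocated; marking the struct released is all
  // the contract requires here.
  schema->release = nullptr;
}

void ExportInt32Schema(struct ArrowSchema* out) {
  *out = {};
  out->format = "i";  // C Data Interface format string for int32
  out->name = "";
  out->flags = ARROW_FLAG_NULLABLE;
  out->release = ReleaseStaticSchema;
}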
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+// Spec and documentation: https://arrow.apache.org/docs/format/CDeviceDataInterface.html
+
+// DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+// CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+// CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+// Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+// OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+// Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+// Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+// Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+// ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+// Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+// Reserved for extension
+#define ARROW_DEVICE_EXT_DEV 12
+// CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+// Unified shared memory allocated on a oneAPI non-partitioned device
+#define ARROW_DEVICE_ONEAPI 14
+// GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+// Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+struct ArrowDeviceArray {
+  // The allocated array.
+  //
+  // The buffers in the array (along with the buffers of any
+  // children) are what is allocated on the device.
+  struct ArrowArray array;
+  // The device id to identify a specific device
+  int64_t device_id;
+  // The type of device which can access this memory.
+  ArrowDeviceType device_type;
+  // An event-like object to synchronize on if needed.
+  void* sync_event;
+  // Reserved bytes for future expansion.
+  int64_t reserved[3];
+};
+
+#endif  // ARROW_C_DEVICE_DATA_INTERFACE
+
+#ifndef ARROW_C_STREAM_INTERFACE
+#define ARROW_C_STREAM_INTERFACE
+
+struct ArrowArrayStream {
+  // Callback to get the stream type
+  // (will be the same for all arrays in the stream).
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowSchema must be released independently from the stream.
+  int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
+
+  // Callback to get the next array
+  // (if no error and the array is released, the stream has ended)
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowArray must be released independently from the stream.
+  int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);
+
+  // Callback to get optional detailed error information.
+  // This must only be called if the last stream operation failed
+  // with a non-0 return code.
+  //
+  // Return value: pointer to a null-terminated character array describing
+  // the last error, or NULL if no description is available.
+  //
+  // The returned pointer is only valid until the next operation on this stream
+  // (including release).
+  const char* (*get_last_error)(struct ArrowArrayStream*);
+
+  // Release callback: release the stream's own resources.
+  // Note that arrays returned by `get_next` must be individually released.
+  void (*release)(struct ArrowArrayStream*);
+
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_STREAM_INTERFACE
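// Consumer-side sketch (illustrative): draining an ArrowArrayStream by the
// contract documented above. The schema and each array are released
// independently of the stream, and a released array signals end-of-stream.

#include <cstdio>
#include "arrow/c/abi.h"

int DrainStream(struct ArrowArrayStream* stream) {
  struct ArrowSchema schema;
  if (int code = stream->get_schema(stream, &schema)) return code;
  schema.release(&schema);

  while (true) {
    struct ArrowArray array;
    if (int code = stream->get_next(stream, &array)) {
      if (const char* msg = stream->get_last_error(stream)) {
        std::fprintf(stderr, "stream error: %s\n", msg);
      }
      stream->release(stream);
      return code;
    }
    if (array.release == nullptr) break;  // end of stream
    // ... consume array.buffers / array.children here ...
    array.release(&array);
  }
  stream->release(stream);
  return 0;
}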
+
+#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
+#define ARROW_C_DEVICE_STREAM_INTERFACE
+
+// Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
+//
+// This stream is intended to provide a stream of data on a single
+// device; if a producer wants data to be produced on multiple devices,
+// then multiple streams should be provided, one per device.
+struct ArrowDeviceArrayStream {
+  // The device that this stream produces data on.
+  ArrowDeviceType device_type;
+
+  // Callback to get the stream schema
+  // (will be the same for all arrays in the stream).
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowSchema must be released independently from the stream.
+  // The schema should be accessible via CPU memory.
+  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
+
+  // Callback to get the next array
+  // (if no error and the array is released, the stream has ended)
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowDeviceArray must be released independently from the stream.
+  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
+
+  // Callback to get optional detailed error information.
+  // This must only be called if the last stream operation failed
+  // with a non-0 return code.
+  //
+  // Return value: pointer to a null-terminated character array describing
+  // the last error, or NULL if no description is available.
+  //
+  // The returned pointer is only valid until the next operation on this stream
+  // (including release).
+  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
+
+  // Release callback: release the stream's own resources.
+  // Note that arrays returned by `get_next` must be individually released.
+  void (*release)(struct ArrowDeviceArrayStream* self);
+
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DEVICE_STREAM_INTERFACE
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..d11ccfc1fd72253600501d7de3a150944608ca06
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/array/array_base.h"
+#include "arrow/c/dlpack_abi.h"
+
+namespace arrow::dlpack {
+
+/// \brief Export Arrow array as DLPack tensor.
+///
+/// A DLManagedTensor is produced as defined by the DLPack protocol,
+/// see https://dmlc.github.io/dlpack/latest/.
+///
+/// Data types for which the protocol is supported are
+/// integer and floating-point data types.
+///
+/// The DLPack protocol only supports arrays with one contiguous
+/// memory region, which means Arrow Arrays with validity buffers
+/// are not supported.
+///
+/// \param[in] arr Arrow array
+/// \return DLManagedTensor struct
+ARROW_EXPORT
+Result<DLManagedTensor*> ExportArray(const std::shared_ptr<Array>& arr);
+
+/// \brief Get a DLDevice with an enumerator specifying the type of device
+/// the data is stored on, and the index of the device, which is 0 by
+/// default for CPU.
+///
+/// \param[in] arr Arrow array
+/// \return DLDevice struct
+ARROW_EXPORT
+Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr);
+
+}  // namespace arrow::dlpack
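// Hedged usage sketch for the DLPack bridge above; `arr` is assumed to be a
// primitive array without nulls, as the protocol requires.

#include "arrow/c/dlpack.h"
#include "arrow/result.h"

arrow::Status HandOffToDlpackConsumer(const std::shared_ptr<arrow::Array>& arr) {
  ARROW_ASSIGN_OR_RAISE(DLManagedTensor* tensor, arrow::dlpack::ExportArray(arr));
  // A real consumer would take ownership here; whoever ends up owning the
  // tensor must invoke its deleter exactly once.
  tensor->deleter(tensor);
  return arrow::Status::OK();
}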
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..4af557a7ed5d7cf3ace070c32888971d65b797a2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h
@@ -0,0 +1,321 @@
+// Taken from:
+// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h
+/*!
+ * Copyright (c) 2017 by Contributors
+ * \file dlpack.h
+ * \brief The common header of DLPack.
+ */
+#ifndef DLPACK_DLPACK_H_
+#define DLPACK_DLPACK_H_
+
+/**
+ * \brief Compatibility with C++
+ */
+#ifdef __cplusplus
+#define DLPACK_EXTERN_C extern "C"
+#else
+#define DLPACK_EXTERN_C
+#endif
+
+/*! \brief The current major version of dlpack */
+#define DLPACK_MAJOR_VERSION 1
+
+/*! \brief The current minor version of dlpack */
+#define DLPACK_MINOR_VERSION 0
+
+/*! \brief DLPACK_DLL prefix for windows */
+#ifdef _WIN32
+#ifdef DLPACK_EXPORTS
+#define DLPACK_DLL __declspec(dllexport)
+#else
+#define DLPACK_DLL __declspec(dllimport)
+#endif
+#else
+#define DLPACK_DLL
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \brief The DLPack version.
+ *
+ * A change in major version indicates that we have changed the
+ * data layout of the ABI - DLManagedTensorVersioned.
+ *
+ * A change in minor version indicates that we have added new
+ * code, such as a new device type, but the ABI is kept the same.
+ *
+ * If an obtained DLPack tensor has a major version that disagrees
+ * with the version number specified in this header file
+ * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter
+ * (and it is safe to do so). It is not safe to access any other fields
+ * as the memory layout will have changed.
+ *
+ * In the case of a minor version mismatch, the tensor can be safely used as
+ * long as the consumer knows how to interpret all fields. Minor version
+ * updates indicate the addition of enumeration values.
+ */
+typedef struct {
+  /*! \brief DLPack major version. */
+  uint32_t major;
+  /*! \brief DLPack minor version. */
+  uint32_t minor;
+} DLPackVersion;
+
+/*!
+ * \brief The device type in DLDevice.
+ */
+#ifdef __cplusplus
+typedef enum : int32_t {
+#else
+typedef enum {
+#endif
+  /*! \brief CPU device */
+  kDLCPU = 1,
+  /*! \brief CUDA GPU device */
+  kDLCUDA = 2,
+  /*!
+   * \brief Pinned CUDA CPU memory by cudaMallocHost
+   */
+  kDLCUDAHost = 3,
+  /*! \brief OpenCL devices. */
+  kDLOpenCL = 4,
+  /*! \brief Vulkan buffer for next generation graphics. */
+  kDLVulkan = 7,
+  /*! \brief Metal for Apple GPU. */
+  kDLMetal = 8,
+  /*! \brief Verilog simulator buffer */
+  kDLVPI = 9,
+  /*! \brief ROCm GPUs for AMD GPUs */
+  kDLROCM = 10,
+  /*!
+   * \brief Pinned ROCm CPU memory allocated by hipMallocHost
+   */
+  kDLROCMHost = 11,
+  /*!
+   * \brief Reserved extension device type,
+   * used for quickly testing an extension device.
+   * The semantics can differ depending on the implementation.
+   */
+  kDLExtDev = 12,
+  /*!
+   * \brief CUDA managed/unified memory allocated by cudaMallocManaged
+   */
+  kDLCUDAManaged = 13,
+  /*!
+   * \brief Unified shared memory allocated on a oneAPI non-partitioned
+   * device. A call to the oneAPI runtime is required to determine the device
+   * type, the USM allocation type and the sycl context it is bound to.
+   */
+  kDLOneAPI = 14,
+  /*! \brief GPU support for next generation WebGPU standard. */
+  kDLWebGPU = 15,
+  /*! \brief Qualcomm Hexagon DSP */
+  kDLHexagon = 16,
+} DLDeviceType;
+
+/*!
+ * \brief A Device for Tensor and operator.
+ */
+typedef struct {
+  /*! \brief The device type used in the device. */
+  DLDeviceType device_type;
+  /*!
+   * \brief The device index.
+   * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0.
+   */
+  int32_t device_id;
+} DLDevice;
+
+/*!
+ * \brief The type code options DLDataType.
+ */
+typedef enum {
+  /*! \brief signed integer */
+  kDLInt = 0U,
+  /*! \brief unsigned integer */
+  kDLUInt = 1U,
+  /*! \brief IEEE floating point */
+  kDLFloat = 2U,
+  /*!
+   * \brief Opaque handle type, reserved for testing purposes.
+   * Frameworks need to agree on the handle data type for the exchange to be well-defined.
+   */
+  kDLOpaqueHandle = 3U,
+  /*! \brief bfloat16 */
+  kDLBfloat = 4U,
+  /*!
+   * \brief complex number
+   * (C/C++/Python layout: compact struct per complex number)
+   */
+  kDLComplex = 5U,
+  /*! \brief boolean */
+  kDLBool = 6U,
+} DLDataTypeCode;
+
+/*!
+ * \brief The data type the tensor can hold. The data type is assumed to follow the
+ * native endian-ness. An explicit error message should be raised when attempting to
+ * export an array with non-native endianness.
+ *
+ * Examples
+ *  - float: type_code = 2, bits = 32, lanes = 1
+ *  - float4 (vectorized 4 float): type_code = 2, bits = 32, lanes = 4
+ *  - int8: type_code = 0, bits = 8, lanes = 1
+ *  - std::complex<float>: type_code = 5, bits = 64, lanes = 1
+ *  - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention,
+ *    the underlying storage size of bool is 8 bits)
+ */
+typedef struct {
+  /*!
+   * \brief Type code of base types.
+   * We keep it uint8_t instead of DLDataTypeCode for minimal memory
+   * footprint, but the value should be one of DLDataTypeCode enum values.
+   */
+  uint8_t code;
+  /*!
+   * \brief Number of bits, common choices are 8, 16, 32.
+   */
+  uint8_t bits;
+  /*! \brief Number of lanes in the type, used for vector types. */
+  uint16_t lanes;
+} DLDataType;
+
+/*!
+ * \brief Plain C Tensor object, does not manage memory.
+ */
+typedef struct {
+  /*!
+   * \brief The data pointer points to the allocated data. This will be a CUDA
+   * device pointer or cl_mem handle in OpenCL. It may be opaque on some device
+   * types. This pointer is always aligned to 256 bytes as in CUDA. The
+   * `byte_offset` field should be used to point to the beginning of the data.
+   *
+   * Note that as of Nov 2021, multiple libraries (CuPy, PyTorch, TensorFlow,
+   * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
+   * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
+   * (after which this note will be updated); at the moment it is recommended
+   * to not rely on the data pointer being correctly aligned.
+   *
+   * For a given DLTensor, the size of memory required to store the contents of
+   * data is calculated as follows:
+   *
+   * \code{.c}
+   * static inline size_t GetDataSize(const DLTensor* t) {
+   *   size_t size = 1;
+   *   for (tvm_index_t i = 0; i < t->ndim; ++i) {
+   *     size *= t->shape[i];
+   *   }
+   *   size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
+   *   return size;
+   * }
+   * \endcode
+   */
+  void* data;
+  /*! \brief The device of the tensor */
+  DLDevice device;
+  /*! \brief Number of dimensions */
+  int32_t ndim;
+  /*! \brief The data type of the pointer */
+  DLDataType dtype;
+  /*! \brief The shape of the tensor */
+  int64_t* shape;
+  /*!
+   * \brief strides of the tensor (in number of elements, not bytes);
+   * can be NULL, indicating the tensor is compact and row-majored.
+   */
+  int64_t* strides;
+  /*! \brief The offset in bytes to the beginning pointer to data */
+  uint64_t byte_offset;
+} DLTensor;
+
+/*!
+ * \brief C Tensor object, manages memory of DLTensor. This data structure is
+ * intended to facilitate the borrowing of DLTensor by another framework. It is
+ * not meant to transfer the tensor. When the borrowing framework doesn't need
+ * the tensor, it should call the deleter to notify the host that the resource
+ * is no longer needed.
+ *
+ * \note This data structure is used as the Legacy DLManagedTensor
+ *       in DLPack exchange and is deprecated after DLPack v0.8.
+ *       Use DLManagedTensorVersioned instead.
+ *       This data structure may get renamed or deleted in future versions.
+ *
+ * \sa DLManagedTensorVersioned
+ */
+typedef struct DLManagedTensor {
+  /*! \brief DLTensor which is being memory managed */
+  DLTensor dl_tensor;
+  /*! \brief the context of the original host framework of DLManagedTensor in
+   *   which DLManagedTensor is used in the framework. It can also be NULL.
+   */
+  void* manager_ctx;
+  /*!
+   * \brief Destructor - this should be called
+   * to destruct the manager_ctx which backs the DLManagedTensor. It can be
+   * NULL if there is no way for the caller to provide a reasonable destructor.
+   * The destructor deletes the argument self as well.
+   */
+  void (*deleter)(struct DLManagedTensor* self);
+} DLManagedTensor;
+
+// bit masks used in the DLManagedTensorVersioned
+
+/*! \brief bit mask to indicate that the tensor is read only. */
+#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL)
+
+/*!
+ * \brief A versioned and managed C Tensor object, manages memory of DLTensor.
+ *
+ * This data structure is intended to facilitate the borrowing of DLTensor by
+ * another framework. It is not meant to transfer the tensor. When the borrowing
+ * framework doesn't need the tensor, it should call the deleter to notify the
+ * host that the resource is no longer needed.
+ *
+ * \note This is the current standard DLPack exchange data structure.
+ */
+struct DLManagedTensorVersioned {
+  /*!
+   * \brief The API and ABI version of the current managed Tensor
+   */
+  DLPackVersion version;
+  /*!
+   * \brief the context of the original host framework.
+   *
+   * Stores the context in which this DLManagedTensorVersioned is used in the
+   * framework. It can also be NULL.
+   */
+  void* manager_ctx;
+  /*!
+   * \brief Destructor.
+   *
+   * This should be called to destruct manager_ctx which holds the
+   * DLManagedTensorVersioned. It can be NULL if there is no way for the caller to provide
+   * a reasonable destructor. The destructor deletes the argument self as well.
+   */
+  void (*deleter)(struct DLManagedTensorVersioned* self);
+  /*!
+   * \brief Additional bitmask flags information about the tensor.
+   *
+   * By default the flags should be set to 0.
+   *
+   * \note Future ABI changes should keep everything until this field
+   *       stable, to ensure that deleter can be correctly called.
+   *
+   * \sa DLPACK_FLAG_BITMASK_READ_ONLY
+   */
+  uint64_t flags;
+  /*! \brief DLTensor which is being memory managed */
+  DLTensor dl_tensor;
+};
+
+#ifdef __cplusplus
+}  // DLPACK_EXTERN_C
+#endif
+#endif  // DLPACK_DLPACK_H_
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h
new file mode 100644
index 0000000000000000000000000000000000000000..b701d9928691f42b70a201569feb27d5ea86f8cd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle
+
+#pragma once
+
+/// \defgroup compute-functions Abstract compute function API
+/// @{
+/// @}
+
+/// \defgroup compute-concrete-options Concrete option classes for compute functions
+/// @{
+/// @}
+
+#include "arrow/compute/api_aggregate.h"     // IWYU pragma: export
+#include "arrow/compute/api_scalar.h"        // IWYU pragma: export
+#include "arrow/compute/api_vector.h"        // IWYU pragma: export
+#include "arrow/compute/cast.h"              // IWYU pragma: export
+#include "arrow/compute/function.h"          // IWYU pragma: export
+#include "arrow/compute/function_options.h"  // IWYU pragma: export
+#include "arrow/compute/kernel.h"            // IWYU pragma: export
+#include "arrow/compute/registry.h"          // IWYU pragma: export
+#include "arrow/datum.h"                     // IWYU pragma: export
+
+#include "arrow/compute/expression.h"  // IWYU pragma: export
+
+/// \defgroup execnode-row Utilities for working with data in a row-major format
+/// @{
+/// @}
+
+#include "arrow/compute/row/grouper.h"  // IWYU pragma: export
+
+/// \defgroup acero-internals Acero internals, useful for those extending Acero
+/// @{
+/// @}
+
+#include "arrow/compute/exec.h"  // IWYU pragma: export
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e5210b073ee4218145646bc512e06a9a0d3df6a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
@@ -0,0 +1,466 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Eager evaluation convenience APIs for invoking common functions, including
+// necessary memory allocations
+
+#pragma once
+
+#include <vector>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+
+namespace compute {
+
+class ExecContext;
+
+// ----------------------------------------------------------------------
+// Aggregate functions
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+/// \brief Control general scalar aggregate kernel behavior
+///
+/// By default, null values are ignored (skip_nulls = true).
+class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
+ public:
+  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
+  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
+  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }
+
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control count aggregate kernel behavior.
+///
+/// By default, only non-null values are counted.
+class ARROW_EXPORT CountOptions : public FunctionOptions {
+ public:
+  enum CountMode {
+    /// Count only non-null values.
+    ONLY_VALID = 0,
+    /// Count only null values.
+    ONLY_NULL,
+    /// Count both non-null and null values.
+    ALL,
+  };
+  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
+  static constexpr char const kTypeName[] = "CountOptions";
+  static CountOptions Defaults() { return CountOptions{}; }
+
+  CountMode mode;
+};
+
+/// \brief Control Mode kernel behavior
+///
+/// Returns top-n common values and counts.
+/// By default, returns the most common value and count.
+class ARROW_EXPORT ModeOptions : public FunctionOptions {
+ public:
+  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "ModeOptions";
+  static ModeOptions Defaults() { return ModeOptions{}; }
+
+  int64_t n = 1;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
+///
+/// The divisor used in calculations is N - ddof, where N is the number of elements.
+/// By default, ddof is zero, and population variance or stddev is returned.
+class ARROW_EXPORT VarianceOptions : public FunctionOptions {
+ public:
+  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "VarianceOptions";
+  static VarianceOptions Defaults() { return VarianceOptions{}; }
+
+  int ddof = 0;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT QuantileOptions : public FunctionOptions {
+ public:
+  /// Interpolation method to use when quantile lies between two data points
+  enum Interpolation {
+    LINEAR = 0,
+    LOWER,
+    HIGHER,
+    NEAREST,
+    MIDPOINT,
+  };
+
+  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  explicit QuantileOptions(std::vector<double> q,
+                           enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  static constexpr char const kTypeName[] = "QuantileOptions";
+  static QuantileOptions Defaults() { return QuantileOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  enum Interpolation interpolation;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control TDigest approximate quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT TDigestOptions : public FunctionOptions {
+ public:
+  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "TDigestOptions";
+  static TDigestOptions Defaults() { return TDigestOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  /// compression parameter, default 100
+  uint32_t delta;
+  /// input buffer size, default 500
+  uint32_t buffer_size;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Index kernel behavior
+class ARROW_EXPORT IndexOptions : public FunctionOptions {
+ public:
+  explicit IndexOptions(std::shared_ptr<Scalar> value);
+  // Default constructor for serialization
+  IndexOptions();
+  static constexpr char const kTypeName[] = "IndexOptions";
+
+  std::shared_ptr<Scalar> value;
+};
+
+/// \brief Configure a grouped aggregation
+struct ARROW_EXPORT Aggregate {
+  Aggregate() = default;
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            std::vector<FieldRef> target, std::string name = "")
+      : function(std::move(function)),
+        options(std::move(options)),
+        target(std::move(target)),
+        name(std::move(name)) {}
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            FieldRef target, std::string name = "")
+      : Aggregate(std::move(function), std::move(options),
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, FieldRef target, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  /*target=*/std::vector<FieldRef>{}, std::move(name)) {}
+
+  /// the name of the aggregation function
+  std::string function;
+
+  /// options for the aggregation function
+  std::shared_ptr<FunctionOptions> options;
+
+  /// zero or more fields to which aggregations will be applied
+  std::vector<FieldRef> target;
+
+  /// optional output field name for aggregations
+  std::string name;
+};
+
+/// @}
+
+/// \brief Count values in an array.
+///
+/// \param[in] options counting options, see CountOptions for more information
+/// \param[in] datum to count
+/// \param[in] ctx the function execution context, optional
+/// \return out resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Count(const Datum& datum,
+                    const CountOptions& options = CountOptions::Defaults(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the mean of a numeric array.
+///
+/// \param[in] value datum to compute the mean, expecting Array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed mean as a DoubleScalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mean(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the product of values of a numeric array.
+///
+/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed sum as a Scalar
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Product(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
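// A short, hedged example of the eager aggregate entry points declared above;
// `values` is an assumed numeric array.

#include "arrow/compute/api_aggregate.h"

arrow::Status Summarize(const std::shared_ptr<arrow::Array>& values) {
  ARROW_ASSIGN_OR_RAISE(arrow::Datum n, arrow::compute::Count(values));
  ARROW_ASSIGN_OR_RAISE(arrow::Datum mean, arrow::compute::Mean(values));
  // Count yields an Int64Scalar, Mean a DoubleScalar.
  return arrow::Status::OK();
}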
+
+/// \brief Sum values of a numeric array.
+///
+/// \param[in] value datum to sum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed sum as a Scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Sum(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the first value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed first as a Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> First(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the last value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed last as a Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Last(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the min / max of a numeric array
+///
+/// This function returns both the min and max as a struct scalar, with type
+/// struct<min: T, max: T>, where T is the input type
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a struct scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> MinMax(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Test whether any element in a boolean array evaluates to true.
+///
+/// This function returns true if any of the elements in the array evaluates
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false, then Kleene logic is used.
+/// See KleeneOr for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+///
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Any(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
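// Sketch of unpacking the MinMax struct scalar described above; the
// field("min")/field("max") lookups follow the documented result type.

#include "arrow/compute/api_aggregate.h"
#include "arrow/scalar.h"

arrow::Status Extremes(const std::shared_ptr<arrow::Array>& values) {
  ARROW_ASSIGN_OR_RAISE(arrow::Datum minmax, arrow::compute::MinMax(values));
  auto as_struct = std::static_pointer_cast<arrow::StructScalar>(minmax.scalar());
  ARROW_ASSIGN_OR_RAISE(auto min_value, as_struct->field("min"));
  ARROW_ASSIGN_OR_RAISE(auto max_value, as_struct->field("max"));
  return arrow::Status::OK();
}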
+
+/// \brief Test whether all elements in a boolean array evaluate to true.
+///
+/// This function returns true if all of the elements in the array evaluate
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false, then Kleene logic is used.
+/// See KleeneAnd for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+///
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> All(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the modal (most common) value of a numeric array
+///
+/// This function returns the top-n most common values and the number of times
+/// they occur as an array of `struct<mode: T, count: int64_t>`, where T is the
+/// input type. Values with larger counts are returned before smaller ones.
+/// If more than one value has the same count, the smaller value is returned first.
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ModeOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array of struct<mode: T, count: int64_t>
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mode(const Datum& value,
+                   const ModeOptions& options = ModeOptions::Defaults(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the standard deviation of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed standard deviation as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Stddev(const Datum& value,
+                     const VarianceOptions& options = VarianceOptions::Defaults(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the variance of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed variance as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Variance(const Datum& value,
+                       const VarianceOptions& options = VarianceOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the quantiles of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see QuantileOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Quantile(const Datum& value,
+                       const QuantileOptions& options = QuantileOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate approximate quantiles of a numeric array with the T-Digest
+/// algorithm
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see TDigestOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> TDigest(const Datum& value,
+                      const TDigestOptions& options = TDigestOptions::Defaults(),
+                      ExecContext* ctx = NULLPTR);
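// Hedged example of Quantile with non-default options: several probability
// levels at once and LOWER interpolation; the levels chosen are illustrative.

#include "arrow/compute/api_aggregate.h"

arrow::Result<arrow::Datum> Quartiles(const arrow::Datum& column) {
  arrow::compute::QuantileOptions options(std::vector<double>{0.25, 0.5, 0.75},
                                          arrow::compute::QuantileOptions::LOWER);
  return arrow::compute::Quantile(column, options);
}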
+/// \param[in] ctx the function execution context, optional +/// \return out a Scalar containing the index (or -1 if not found). +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Index(const Datum& value, const IndexOptions& options, + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..bad34f4a37881e82b3b0787f2d2c9c7c8d4a0461 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h @@ -0,0 +1,1717 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Eager evaluation convenience APIs for invoking common functions, including +// necessary memory allocations + +#pragma once + +#include +#include +#include + +#include "arrow/compute/function_options.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \addtogroup compute-concrete-options +/// +/// @{ + +class ARROW_EXPORT ArithmeticOptions : public FunctionOptions { + public: + explicit ArithmeticOptions(bool check_overflow = false); + static constexpr char const kTypeName[] = "ArithmeticOptions"; + bool check_overflow; +}; + +class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions { + public: + explicit ElementWiseAggregateOptions(bool skip_nulls = true); + static constexpr char const kTypeName[] = "ElementWiseAggregateOptions"; + static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; } + bool skip_nulls; +}; + +/// Rounding and tie-breaking modes for round compute functions. +/// Additional details and examples are provided in compute.rst. 
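+///
+/// For example, rounding 2.5 yields 2 under DOWN ("floor"), 3 under UP
+/// ("ceil"), 2 under HALF_TO_EVEN and 3 under HALF_TO_ODD; rounding -2.5
+/// yields -2 under HALF_UP and -3 under HALF_DOWN.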
+enum class RoundMode : int8_t {
+  /// Round to nearest integer less than or equal in magnitude (aka "floor")
+  DOWN,
+  /// Round to nearest integer greater than or equal in magnitude (aka "ceil")
+  UP,
+  /// Get the integral part without fractional digits (aka "trunc")
+  TOWARDS_ZERO,
+  /// Round negative values with DOWN rule
+  /// and positive values with UP rule (aka "away from zero")
+  TOWARDS_INFINITY,
+  /// Round ties with DOWN rule (also called "round half towards negative infinity")
+  HALF_DOWN,
+  /// Round ties with UP rule (also called "round half towards positive infinity")
+  HALF_UP,
+  /// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity")
+  HALF_TOWARDS_ZERO,
+  /// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero")
+  HALF_TOWARDS_INFINITY,
+  /// Round ties to nearest even integer
+  HALF_TO_EVEN,
+  /// Round ties to nearest odd integer
+  HALF_TO_ODD,
+};
+
+class ARROW_EXPORT RoundOptions : public FunctionOptions {
+ public:
+  explicit RoundOptions(int64_t ndigits = 0,
+                        RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundOptions";
+  static RoundOptions Defaults() { return RoundOptions(); }
+  /// Rounding precision (number of digits to round to)
+  int64_t ndigits;
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions {
+ public:
+  explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundBinaryOptions";
+  static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); }
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+enum class CalendarUnit : int8_t {
+  NANOSECOND,
+  MICROSECOND,
+  MILLISECOND,
+  SECOND,
+  MINUTE,
+  HOUR,
+  DAY,
+  WEEK,
+  MONTH,
+  QUARTER,
+  YEAR
+};
+
+class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions {
+ public:
+  explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY,
+                                bool week_starts_monday = true,
+                                bool ceil_is_strictly_greater = false,
+                                bool calendar_based_origin = false);
+  static constexpr char const kTypeName[] = "RoundTemporalOptions";
+  static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); }
+
+  /// Number of units to round to
+  int multiple;
+  /// The unit used for rounding of time
+  CalendarUnit unit;
+  /// What day does the week start with (Monday=true, Sunday=false)
+  bool week_starts_monday;
+  /// Enable this flag to return a rounded value that is strictly greater than the
+  /// input. For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield
+  /// 1970-01-01T03:00:00 if set to true and 1970-01-01T00:00:00 if set to false.
+  /// This applies to ceiling only.
+  bool ceil_is_strictly_greater;
+  /// By default time is rounded to a multiple of units since 1970-01-01T00:00:00.
+  /// By setting calendar_based_origin to true, time will be rounded to a number
+  /// of units since the last greater calendar unit.
+  /// For example: rounding to a multiple of days since the beginning of the month or
+  /// to hours since the beginning of the day.
+  /// Exceptions: week and quarter are not used as greater units, therefore days
+  /// will be rounded to the beginning of the month, not the week. The greater
+  /// unit of week is year.
+  /// Note that ceiling and rounding might change sorting order of an array near greater
+  /// unit change.
For example rounding YYYY-mm-dd 23:00:00 to 5 hours will ceil and + /// round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the other hand + /// YYYY-mm-dd+1 00:00:00 will ceil, round and floor to YYYY-mm-dd+1 00:00:00. This + /// can break the order of an already ordered array. + bool calendar_based_origin; +}; + +class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions { + public: + explicit RoundToMultipleOptions(double multiple = 1.0, + RoundMode round_mode = RoundMode::HALF_TO_EVEN); + explicit RoundToMultipleOptions(std::shared_ptr multiple, + RoundMode round_mode = RoundMode::HALF_TO_EVEN); + static constexpr char const kTypeName[] = "RoundToMultipleOptions"; + static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); } + /// Rounding scale (multiple to round to). + /// + /// Should be a positive numeric scalar of a type compatible with the + /// argument to be rounded. The cast kernel is used to convert the rounding + /// multiple to match the result type. + std::shared_ptr multiple; + /// Rounding and tie-breaking mode + RoundMode round_mode; +}; + +/// Options for var_args_join. +class ARROW_EXPORT JoinOptions : public FunctionOptions { + public: + /// How to handle null values. (A null separator always results in a null output.) + enum NullHandlingBehavior { + /// A null in any input results in a null in the output. + EMIT_NULL, + /// Nulls in inputs are skipped. + SKIP, + /// Nulls in inputs are replaced with the replacement string. + REPLACE, + }; + explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL, + std::string null_replacement = ""); + static constexpr char const kTypeName[] = "JoinOptions"; + static JoinOptions Defaults() { return JoinOptions(); } + NullHandlingBehavior null_handling; + std::string null_replacement; +}; + +class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions { + public: + explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false); + MatchSubstringOptions(); + static constexpr char const kTypeName[] = "MatchSubstringOptions"; + + /// The exact substring (or regex, depending on kernel) to look for inside input values. + std::string pattern; + /// Whether to perform a case-insensitive match. + bool ignore_case; +}; + +class ARROW_EXPORT SplitOptions : public FunctionOptions { + public: + explicit SplitOptions(int64_t max_splits = -1, bool reverse = false); + static constexpr char const kTypeName[] = "SplitOptions"; + + /// Maximum number of splits allowed, or unlimited when -1 + int64_t max_splits; + /// Start splitting from the end of the string (only relevant when max_splits != -1) + bool reverse; +}; + +class ARROW_EXPORT SplitPatternOptions : public FunctionOptions { + public: + explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1, + bool reverse = false); + SplitPatternOptions(); + static constexpr char const kTypeName[] = "SplitPatternOptions"; + + /// The exact substring to split on. 
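+  /// For example, splitting "a,b,c" on pattern "," yields ["a", "b", "c"].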
+ std::string pattern; + /// Maximum number of splits allowed, or unlimited when -1 + int64_t max_splits; + /// Start splitting from the end of the string (only relevant when max_splits != -1) + bool reverse; +}; + +class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions { + public: + explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement); + ReplaceSliceOptions(); + static constexpr char const kTypeName[] = "ReplaceSliceOptions"; + + /// Index to start slicing at + int64_t start; + /// Index to stop slicing at + int64_t stop; + /// String to replace the slice with + std::string replacement; +}; + +class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions { + public: + explicit ReplaceSubstringOptions(std::string pattern, std::string replacement, + int64_t max_replacements = -1); + ReplaceSubstringOptions(); + static constexpr char const kTypeName[] = "ReplaceSubstringOptions"; + + /// Pattern to match, literal, or regular expression depending on which kernel is used + std::string pattern; + /// String to replace the pattern with + std::string replacement; + /// Max number of substrings to replace (-1 means unbounded) + int64_t max_replacements; +}; + +class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions { + public: + explicit ExtractRegexOptions(std::string pattern); + ExtractRegexOptions(); + static constexpr char const kTypeName[] = "ExtractRegexOptions"; + + /// Regular expression with named capture fields + std::string pattern; +}; + +/// Options for IsIn and IndexIn functions +class ARROW_EXPORT SetLookupOptions : public FunctionOptions { + public: + /// How to handle null values. + enum NullMatchingBehavior { + /// MATCH, any null in `value_set` is successfully matched in + /// the input. + MATCH, + /// SKIP, any null in `value_set` is ignored and nulls in the input + /// produce null (IndexIn) or false (IsIn) values in the output. + SKIP, + /// EMIT_NULL, any null in `value_set` is ignored and nulls in the + /// input produce null (IndexIn and IsIn) values in the output. + EMIT_NULL, + /// INCONCLUSIVE, null values are regarded as unknown values, which is + /// sql-compatible. nulls in the input produce null (IndexIn and IsIn) + /// values in the output. Besides, if `value_set` contains a null, + /// non-null unmatched values in the input also produce null values + /// (IndexIn and IsIn) in the output. + INCONCLUSIVE + }; + + explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH); + SetLookupOptions(); + + // DEPRECATED(will be removed after removing of skip_nulls) + explicit SetLookupOptions(Datum value_set, bool skip_nulls); + + static constexpr char const kTypeName[] = "SetLookupOptions"; + + /// The set of values to look up input values into. + Datum value_set; + + NullMatchingBehavior null_matching_behavior; + + // DEPRECATED(will be removed after removing of skip_nulls) + NullMatchingBehavior GetNullMatchingBehavior() const; + + // DEPRECATED(use null_matching_behavior instead) + /// Whether nulls in `value_set` count for lookup. + /// + /// If true, any null in `value_set` is ignored and nulls in the input + /// produce null (IndexIn) or false (IsIn) values in the output. + /// If false, any null in `value_set` is successfully matched in + /// the input. 
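+  ///
+  /// For example, looking up [1, null] in a value_set of [1, null]: IsIn
+  /// yields [true, false] if skip_nulls is true, and [true, true] if false.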
+ std::optional skip_nulls; +}; + +/// Options for struct_field function +class ARROW_EXPORT StructFieldOptions : public FunctionOptions { + public: + explicit StructFieldOptions(std::vector indices); + explicit StructFieldOptions(std::initializer_list); + explicit StructFieldOptions(FieldRef field_ref); + StructFieldOptions(); + static constexpr char const kTypeName[] = "StructFieldOptions"; + + /// The FieldRef specifying what to extract from struct or union. + FieldRef field_ref; +}; + +class ARROW_EXPORT StrptimeOptions : public FunctionOptions { + public: + explicit StrptimeOptions(std::string format, TimeUnit::type unit, + bool error_is_null = false); + StrptimeOptions(); + static constexpr char const kTypeName[] = "StrptimeOptions"; + + /// The desired format string. + std::string format; + /// The desired time resolution + TimeUnit::type unit; + /// Return null on parsing errors if true or raise if false + bool error_is_null; +}; + +class ARROW_EXPORT StrftimeOptions : public FunctionOptions { + public: + explicit StrftimeOptions(std::string format, std::string locale = "C"); + StrftimeOptions(); + + static constexpr char const kTypeName[] = "StrftimeOptions"; + + static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S"; + + /// The desired format string. + std::string format; + /// The desired output locale string. + std::string locale; +}; + +class ARROW_EXPORT PadOptions : public FunctionOptions { + public: + explicit PadOptions(int64_t width, std::string padding = " "); + PadOptions(); + static constexpr char const kTypeName[] = "PadOptions"; + + /// The desired string length. + int64_t width; + /// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII). + std::string padding; +}; + +class ARROW_EXPORT TrimOptions : public FunctionOptions { + public: + explicit TrimOptions(std::string characters); + TrimOptions(); + static constexpr char const kTypeName[] = "TrimOptions"; + + /// The individual characters to be trimmed from the string. + std::string characters; +}; + +class ARROW_EXPORT SliceOptions : public FunctionOptions { + public: + explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits::max(), + int64_t step = 1); + SliceOptions(); + static constexpr char const kTypeName[] = "SliceOptions"; + int64_t start, stop, step; +}; + +class ARROW_EXPORT ListSliceOptions : public FunctionOptions { + public: + explicit ListSliceOptions(int64_t start, std::optional stop = std::nullopt, + int64_t step = 1, + std::optional return_fixed_size_list = std::nullopt); + ListSliceOptions(); + static constexpr char const kTypeName[] = "ListSliceOptions"; + /// The start of list slicing. + int64_t start; + /// Optional stop of list slicing. If not set, then slice to end. (NotImplemented) + std::optional stop; + /// Slicing step + int64_t step; + // Whether to return a FixedSizeListArray. If true _and_ stop is after + // a list element's length, nulls will be appended to create the requested slice size. + // Default of `nullopt` will return whatever type it got in. 
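+  // For example, slicing with start=0, stop=3 and return_fixed_size_list=true
+  // pads the list element [1, 2] with a null to produce [1, 2, null].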
+ std::optional return_fixed_size_list; +}; + +class ARROW_EXPORT NullOptions : public FunctionOptions { + public: + explicit NullOptions(bool nan_is_null = false); + static constexpr char const kTypeName[] = "NullOptions"; + static NullOptions Defaults() { return NullOptions{}; } + + bool nan_is_null; +}; + +enum CompareOperator : int8_t { + EQUAL, + NOT_EQUAL, + GREATER, + GREATER_EQUAL, + LESS, + LESS_EQUAL, +}; + +struct ARROW_EXPORT CompareOptions { + explicit CompareOptions(CompareOperator op) : op(op) {} + CompareOptions() : CompareOptions(CompareOperator::EQUAL) {} + enum CompareOperator op; +}; + +class ARROW_EXPORT MakeStructOptions : public FunctionOptions { + public: + MakeStructOptions(std::vector n, std::vector r, + std::vector> m); + explicit MakeStructOptions(std::vector n); + MakeStructOptions(); + static constexpr char const kTypeName[] = "MakeStructOptions"; + + /// Names for wrapped columns + std::vector field_names; + + /// Nullability bits for wrapped columns + std::vector field_nullability; + + /// Metadata attached to wrapped columns + std::vector> field_metadata; +}; + +struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions { + public: + explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1); + static constexpr char const kTypeName[] = "DayOfWeekOptions"; + static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); } + + /// Number days from 0 if true and from 1 if false + bool count_from_zero; + /// What day does the week start with (Monday=1, Sunday=7). + /// The numbering is unaffected by the count_from_zero parameter. + uint32_t week_start; +}; + +/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent +/// times. +struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions { + public: + /// \brief How to interpret ambiguous local times that can be interpreted as + /// multiple instants (normally two) due to DST shifts. + /// + /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations. + /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations. + enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST }; + + /// \brief How to handle local times that do not exist due to DST shifts. + /// + /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant + /// in the given timestamp precision (for example, for a nanoseconds precision + /// timestamp, this is one nanosecond before the DST shift instant). + /// NONEXISTENT_LATEST emits the DST shift instant. 
+ enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST }; + + explicit AssumeTimezoneOptions(std::string timezone, + Ambiguous ambiguous = AMBIGUOUS_RAISE, + Nonexistent nonexistent = NONEXISTENT_RAISE); + AssumeTimezoneOptions(); + static constexpr char const kTypeName[] = "AssumeTimezoneOptions"; + + /// Timezone to convert timestamps from + std::string timezone; + + /// How to interpret ambiguous local times (due to DST shifts) + Ambiguous ambiguous; + /// How to interpret nonexistent local times (due to DST shifts) + Nonexistent nonexistent; +}; + +struct ARROW_EXPORT WeekOptions : public FunctionOptions { + public: + explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false, + bool first_week_is_fully_in_year = false); + static constexpr char const kTypeName[] = "WeekOptions"; + static WeekOptions Defaults() { return WeekOptions{}; } + static WeekOptions ISODefaults() { + return WeekOptions{/*week_starts_monday*/ true, + /*count_from_zero=*/false, + /*first_week_is_fully_in_year=*/false}; + } + static WeekOptions USDefaults() { + return WeekOptions{/*week_starts_monday*/ false, + /*count_from_zero=*/false, + /*first_week_is_fully_in_year=*/false}; + } + + /// What day does the week start with (Monday=true, Sunday=false) + bool week_starts_monday; + /// Dates from current year that fall into last ISO week of the previous year return + /// 0 if true and 52 or 53 if false. + bool count_from_zero; + /// Must the first week be fully in January (true), or is a week that begins on + /// December 29, 30, or 31 considered to be the first week of the new year (false)? + bool first_week_is_fully_in_year; +}; + +struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions { + public: + enum Form { NFC, NFKC, NFD, NFKD }; + + explicit Utf8NormalizeOptions(Form form = NFC); + static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); } + static constexpr char const kTypeName[] = "Utf8NormalizeOptions"; + + /// The Unicode normalization form to apply + Form form; +}; + +class ARROW_EXPORT RandomOptions : public FunctionOptions { + public: + enum Initializer { SystemRandom, Seed }; + + static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; } + static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; } + + RandomOptions(Initializer initializer, uint64_t seed); + RandomOptions(); + static constexpr char const kTypeName[] = "RandomOptions"; + static RandomOptions Defaults() { return RandomOptions(); } + + /// The type of initialization for random number generation - system or provided seed. + Initializer initializer; + /// The seed value used to initialize the random number generation. + uint64_t seed; +}; + +/// Options for map_lookup function +class ARROW_EXPORT MapLookupOptions : public FunctionOptions { + public: + enum Occurrence { + /// Return the first matching value + FIRST, + /// Return the last matching value + LAST, + /// Return all matching values + ALL + }; + + explicit MapLookupOptions(std::shared_ptr query_key, Occurrence occurrence); + MapLookupOptions(); + + constexpr static char const kTypeName[] = "MapLookupOptions"; + + /// The key to lookup in the map + std::shared_ptr query_key; + + /// Whether to return the first, last, or all matching values + Occurrence occurrence; +}; + +/// @} + +/// \brief Get the absolute value of a value. +/// +/// If argument is null the result will be null. 
+/// +/// \param[in] arg the value transformed +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise absolute value +ARROW_EXPORT +Result AbsoluteValue(const Datum& arg, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Add two values together. Array values must be the same length. If +/// either addend is null the result will be null. +/// +/// \param[in] left the first addend +/// \param[in] right the second addend +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise sum +ARROW_EXPORT +Result Add(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Subtract two values. Array values must be the same length. If the +/// minuend or subtrahend is null the result will be null. +/// +/// \param[in] left the value subtracted from (minuend) +/// \param[in] right the value by which the minuend is reduced (subtrahend) +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise difference +ARROW_EXPORT +Result Subtract(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Multiply two values. Array values must be the same length. If either +/// factor is null the result will be null. +/// +/// \param[in] left the first factor +/// \param[in] right the second factor +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise product +ARROW_EXPORT +Result Multiply(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Divide two values. Array values must be the same length. If either +/// argument is null the result will be null. For integer types, if there is +/// a zero divisor, an error will be raised. +/// +/// \param[in] left the dividend +/// \param[in] right the divisor +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise quotient +ARROW_EXPORT +Result Divide(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Negate values. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value negated +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise negation +ARROW_EXPORT +Result Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Raise the values of base array to the power of the exponent array values. +/// Array values must be the same length. If either base or exponent is null the result +/// will be null. 
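+///
+/// For example, Power of base [2, 3] and exponent [3, 2] yields [8, 9]; with
+/// ArithmeticOptions(/*check_overflow=*/true), integer overflow raises an
+/// error instead of wrapping around.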
+/// +/// \param[in] left the base +/// \param[in] right the exponent +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise base value raised to the power of exponent +ARROW_EXPORT +Result Power(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Raise Euler's number to the power of specified exponent, element-wise. +/// If the exponent value is null the result will be null. +/// +/// \param[in] arg the exponent +/// \param[in] ctx the function execution context, optional +/// \return the element-wise Euler's number raised to the power of exponent +ARROW_EXPORT +Result Exp(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Left shift the left array by the right array. Array values must be the +/// same length. If either operand is null, the result will be null. +/// +/// \param[in] left the value to shift +/// \param[in] right the value to shift by +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise left value shifted left by the right value +ARROW_EXPORT +Result ShiftLeft(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Right shift the left array by the right array. Array values must be the +/// same length. If either operand is null, the result will be null. Performs a +/// logical shift for unsigned values, and an arithmetic shift for signed values. +/// +/// \param[in] left the value to shift +/// \param[in] right the value to shift by +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise left value shifted right by the right value +ARROW_EXPORT +Result ShiftRight(const Datum& left, const Datum& right, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the sine of the array values. +/// \param[in] arg The values to compute the sine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise sine of the values +ARROW_EXPORT +Result Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cosine of the array values. +/// \param[in] arg The values to compute the cosine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise cosine of the values +ARROW_EXPORT +Result Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse sine (arcsine) of the array values. +/// \param[in] arg The values to compute the inverse sine for. 
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse sine of the values +ARROW_EXPORT +Result Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse cosine (arccosine) of the array values. +/// \param[in] arg The values to compute the inverse cosine for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse cosine of the values +ARROW_EXPORT +Result Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the tangent of the array values. +/// \param[in] arg The values to compute the tangent for. +/// \param[in] options arithmetic options (enable/disable overflow checking), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise tangent of the values +ARROW_EXPORT +Result Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse tangent (arctangent) of the array values. +/// \param[in] arg The values to compute the inverse tangent for. +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse tangent of the values +ARROW_EXPORT +Result Atan(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Compute the inverse tangent (arctangent) of y/x, using the +/// argument signs to determine the correct quadrant. +/// \param[in] y The y-values to compute the inverse tangent for. +/// \param[in] x The x-values to compute the inverse tangent for. +/// \param[in] ctx the function execution context, optional +/// \return the elementwise inverse tangent of the values +ARROW_EXPORT +Result Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR); + +/// \brief Get the natural log of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise natural log +ARROW_EXPORT +Result Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log base 10 of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log base 10 +ARROW_EXPORT +Result Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log base 2 of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log base 2 +ARROW_EXPORT +Result Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the natural log of (1 + value). +/// +/// If argument is null the result will be null. 
+/// This function may be more accurate than Log(1 + value) for values close to zero. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise natural log +ARROW_EXPORT +Result Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the log of a value to the given base. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the logarithm for. +/// \param[in] base The given base. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise log to the given base +ARROW_EXPORT +Result Logb(const Datum& arg, const Datum& base, + ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the square-root of a value. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg The values to compute the square-root for. +/// \param[in] options arithmetic options (overflow handling), optional +/// \param[in] ctx the function execution context, optional +/// \return the elementwise square-root +ARROW_EXPORT +Result Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief Round to the nearest integer less than or equal in magnitude to the +/// argument. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] ctx the function execution context, optional +/// \return the rounded value +ARROW_EXPORT +Result Floor(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Round to the nearest integer greater than or equal in magnitude to the +/// argument. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] ctx the function execution context, optional +/// \return the rounded value +ARROW_EXPORT +Result Ceil(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Get the integral part without fractional digits. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to truncate +/// \param[in] ctx the function execution context, optional +/// \return the truncated value +ARROW_EXPORT +Result Trunc(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Find the element-wise maximum of any number of arrays or scalars. +/// Array values must be the same length. +/// +/// \param[in] args arrays or scalars to operate on. +/// \param[in] options options for handling nulls, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise maximum +ARROW_EXPORT +Result MaxElementWise( + const std::vector& args, + ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Find the element-wise minimum of any number of arrays or scalars. +/// Array values must be the same length. +/// +/// \param[in] args arrays or scalars to operate on. 
+/// \param[in] options options for handling nulls, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise minimum +ARROW_EXPORT +Result MinElementWise( + const std::vector& args, + ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Get the sign of a value. Array values can be of arbitrary length. If argument +/// is null the result will be null. +/// +/// \param[in] arg the value to extract sign from +/// \param[in] ctx the function execution context, optional +/// \return the element-wise sign function +ARROW_EXPORT +Result Sign(const Datum& arg, ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given precision. +/// +/// If arg is null the result will be null. +/// +/// \param[in] arg the value to be rounded +/// \param[in] options rounding options (rounding mode and number of digits), optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given precision. +/// +/// If arg1 is null the result will be null. +/// If arg2 is null then the result will be null. If arg2 is negative, then the rounding +/// place will be shifted to the left (thus -1 would correspond to rounding to the nearest +/// ten). If positive, the rounding place will shift to the right (and +1 would +/// correspond to rounding to the nearest tenth). +/// +/// \param[in] arg1 the value to be rounded +/// \param[in] arg2 the number of significant digits to round to +/// \param[in] options rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result RoundBinary(const Datum& arg1, const Datum& arg2, + RoundBinaryOptions options = RoundBinaryOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Round a value to a given multiple. +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the value to round +/// \param[in] options rounding options (rounding mode and multiple), optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +ARROW_EXPORT +Result RoundToMultiple( + const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Ceil a temporal value to a given frequency +/// +/// If argument is null the result will be null. +/// +/// \param[in] arg the temporal value to ceil +/// \param[in] options temporal rounding options, optional +/// \param[in] ctx the function execution context, optional +/// \return the element-wise rounded value +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result CeilTemporal( + const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Floor a temporal value to a given frequency +/// +/// If argument is null the result will be null. 
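+///
+/// For example, flooring 2023-08-17T13:45:00 with
+/// RoundTemporalOptions(1, CalendarUnit::HOUR) yields 2023-08-17T13:00:00.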
+///
+/// \param[in] arg the temporal value to floor
+/// \param[in] options temporal rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+///
+/// \since 7.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> FloorTemporal(
+    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Round a temporal value to a given frequency
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the temporal value to round
+/// \param[in] options temporal rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+///
+/// \since 7.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> RoundTemporal(
+    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Invert the values of a boolean datum
+/// \param[in] value datum to invert
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Invert(const Datum& value, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise AND of two boolean datums which always propagates nulls
+/// (null and false is null).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise AND of two boolean datums with a Kleene truth table
+/// (null and false is false).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> KleeneAnd(const Datum& left, const Datum& right,
+                        ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise OR of two boolean datums which always propagates nulls
+/// (null or true is null).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise OR of two boolean datums with a Kleene truth table
+/// (null or true is true).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> KleeneOr(const Datum& left, const Datum& right,
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise XOR of two boolean datums
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise AND NOT of two boolean datums which always propagates nulls
+/// (null and not true is null).
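+///
+/// For example, AndNot of [true, true] and [false, null] is [true, null].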
+/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table +/// (false and not null is false, null and not true is false). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result KleeneAndNot(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief IsIn returns true for each element of `values` that is contained in +/// `value_set` +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. +/// +/// \param[in] values array-like input to look up in value_set +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IsIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IndexIn examines each slot in the values against a value_set array. +/// If the value is not found in value_set, null will be output. +/// If found, the index of occurrence within value_set (ignoring duplicates) +/// will be output. +/// +/// For example given values = [99, 42, 3, null] and +/// value_set = [3, 3, 99], the output will be = [2, null, 0, null] +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. 
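+///
+/// A minimal usage sketch (illustrative only; `values` and `value_set` stand
+/// for any array-like Datums, inside a function returning Result or Status):
+///
+///   SetLookupOptions opts(value_set, SetLookupOptions::SKIP);
+///   ARROW_ASSIGN_OR_RAISE(Datum indices, IndexIn(values, opts));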
+/// +/// \param[in] values array-like input +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IndexIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IndexIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IsValid returns true for each element of `values` that is not null, +/// false otherwise +/// +/// \param[in] values input to examine for validity +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsValid(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsNull returns true for each element of `values` that is null, +/// false otherwise +/// +/// \param[in] values input to examine for nullity +/// \param[in] options NullOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief IsNan returns true for each element of `values` that is NaN, +/// false otherwise +/// +/// \param[in] values input to look for NaN +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNan(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IfElse returns elements chosen from `left` or `right` +/// depending on `cond`. `null` values in `cond` will be promoted to the result +/// +/// \param[in] cond `Boolean` condition Scalar/ Array +/// \param[in] left Scalar/ Array +/// \param[in] right Scalar/ Array +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IfElse(const Datum& cond, const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for +/// each row, select the first value for which the corresponding condition is +/// true, or (if given) select the 'else' value, else emit null. Note that a +/// null condition is the same as false. +/// +/// \param[in] cond Conditions (Boolean) +/// \param[in] cases Values (any type), along with an optional 'else' value. 
+/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result CaseWhen(const Datum& cond, const std::vector& cases, + ExecContext* ctx = NULLPTR); + +/// \brief Year returns year for each element of `values` +/// +/// \param[in] values input to extract year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Year(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsLeapYear returns if a year is a leap year for each element of `values` +/// +/// \param[in] values input to extract leap year indicator from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Month returns month for each element of `values`. +/// Month is encoded as January=1, December=12 +/// +/// \param[in] values input to extract month from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Month(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Day returns day number for each element of `values` +/// +/// \param[in] values input to extract day from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Day(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief YearMonthDay returns a struct containing the Year, Month and Day value for +/// each element of `values`. +/// +/// \param[in] values input to extract (year, month, day) struct from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief DayOfWeek returns number of the day of the week value for each element of +/// `values`. +/// +/// By default week starts on Monday denoted by 0 and ends on Sunday denoted +/// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be +/// set using DayOfWeekOptions +/// +/// \param[in] values input to extract number of the day of the week from +/// \param[in] options for setting start of the week and day numbering +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfWeek(const Datum& values, + DayOfWeekOptions options = DayOfWeekOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief DayOfYear returns number of day of the year for each element of `values`. +/// January 1st maps to day number 1, February 1st to 32, etc. +/// +/// \param[in] values input to extract number of day of the year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief ISOYear returns ISO year number for each element of `values`. +/// First week of an ISO year has the majority (4 or more) of its days in January. 
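+///
+/// For example, 2016-01-01 falls in ISO year 2015: the ISO week containing it
+/// (Monday 2015-12-28 through Sunday 2016-01-03) has the majority of its days
+/// in December.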
+///
+/// \param[in] values input to extract ISO year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> ISOYear(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief USYear returns US epidemiological year number for each element of `values`.
+/// First week of US epidemiological year has the majority (4 or more) of its
+/// days in January. Last week of US epidemiological year has the year's last
+/// Wednesday in it. US epidemiological week starts on Sunday.
+///
+/// \param[in] values input to extract US epidemiological year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> USYear(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief ISOWeek returns ISO week of year number for each element of `values`.
+/// First ISO week has the majority (4 or more) of its days in January.
+/// ISO week starts on Monday. Year can have 52 or 53 weeks.
+/// Week numbering starts with 1.
+///
+/// \param[in] values input to extract ISO week of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief USWeek returns US week of year number for each element of `values`.
+/// First US week has the majority (4 or more) of its days in January.
+/// US week starts on Sunday. Year can have 52 or 53 weeks.
+/// Week numbering starts with 1.
+///
+/// \param[in] values input to extract US week of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> USWeek(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Week returns week of year number for each element of `values`.
+/// First ISO week has the majority (4 or more) of its days in January.
+/// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1
+/// depending on WeekOptions.count_from_zero.
+///
+/// \param[in] values input to extract week of year from
+/// \param[in] options for setting numbering start
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Week(const Datum& values, WeekOptions options = WeekOptions(),
+                                ExecContext* ctx = NULLPTR);
+
+/// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for
+/// each element of `values`.
+/// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7.
+///
+/// \param[in] values input to extract ISO calendar struct from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Quarter returns the quarter of year number for each element of `values`
+/// First quarter maps to 1 and fourth quarter maps to 4.
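+///
+/// For example, 2021-05-15 yields 2 and 2021-11-01 yields 4.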
+///
+/// \param[in] values input to extract quarter of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Quarter(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Hour returns hour value for each element of `values`
+///
+/// \param[in] values input to extract hour from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Hour(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Minute returns minutes value for each element of `values`
+///
+/// \param[in] values input to extract minutes from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Minute(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Second returns seconds value for each element of `values`
+///
+/// \param[in] values input to extract seconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Second(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Millisecond returns number of milliseconds since the last full second
+/// for each element of `values`
+///
+/// \param[in] values input to extract milliseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Millisecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Microsecond returns number of microseconds since the last full millisecond
+/// for each element of `values`
+///
+/// \param[in] values input to extract microseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Microsecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Nanosecond returns number of nanoseconds since the last full microsecond
+/// for each element of `values`
+///
+/// \param[in] values input to extract nanoseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Subsecond returns the fraction of second elapsed since last full second
+/// as a float for each element of `values`
+///
+/// \param[in] values input to extract subsecond from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Subsecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Format timestamps according to a format string
+///
+/// Return formatted time strings according to the format string
+/// `StrftimeOptions::format` and to the locale specifier `StrftimeOptions::locale`.
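+///
+/// A minimal usage sketch (illustrative only; `timestamps` stands for a
+/// timestamp Array or ChunkedArray, inside a function returning Result or
+/// Status):
+///
+///   ARROW_ASSIGN_OR_RAISE(
+///       Datum formatted, Strftime(timestamps, StrftimeOptions("%Y-%m-%d")));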
+///
+/// \param[in] values input timestamps
+/// \param[in] options for setting format string and locale
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Strftime(const Datum& values, StrftimeOptions options,
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Parse timestamps according to a format string
+///
+/// Return parsed timestamps according to the format string
+/// `StrptimeOptions::format` at time resolution `StrptimeOptions::unit`. Parse errors
+/// are raised depending on the `StrptimeOptions::error_is_null` setting.
+///
+/// \param[in] values input strings
+/// \param[in] options for setting format string, unit and error_is_null
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Strptime(const Datum& values, StrptimeOptions options,
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Converts timestamps from local timestamp without a timezone to a timestamp
+/// with timezone, interpreting the local timestamp as being in the specified timezone
+/// for each element of `values`
+///
+/// \param[in] values input to convert
+/// \param[in] options for setting source timezone, exception and ambiguous timestamp
+/// handling.
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> AssumeTimezone(const Datum& values,
+                                          AssumeTimezoneOptions options,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief IsDaylightSavings returns whether daylight savings is observed for each
+/// element of `values`
+///
+/// \param[in] values input to extract daylight savings indicator from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> IsDaylightSavings(const Datum& values,
+                                             ExecContext* ctx = NULLPTR);
+
+/// \brief LocalTimestamp converts timestamp to timezone naive local timestamp
+///
+/// \param[in] values input to convert to local time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 12.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> LocalTimestamp(const Datum& values,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief Years Between finds the number of years between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> YearsBetween(const Datum& left, const Datum& right,
+                                        ExecContext* ctx = NULLPTR);
+
+/// \brief Quarters Between finds the number of quarters between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> QuartersBetween(const Datum& left, const Datum& right,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Months Between finds the number of months between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MonthsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Weeks Between finds the number of weeks between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result WeeksBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Month Day Nano Between finds the number of months, days, and nanoseconds +/// between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MonthDayNanoBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief DayTime Between finds the number of days and milliseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayTimeBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Days Between finds the number of days between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DaysBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Hours Between finds the number of hours between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result HoursBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Minutes Between finds the number of minutes between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MinutesBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Seconds Between finds the number of hours between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result SecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Milliseconds Between finds the number of milliseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// 
\param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MillisecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Microseconds Between finds the number of microseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MicrosecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Nanoseconds Between finds the number of nanoseconds between two values +/// +/// \param[in] left input treated as the start time +/// \param[in] right input treated as the end time +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result NanosecondsBetween(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given +/// query key in a map. +/// +/// Returns an array of items for FIRST and LAST, and an array of list of items for ALL. +/// +/// \param[in] map to look in +/// \param[in] options to pass a query key and choose which matching keys to return +/// (FIRST, LAST or ALL) +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result MapLookup(const Datum& map, MapLookupOptions options, + ExecContext* ctx = NULLPTR); +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..919572f16ee69edaa348f432d36214896b455732 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h @@ -0,0 +1,697 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/compute/function_options.h" +#include "arrow/compute/ordering.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace compute { + +class ExecContext; + +/// \addtogroup compute-concrete-options +/// @{ + +class ARROW_EXPORT FilterOptions : public FunctionOptions { + public: + /// Configure the action taken when a slot of the selection mask is null + enum NullSelectionBehavior { + /// The corresponding filtered value will be removed in the output. + DROP, + /// The corresponding filtered value will be null in the output. + EMIT_NULL, + }; + + explicit FilterOptions(NullSelectionBehavior null_selection = DROP); + static constexpr char const kTypeName[] = "FilterOptions"; + static FilterOptions Defaults() { return FilterOptions(); } + + NullSelectionBehavior null_selection_behavior = DROP; +}; + +class ARROW_EXPORT TakeOptions : public FunctionOptions { + public: + explicit TakeOptions(bool boundscheck = true); + static constexpr char const kTypeName[] = "TakeOptions"; + static TakeOptions BoundsCheck() { return TakeOptions(true); } + static TakeOptions NoBoundsCheck() { return TakeOptions(false); } + static TakeOptions Defaults() { return BoundsCheck(); } + + bool boundscheck = true; +}; + +/// \brief Options for the dictionary encode function +class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions { + public: + /// Configure how null values will be encoded + enum NullEncodingBehavior { + /// The null value will be added to the dictionary with a proper index. + ENCODE, + /// The null value will be masked in the indices array. + MASK + }; + + explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK); + static constexpr char const kTypeName[] = "DictionaryEncodeOptions"; + static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); } + + NullEncodingBehavior null_encoding_behavior = MASK; +}; + +/// \brief Options for the run-end encode function +class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions { + public: + explicit RunEndEncodeOptions(std::shared_ptr run_end_type = int32()); + static constexpr char const kTypeName[] = "RunEndEncodeOptions"; + static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); } + + std::shared_ptr run_end_type; +}; + +class ARROW_EXPORT ArraySortOptions : public FunctionOptions { + public: + explicit ArraySortOptions(SortOrder order = SortOrder::Ascending, + NullPlacement null_placement = NullPlacement::AtEnd); + static constexpr char const kTypeName[] = "ArraySortOptions"; + static ArraySortOptions Defaults() { return ArraySortOptions(); } + + /// Sorting order + SortOrder order; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +class ARROW_EXPORT SortOptions : public FunctionOptions { + public: + explicit SortOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd); + explicit SortOptions(const Ordering& ordering); + static constexpr char const kTypeName[] = "SortOptions"; + static SortOptions Defaults() { return SortOptions(); } + /// Convenience constructor to create an ordering from SortOptions + /// + /// Note: Both classes contain the exact same information. However, + /// sort_options should only be used in a "function options" context while Ordering + /// is used more generally. 
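+  ///
+  /// A small sketch (illustrative):
+  /// \code{.cpp}
+  /// SortOptions opts({SortKey("a", SortOrder::Descending)});
+  /// Ordering ordering = opts.AsOrdering();
+  /// \endcode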
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); } + Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +/// \brief SelectK options +class ARROW_EXPORT SelectKOptions : public FunctionOptions { + public: + explicit SelectKOptions(int64_t k = -1, std::vector sort_keys = {}); + static constexpr char const kTypeName[] = "SelectKOptions"; + static SelectKOptions Defaults() { return SelectKOptions(); } + + static SelectKOptions TopKDefault(int64_t k, std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Descending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Descending)); + } + return SelectKOptions{k, keys}; + } + static SelectKOptions BottomKDefault(int64_t k, + std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Ascending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Ascending)); + } + return SelectKOptions{k, keys}; + } + + /// The number of `k` elements to keep. + int64_t k; + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; +}; + +/// \brief Rank options +class ARROW_EXPORT RankOptions : public FunctionOptions { + public: + /// Configure how ties between equal values are handled + enum Tiebreaker { + /// Ties get the smallest possible rank in sorted order. + Min, + /// Ties get the largest possible rank in sorted order. + Max, + /// Ranks are assigned in order of when ties appear in the input. + /// This ensures the ranks are a stable permutation of the input. + First, + /// The ranks span a dense [1, M] interval where M is the number + /// of distinct values in the input. + Dense + }; + + explicit RankOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First); + /// Convenience constructor for array inputs + explicit RankOptions(SortOrder order, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First) + : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {} + + static constexpr char const kTypeName[] = "RankOptions"; + static RankOptions Defaults() { return RankOptions(); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; + /// Tiebreaker for dealing with equal values in ranks + Tiebreaker tiebreaker; +}; + +/// \brief Partitioning options for NthToIndices +class ARROW_EXPORT PartitionNthOptions : public FunctionOptions { + public: + explicit PartitionNthOptions(int64_t pivot, + NullPlacement null_placement = NullPlacement::AtEnd); + PartitionNthOptions() : PartitionNthOptions(0) {} + static constexpr char const kTypeName[] = "PartitionNthOptions"; + + /// The index into the equivalent sorted array of the partition pivot element. 
+ int64_t pivot; + /// Whether nulls and NaNs are partitioned at the start or at the end + NullPlacement null_placement; +}; + +/// \brief Options for cumulative functions +/// \note Also aliased as CumulativeSumOptions for backward compatibility +class ARROW_EXPORT CumulativeOptions : public FunctionOptions { + public: + explicit CumulativeOptions(bool skip_nulls = false); + explicit CumulativeOptions(double start, bool skip_nulls = false); + explicit CumulativeOptions(std::shared_ptr start, bool skip_nulls = false); + static constexpr char const kTypeName[] = "CumulativeOptions"; + static CumulativeOptions Defaults() { return CumulativeOptions(); } + + /// Optional starting value for cumulative operation computation, default depends on the + /// operation and input type. + /// - sum: 0 + /// - prod: 1 + /// - min: maximum of the input type + /// - max: minimum of the input type + /// - mean: start is ignored because it has no meaning for mean + std::optional> start; + + /// If true, nulls in the input are ignored and produce a corresponding null output. + /// When false, the first null encountered is propagated through the remaining output. + bool skip_nulls = false; +}; +using CumulativeSumOptions = CumulativeOptions; // For backward compatibility + +/// \brief Options for pairwise functions +class ARROW_EXPORT PairwiseOptions : public FunctionOptions { + public: + explicit PairwiseOptions(int64_t periods = 1); + static constexpr char const kTypeName[] = "PairwiseOptions"; + static PairwiseOptions Defaults() { return PairwiseOptions(); } + + /// Periods to shift for applying the binary operation, accepts negative values. + int64_t periods = 1; +}; + +/// @} + +/// \brief Filter with a boolean selection filter +/// +/// The output will be populated with values from the input at positions +/// where the selection filter is not 0. Nulls in the filter will be handled +/// based on options.null_selection_behavior. 
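+///
+/// A minimal sketch (illustrative; `values` and `filter` are assumed to be
+/// pre-built Datums):
+/// \code{.cpp}
+/// ARROW_ASSIGN_OR_RAISE(
+///     Datum filtered, Filter(values, filter, FilterOptions::Defaults()));
+/// \endcode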
+/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// filter = [0, 1, 1, 0, null, 1], the output will be +/// (null_selection_behavior == DROP) = ["b", "c", "f"] +/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"] +/// +/// \param[in] values array to filter +/// \param[in] filter indicates which values should be filtered out +/// \param[in] options configures null_selection_behavior +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Filter(const Datum& values, const Datum& filter, + const FilterOptions& options = FilterOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +namespace internal { + +// These internal functions are implemented in kernels/vector_selection.cc + +/// \brief Return the number of selected indices in the boolean filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +int64_t GetFilterOutputSize(const ArraySpan& filter, + FilterOptions::NullSelectionBehavior null_selection); + +/// \brief Compute uint64 selection indices for use with Take given a boolean +/// filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +Result> GetTakeIndices( + const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection, + MemoryPool* memory_pool = default_memory_pool()); + +} // namespace internal + +/// \brief ReplaceWithMask replaces each value in the array corresponding +/// to a true value in the mask with the next element from `replacements`. +/// +/// \param[in] values Array input to replace +/// \param[in] mask Array or Scalar of Boolean mask values +/// \param[in] replacements The replacement values to draw from. There must +/// be as many replacement values as true values in the mask. +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result ReplaceWithMask(const Datum& values, const Datum& mask, + const Datum& replacements, ExecContext* ctx = NULLPTR); + +/// \brief FillNullForward fill null values in forward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in forward direction. +/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "c", "c", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief FillNullBackward fill null values in backward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in backward direction. 
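+///
+/// A minimal sketch (illustrative; `arr` is assumed to be a Datum that
+/// contains nulls):
+/// \code{.cpp}
+/// ARROW_ASSIGN_OR_RAISE(Datum filled, FillNullBackward(arr));
+/// \endcode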
+///
+/// For example given values = ["a", "b", "c", null, null, "f"],
+/// the output will be = ["a", "b", "c", "f", "f", "f"]
+///
+/// \param[in] values datum from which to take
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Take from an array of values at indices in another array
+///
+/// The output array will be of the same type as the input values
+/// array, with elements taken from the values array at the given
+/// indices. If an index is null then the taken element will be null.
+///
+/// For example given values = ["a", "b", "c", null, "e", "f"] and
+/// indices = [2, 1, null, 3], the output will be
+/// = [values[2], values[1], null, values[3]]
+/// = ["c", "b", null, null]
+///
+/// \param[in] values datum from which to take
+/// \param[in] indices which values to take
+/// \param[in] options options
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> Take(const Datum& values, const Datum& indices,
+                   const TakeOptions& options = TakeOptions::Defaults(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Take with Array inputs and output
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
+                                    const TakeOptions& options = TakeOptions::Defaults(),
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Drop Null from an array of values
+///
+/// The output array will be of the same type as the input values
+/// array, with elements taken from the values array without nulls.
+///
+/// For example given values = ["a", "b", "c", null, "e", "f"],
+/// the output will be = ["a", "b", "c", "e", "f"]
+///
+/// \param[in] values datum from which to take
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief DropNull with Array inputs and output
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that partition an array around the n-th sorted element.
+///
+/// Find the index of the n-th (0-based) smallest value and perform an indirect
+/// partition of the array around that element. Output indices[0 ~ n-1]
+/// hold values no greater than the n-th element, and indices[n+1 ~ end]
+/// hold values no less than the n-th element. Elements in each partition
+/// are not sorted. Nulls will be partitioned to the end of the output.
+/// Output is not guaranteed to be stable.
+///
+/// \param[in] values array to be partitioned
+/// \param[in] n pivot array around sorted n-th element
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would partition an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
+                                            ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that partition an array around the n-th sorted element.
+///
+/// This overload takes a PartitionNthOptions specifying the pivot index
+/// and the null handling.
+///
+/// \param[in] values array to be partitioned
+/// \param[in] options options including pivot index and null handling
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would partition an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
+                                            const PartitionNthOptions& options,
+                                            ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that would select the first `k` elements.
+///
+/// Perform an indirect sort of the datum, keeping only the first `k` elements. The
+/// output array will contain indices such that the item indicated by the k-th index
+/// will be in the position it would be if the datum were sorted by `options.sort_keys`.
+/// However, indices of null values will not be part of the output. The sort is not
+/// guaranteed to be stable.
+///
+/// \param[in] datum datum to be partitioned
+/// \param[in] options options
+/// \param[in] ctx the function execution context, optional
+/// \return a datum with the same schema as the input
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
+                                               const SelectKOptions& options,
+                                               ExecContext* ctx = NULLPTR);
+
+/// \brief Return the indices that would sort an array.
+///
+/// Perform an indirect sort of array. The output array will contain
+/// indices that would sort an array, which would be the same length
+/// as input. Nulls will be stably partitioned to the end of the output
+/// regardless of order.
+///
+/// For example given array = [null, 1, 3.3, null, 2, 5.3] and
+/// order = SortOrder::Descending, the output will be [5, 2, 4, 1, 0, 3].
+///
+/// \param[in] array array to sort
+/// \param[in] order ascending or descending
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would sort an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> SortIndices(const Array& array,
+                                           SortOrder order = SortOrder::Ascending,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Return the indices that would sort an array.
+///
+/// This overload takes an ArraySortOptions specifying the sort order
+/// and the null handling.
+///
+/// \param[in] array array to sort
+/// \param[in] options options including sort order and null handling
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would sort an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> SortIndices(const Array& array,
+                                           const ArraySortOptions& options,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Return the indices that would sort a chunked array.
+///
+/// Perform an indirect sort of chunked array. The output array will
+/// contain indices that would sort a chunked array, which would be
+/// the same length as input. Nulls will be stably partitioned to the
+/// end of the output regardless of order.
+///
+/// For example given chunked_array = [[null, 1], [3.3], [null, 2, 5.3]]
+/// and order = SortOrder::Descending, the output will be [5, 2, 4, 1, 0, 3].
+///
+/// \param[in] chunked_array chunked array to sort
+/// \param[in] order ascending or descending
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would sort an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
+                                           SortOrder order = SortOrder::Ascending,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Return the indices that would sort a chunked array.
+///
+/// This overload takes an ArraySortOptions specifying the sort order
+/// and the null handling.
+///
+/// \param[in] chunked_array chunked array to sort
+/// \param[in] options options including sort order and null handling
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would sort an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
+                                           const ArraySortOptions& options,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Return the indices that would sort an input in the
+/// specified order. Input is one of array, chunked array, record batch,
+/// or table.
+/// +/// Perform an indirect sort of input. The output array will contain +/// indices that would sort an input, which would be the same length +/// as input. Nulls will be stably partitioned to the start or to the end +/// of the output depending on SortOrder::null_placement. +/// +/// For example given input (table) = { +/// "column1": [[null, 1], [ 3, null, 2, 1]], +/// "column2": [[ 5], [3, null, null, 5, 5]], +/// } and options = { +/// {"column1", SortOrder::Ascending}, +/// {"column2", SortOrder::Descending}, +/// }, the output will be [5, 1, 4, 2, 0, 3]. +/// +/// \param[in] datum array, chunked array, record batch or table to sort +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort a table +ARROW_EXPORT +Result> SortIndices(const Datum& datum, const SortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Compute unique elements from an array-like object +/// +/// Note if a null occurs in the input it will NOT be included in the output. +/// +/// \param[in] datum array-like input +/// \param[in] ctx the function execution context, optional +/// \return result as Array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Unique(const Datum& datum, ExecContext* ctx = NULLPTR); + +// Constants for accessing the output of ValueCounts +ARROW_EXPORT extern const char kValuesFieldName[]; +ARROW_EXPORT extern const char kCountsFieldName[]; +ARROW_EXPORT extern const int32_t kValuesFieldIndex; +ARROW_EXPORT extern const int32_t kCountsFieldIndex; + +/// \brief Return counts of unique elements from an array-like object. +/// +/// Note that the counts do not include counts for nulls in the array. These can be +/// obtained separately from metadata. +/// +/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values +/// which can lead to unexpected results if the input Array has these values. +/// +/// \param[in] value array-like input +/// \param[in] ctx the function execution context, optional +/// \return counts An array of structs. +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> ValueCounts(const Datum& value, + ExecContext* ctx = NULLPTR); + +/// \brief Dictionary-encode values in an array-like object +/// +/// Any nulls encountered in the dictionary will be handled according to the +/// specified null encoding behavior. +/// +/// For example, given values ["a", "b", null, "a", null] the output will be +/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null] +/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"] +/// +/// If the input is already dictionary encoded this function is a no-op unless +/// it needs to modify the null_encoding (TODO) +/// +/// \param[in] data array-like input +/// \param[in] ctx the function execution context, optional +/// \param[in] options configures null encoding behavior +/// \return result with same shape and type as input +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result DictionaryEncode( + const Datum& data, + const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Run-end-encode values in an array-like object +/// +/// The returned run-end encoded type uses the same value type of the input and +/// run-end type defined in the options. 
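+///
+/// A minimal sketch (illustrative; `arr` is assumed to be an array-like
+/// Datum):
+/// \code{.cpp}
+/// RunEndEncodeOptions opts(int16());
+/// ARROW_ASSIGN_OR_RAISE(Datum encoded, RunEndEncode(arr, opts));
+/// \endcode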
+/// +/// \param[in] value array-like input +/// \param[in] options configures encoding behavior +/// \param[in] ctx the function execution context, optional +/// \return result with same shape but run-end encoded +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndEncode( + const Datum& value, + const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Decode a Run-End Encoded array to a plain array +/// +/// The output data type is the same as the values array type of run-end encoded +/// input. +/// +/// \param[in] value run-end-encoded input +/// \param[in] ctx the function execution context, optional +/// \return plain array resulting from decoding the run-end encoded input +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative sum of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative sum behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeSum( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative product of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative prod behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeProd( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative max of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative max behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMax( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative min of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative min behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMin( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative mean of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative mean behavior, `start` is ignored +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMean( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Return the first order difference of an array. +/// +/// Computes the first order difference of an array, i.e. +/// output[i] = input[i] - input[i - p] if i >= p +/// output[i] = null otherwise +/// where p is the period. For example, with p = 1, +/// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5]. 
+/// With p = 2, +/// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6] +/// p can also be negative, in which case the diff is computed in +/// the opposite direction. +/// \param[in] array array input +/// \param[in] options options, specifying overflow behavior and period +/// \param[in] check_overflow whether to return error on overflow +/// \param[in] ctx the function execution context, optional +/// \return result as array +ARROW_EXPORT +Result> PairwiseDiff(const Array& array, + const PairwiseOptions& options, + bool check_overflow = false, + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..18e56092dda2a5f8f997de5b5cd1c81262e77a8f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/compute/function.h" +#include "arrow/compute/function_options.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; + +namespace compute { + +class ExecContext; + +/// \addtogroup compute-concrete-options +/// @{ + +class ARROW_EXPORT CastOptions : public FunctionOptions { + public: + explicit CastOptions(bool safe = true); + + static constexpr char const kTypeName[] = "CastOptions"; + static CastOptions Safe(TypeHolder to_type = {}) { + CastOptions safe(true); + safe.to_type = std::move(to_type); + return safe; + } + + static CastOptions Unsafe(TypeHolder to_type = {}) { + CastOptions unsafe(false); + unsafe.to_type = std::move(to_type); + return unsafe; + } + + // Type being casted to. May be passed separate to eager function + // compute::Cast + TypeHolder to_type; + + bool allow_int_overflow; + bool allow_time_truncate; + bool allow_time_overflow; + bool allow_decimal_truncate; + bool allow_float_truncate; + // Indicate if conversions from Binary/FixedSizeBinary to string must + // validate the utf8 payload. 
+ bool allow_invalid_utf8; + + /// true if the safety options all match CastOptions::Safe + /// + /// Note, if this returns false it does not mean is_unsafe will return true + bool is_safe() const; + /// true if the safety options all match CastOptions::Unsafe + /// + /// Note, if this returns false it does not mean is_safe will return true + bool is_unsafe() const; +}; + +/// @} + +/// \brief Return true if a cast function is defined +ARROW_EXPORT +bool CanCast(const DataType& from_type, const DataType& to_type); + +// ---------------------------------------------------------------------- +// Convenience invocation APIs for a number of kernels + +/// \brief Cast from one array type to another +/// \param[in] value array to cast +/// \param[in] to_type type to cast to +/// \param[in] options casting options +/// \param[in] ctx the function execution context, optional +/// \return the resulting array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Cast(const Array& value, const TypeHolder& to_type, + const CastOptions& options = CastOptions::Safe(), + ExecContext* ctx = NULLPTR); + +/// \brief Cast from one array type to another +/// \param[in] value array to cast +/// \param[in] options casting options. The "to_type" field must be populated +/// \param[in] ctx the function execution context, optional +/// \return the resulting array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const CastOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Cast from one value to another +/// \param[in] value datum to cast +/// \param[in] to_type type to cast to +/// \param[in] options casting options +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Cast(const Datum& value, const TypeHolder& to_type, + const CastOptions& options = CastOptions::Safe(), + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbefe4a1ab7b7e432e07607f674b5de1c947cd5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h @@ -0,0 +1,489 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow/array/data.h"
+#include "arrow/compute/expression.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+// It seems like 64K might be a good default chunksize to use for execution
+// based on the experience of other query processing systems. The current
+// default, though, is not to chunk contiguous arrays, but this may change in
+// the future once parallel execution is implemented.
+static constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
+
+/// \brief Context for expression-global variables and options used by
+/// function evaluation
+class ARROW_EXPORT ExecContext {
+ public:
+  // If no function registry is passed, the default is used.
+  explicit ExecContext(MemoryPool* pool = default_memory_pool(),
+                       ::arrow::internal::Executor* executor = NULLPTR,
+                       FunctionRegistry* func_registry = NULLPTR);
+
+  /// \brief The MemoryPool used for allocations, default is
+  /// default_memory_pool().
+  MemoryPool* memory_pool() const { return pool_; }
+
+  const ::arrow::internal::CpuInfo* cpu_info() const;
+
+  /// \brief An Executor which may be used to parallelize execution.
+  ::arrow::internal::Executor* executor() const { return executor_; }
+
+  /// \brief The FunctionRegistry for looking up functions by name and
+  /// selecting kernels for execution. Defaults to the library-global function
+  /// registry provided by GetFunctionRegistry.
+  FunctionRegistry* func_registry() const { return func_registry_; }
+
+  // \brief Set maximum length unit of work for kernel execution. Larger
+  // contiguous array inputs will be split into smaller chunks, and, if
+  // possible and enabled, processed in parallel. The default chunksize is
+  // INT64_MAX, so contiguous arrays are not split.
+  void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
+
+  // \brief Maximum length for ExecBatch data chunks processed by
+  // kernels. Contiguous array inputs with longer length will be split into
+  // smaller chunks.
+  int64_t exec_chunksize() const { return exec_chunksize_; }
+
+  /// \brief Set whether to use multiple threads for function execution. This
+  /// is not yet used.
+  void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
+
+  /// \brief If true, then utilize multiple threads where relevant for function
+  /// execution. This is not yet used.
+  bool use_threads() const { return use_threads_; }
+
+  // Set the preallocation strategy for kernel execution as it relates to
+  // chunked execution. For chunked execution, whether via ChunkedArray inputs
+  // or splitting larger Array arguments into smaller pieces, contiguous
+  // allocation (if permitted by the kernel) will allocate one large array to
+  // write output into, yielding it to the caller at the end. If this option is
+  // set to off, then preallocations will be performed independently for each
+  // chunk of execution.
+  //
+  // TODO: At some point we might want to limit the size of contiguous
+  // preallocations. For example, even if the exec_chunksize is 64K or less, we
+  // might limit contiguous allocations to 1M records, say.
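+  //
+  // A configuration sketch (illustrative, not taken from this header's docs):
+  //
+  //   ExecContext ctx;
+  //   ctx.set_exec_chunksize(kDefaultExecChunksize);  // split large arrays
+  //   ctx.set_preallocate_contiguous(true);           // one output allocation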
+ void set_preallocate_contiguous(bool preallocate) { + preallocate_contiguous_ = preallocate; + } + + /// \brief If contiguous preallocations should be used when doing chunked + /// execution as specified by exec_chunksize(). See + /// set_preallocate_contiguous() for more information. + bool preallocate_contiguous() const { return preallocate_contiguous_; } + + private: + MemoryPool* pool_; + ::arrow::internal::Executor* executor_; + FunctionRegistry* func_registry_; + int64_t exec_chunksize_ = std::numeric_limits::max(); + bool preallocate_contiguous_ = true; + bool use_threads_ = true; +}; + +// TODO: Consider standardizing on uint16 selection vectors and only use them +// when we can ensure that each value is 64K length or smaller + +/// \brief Container for an array of value selection indices that were +/// materialized from a filter. +/// +/// Columnar query engines (see e.g. [1]) have found that rather than +/// materializing filtered data, the filter can instead be converted to an +/// array of the "on" indices and then "fusing" these indices in operator +/// implementations. This is especially relevant for aggregations but also +/// applies to scalar operations. +/// +/// We are not yet using this so this is mostly a placeholder for now. +/// +/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf +class ARROW_EXPORT SelectionVector { + public: + explicit SelectionVector(std::shared_ptr data); + + explicit SelectionVector(const Array& arr); + + /// \brief Create SelectionVector from boolean mask + static Result> FromMask(const BooleanArray& arr); + + const int32_t* indices() const { return indices_; } + int32_t length() const; + + private: + std::shared_ptr data_; + const int32_t* indices_; +}; + +/// An index to represent that a batch does not belong to an ordered stream +constexpr int64_t kUnsequencedIndex = -1; + +/// \brief A unit of work for kernel execution. It contains a collection of +/// Array and Scalar values and an optional SelectionVector indicating that +/// there is an unmaterialized filter that either must be materialized, or (if +/// the kernel supports it) pushed down into the kernel implementation. +/// +/// ExecBatch is semantically similar to RecordBatch in that in a SQL context +/// it represents a collection of records, but constant "columns" are +/// represented by Scalar values rather than having to be converted into arrays +/// with repeated values. +/// +/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight +/// than is desirable for this class. Microbenchmarks would help determine for +/// sure. See ARROW-8928. + +/// \addtogroup acero-internals +/// @{ + +struct ARROW_EXPORT ExecBatch { + ExecBatch() = default; + ExecBatch(std::vector values, int64_t length) + : values(std::move(values)), length(length) {} + + explicit ExecBatch(const RecordBatch& batch); + + /// \brief Infer the ExecBatch length from values. + static Result InferLength(const std::vector& values); + + /// Creates an ExecBatch with length-validation. + /// + /// If any value is given, then all values must have a common length. If the given + /// length is negative, then the length of the ExecBatch is set to this common length, + /// or to 1 if no values are given. Otherwise, the given length must equal the common + /// length, if any value is given. 
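+  ///
+  /// A usage sketch (illustrative; `arr` and `scalar` are assumed to be
+  /// Datums wrapping an Array and a Scalar respectively):
+  /// \code{.cpp}
+  /// ARROW_ASSIGN_OR_RAISE(ExecBatch batch, ExecBatch::Make({arr, scalar}));
+  /// \endcode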
+  static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);
+
+  Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
+      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
+
+  /// The values representing positional arguments to be passed to a kernel's
+  /// exec function for processing.
+  std::vector<Datum> values;
+
+  /// A deferred filter represented as an array of indices into the values.
+  ///
+  /// For example, the filter [true, true, false, true] would be represented as
+  /// the selection vector [0, 1, 3]. When the selection vector is set,
+  /// ExecBatch::length is equal to the length of this array.
+  std::shared_ptr<SelectionVector> selection_vector;
+
+  /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
+  Expression guarantee = literal(true);
+
+  /// The semantic length of the ExecBatch. When the values are all scalars,
+  /// the length should be set to 1 for non-aggregate kernels; otherwise the
+  /// length is taken from the array values, except when there is a selection
+  /// vector. When there is a selection vector set, the length of the batch is
+  /// the length of the selection. Aggregate kernels can have an ExecBatch
+  /// formed by projecting just the partition columns from a batch, in which
+  /// case it would have scalar rows with length greater than 1.
+  ///
+  /// If the array values are of length 0 then the length is 0 regardless of
+  /// whether any values are Scalar.
+  int64_t length = 0;
+
+  /// \brief index of this batch in a sorted stream of batches
+  ///
+  /// This index must be strictly monotonic starting at 0 without gaps, or
+  /// it can be set to kUnsequencedIndex if there is no meaningful order.
+  int64_t index = kUnsequencedIndex;
+
+  /// \brief The sum of bytes in each buffer referenced by the batch
+  ///
+  /// Note: Scalars are not counted
+  /// Note: Some values may reference only part of a buffer, for
+  ///       example, an array with an offset. The actual data
+  ///       visible to this batch will be smaller than the total
+  ///       buffer size in this case.
+  int64_t TotalBufferSize() const;
+
+  /// \brief Return the value at the i-th index
+  template <typename index_type>
+  inline const Datum& operator[](index_type i) const {
+    return values[i];
+  }
+
+  bool Equals(const ExecBatch& other) const;
+
+  /// \brief A convenience for the number of values / arguments.
+  int num_values() const { return static_cast<int>(values.size()); }
+
+  ExecBatch Slice(int64_t offset, int64_t length) const;
+
+  Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;
+
+  /// \brief A convenience for returning the types from the batch.
+ std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + std::string ToString() const; +}; + +inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); } +inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*); + +/// @} + +/// \defgroup compute-internals Utilities for calling functions, useful for those +/// extending the function registry +/// +/// @{ + +struct ExecValue { + ArraySpan array = {}; + const Scalar* scalar = NULLPTR; + + ExecValue(Scalar* scalar) // NOLINT implicit conversion + : scalar(scalar) {} + + ExecValue(ArraySpan array) // NOLINT implicit conversion + : array(std::move(array)) {} + + ExecValue(const ArrayData& array) { // NOLINT implicit conversion + this->array.SetMembers(array); + } + + ExecValue() = default; + ExecValue(const ExecValue& other) = default; + ExecValue& operator=(const ExecValue& other) = default; + ExecValue(ExecValue&& other) = default; + ExecValue& operator=(ExecValue&& other) = default; + + int64_t length() const { return this->is_array() ? this->array.length : 1; } + + bool is_array() const { return this->scalar == NULLPTR; } + bool is_scalar() const { return !this->is_array(); } + + void SetArray(const ArrayData& array) { + this->array.SetMembers(array); + this->scalar = NULLPTR; + } + + void SetScalar(const Scalar* scalar) { this->scalar = scalar; } + + template + const ExactType& scalar_as() const { + return ::arrow::internal::checked_cast(*this->scalar); + } + + /// XXX: here temporarily for compatibility with datum, see + /// e.g. MakeStructExec in scalar_nested.cc + int64_t null_count() const { + if (this->is_array()) { + return this->array.GetNullCount(); + } else { + return this->scalar->is_valid ? 0 : 1; + } + } + + const DataType* type() const { + if (this->is_array()) { + return array.type; + } else { + return scalar->type.get(); + } + } +}; + +struct ARROW_EXPORT ExecResult { + // The default value of the variant is ArraySpan + std::variant> value; + + int64_t length() const { + if (this->is_array_span()) { + return this->array_span()->length; + } else { + return this->array_data()->length; + } + } + + const DataType* type() const { + if (this->is_array_span()) { + return this->array_span()->type; + } else { + return this->array_data()->type.get(); + } + } + + const ArraySpan* array_span() const { return &std::get(this->value); } + ArraySpan* array_span_mutable() { return &std::get(this->value); } + + bool is_array_span() const { return this->value.index() == 0; } + + const std::shared_ptr& array_data() const { + return std::get>(this->value); + } + ArrayData* array_data_mutable() { + return std::get>(this->value).get(); + } + + bool is_array_data() const { return this->value.index() == 1; } +}; + +/// \brief A "lightweight" column batch object which contains no +/// std::shared_ptr objects and does not have any memory ownership +/// semantics. Can represent a view onto an "owning" ExecBatch. 
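+///
+/// A small sketch (illustrative; `batch` is assumed to be an existing
+/// ExecBatch):
+/// \code{.cpp}
+/// ExecSpan span(batch);                       // non-owning view
+/// ExecBatch round_trip = span.ToExecBatch();  // materialize an owning copy
+/// \endcode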
+struct ARROW_EXPORT ExecSpan { + ExecSpan() = default; + ExecSpan(const ExecSpan& other) = default; + ExecSpan& operator=(const ExecSpan& other) = default; + ExecSpan(ExecSpan&& other) = default; + ExecSpan& operator=(ExecSpan&& other) = default; + + explicit ExecSpan(std::vector values, int64_t length) + : length(length), values(std::move(values)) {} + + explicit ExecSpan(const ExecBatch& batch) { + this->length = batch.length; + this->values.resize(batch.values.size()); + for (size_t i = 0; i < batch.values.size(); ++i) { + const Datum& in_value = batch[i]; + ExecValue* out_value = &this->values[i]; + if (in_value.is_array()) { + out_value->SetArray(*in_value.array()); + } else { + out_value->SetScalar(in_value.scalar().get()); + } + } + } + + /// \brief Return the value at the i-th index + template + inline const ExecValue& operator[](index_type i) const { + return values[i]; + } + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + ExecBatch ToExecBatch() const { + ExecBatch result; + result.length = this->length; + for (const ExecValue& value : this->values) { + if (value.is_array()) { + result.values.push_back(value.array.ToArrayData()); + } else { + result.values.push_back(value.scalar->GetSharedPtr()); + } + } + return result; + } + + int64_t length = 0; + std::vector values; +}; + +/// \defgroup compute-call-function One-shot calls to compute functions +/// +/// @{ + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + ExecContext* ctx = NULLPTR); + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + ExecContext* ctx = NULLPTR); + +/// @} + +/// \defgroup compute-function-executor One-shot calls to obtain function executors +/// +/// @{ + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, std::vector in_types, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// \brief One-shot executor provider for all types of functions. 
+/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types (taken from the Datum arguments) +/// and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, const std::vector& args, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h new file mode 100644 index 0000000000000000000000000000000000000000..9a36a6d3368fb9ee0486c9dba9ab86ba10764dc7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h @@ -0,0 +1,295 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/type_fwd.h" +#include "arrow/util/small_vector.h" + +namespace arrow { +namespace compute { + +/// \defgroup expression-core Expressions to describe data transformations +/// +/// @{ + +/// An unbound expression which maps a single Datum to another Datum. +/// An expression is one of +/// - A literal Datum. +/// - A reference to a single (potentially nested) field of the input Datum. +/// - A call to a compute function, with arguments specified by other Expressions. +class ARROW_EXPORT Expression { + public: + struct Call { + std::string function_name; + std::vector arguments; + std::shared_ptr options; + // Cached hash value + size_t hash; + + // post-Bind properties: + std::shared_ptr function; + const Kernel* kernel = NULLPTR; + std::shared_ptr kernel_state; + TypeHolder type; + + void ComputeHash(); + }; + + std::string ToString() const; + bool Equals(const Expression& other) const; + size_t hash() const; + struct Hash { + size_t operator()(const Expression& expr) const { return expr.hash(); } + }; + + /// Bind this expression to the given input type, looking up Kernels and field types. + /// Some expression simplification may be performed and implicit casts will be inserted. + /// Any state necessary for execution will be initialized and returned. + Result Bind(const TypeHolder& in, ExecContext* = NULLPTR) const; + Result Bind(const Schema& in_schema, ExecContext* = NULLPTR) const; + + // XXX someday + // Clone all KernelState in this bound expression. If any function referenced by this + // expression has mutable KernelState, it is not safe to execute or apply simplification + // passes to it (or copies of it!) from multiple threads. 
Cloning state produces new + // KernelStates where necessary to ensure that Expressions may be manipulated safely + // on multiple threads. + // Result CloneState() const; + // Status SetState(ExpressionState); + + /// Return true if all an expression's field references have explicit types + /// and all of its functions' kernels are looked up. + bool IsBound() const; + + /// Return true if this expression is composed only of Scalar literals, field + /// references, and calls to ScalarFunctions. + bool IsScalarExpression() const; + + /// Return true if this expression is literal and entirely null. + bool IsNullLiteral() const; + + /// Return true if this expression could evaluate to true. Will return true for any + /// unbound or non-boolean Expressions. IsSatisfiable does not (currently) do any + /// canonicalization or simplification of the expression, so even Expressions + /// which are unsatisfiable may spuriously return `true` here. This function is + /// intended for use in predicate pushdown where a filter expression is simplified + /// by a guarantee, so it assumes that trying to simplify again would be redundant. + bool IsSatisfiable() const; + + // XXX someday + // Result GetPipelines(); + + bool is_valid() const { return impl_ != NULLPTR; } + + /// Access a Call or return nullptr if this expression is not a call + const Call* call() const; + /// Access a Datum or return nullptr if this expression is not a literal + const Datum* literal() const; + /// Access a FieldRef or return nullptr if this expression is not a field_ref + const FieldRef* field_ref() const; + + /// The type to which this expression will evaluate + const DataType* type() const; + // XXX someday + // NullGeneralization::type nullable() const; + + struct Parameter { + FieldRef ref; + + // post-bind properties + TypeHolder type; + ::arrow::internal::SmallVector indices; + }; + const Parameter* parameter() const; + + Expression() = default; + explicit Expression(Call call); + explicit Expression(Datum literal); + explicit Expression(Parameter parameter); + + private: + using Impl = std::variant; + std::shared_ptr impl_; + + ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r); +}; + +inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); } +inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const Expression&, std::ostream*); + +// Factories + +ARROW_EXPORT +Expression literal(Datum lit); + +template +Expression literal(Arg&& arg) { + return literal(Datum(std::forward(arg))); +} + +ARROW_EXPORT +Expression field_ref(FieldRef ref); + +ARROW_EXPORT +Expression call(std::string function, std::vector arguments, + std::shared_ptr options = NULLPTR); + +template ::value>::type> +Expression call(std::string function, std::vector arguments, + Options options) { + return call(std::move(function), std::move(arguments), + std::make_shared(std::move(options))); +} + +/// Assemble a list of all fields referenced by an Expression at any depth. +ARROW_EXPORT +std::vector FieldsInExpression(const Expression&); + +/// Check if the expression references any fields. +ARROW_EXPORT +bool ExpressionHasFieldRefs(const Expression&); + +struct ARROW_EXPORT KnownFieldValues; + +/// Assemble a mapping from field references to known values. This derives known values +/// from "equal" and "is_null" Expressions referencing a field and a literal. 
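To make the expression factories above concrete, here is a minimal editorial sketch (not part of the original header) of building and binding an expression; the schema and field name are hypothetical:

```cpp
#include <arrow/api.h>
#include <arrow/compute/expression.h>

namespace cp = arrow::compute;

// Build `x > 1` from the literal/field_ref/call factories, then bind it
// against a schema so field types and kernels are resolved.
arrow::Result<cp::Expression> BuildAndBind() {
  cp::Expression expr =
      cp::call("greater", {cp::field_ref("x"), cp::literal(1)});
  auto schema = arrow::schema({arrow::field("x", arrow::int32())});
  return expr.Bind(*schema);  // IsBound() is true on the returned expression
}
```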
+ARROW_EXPORT
+Result<KnownFieldValues> ExtractKnownFieldValues(
+    const Expression& guaranteed_true_predicate);
+
+/// @}
+
+/// \defgroup expression-passes Functions for modification of Expressions
+///
+/// @{
+///
+/// These transform bound expressions. Some transforms utilize a guarantee, which is
+/// provided as an Expression which is guaranteed to evaluate to true. The
+/// guaranteed_true_predicate need not be bound, but canonicalization is currently
+/// deferred to producers of guarantees. For example, in order to be recognized as a
+/// guarantee on a field value, an Expression must be a call to "equal" with field_ref
+/// LHS and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, ...
+/// or other semantically identical Expressions will not be recognized.
+
+/// Weak canonicalization which establishes guarantees for subsequent passes. Even
+/// equivalent Expressions may result in different canonicalized expressions.
+/// TODO this could be a strong canonicalization
+ARROW_EXPORT
+Result<Expression> Canonicalize(Expression, ExecContext* = NULLPTR);
+
+/// Simplify Expressions based on literal arguments (for example, add(null, x) will
+/// always be null so replace the call with a null literal). Includes early evaluation
+/// of all calls whose arguments are entirely literal.
+ARROW_EXPORT
+Result<Expression> FoldConstants(Expression);
+
+/// Simplify Expressions by replacing with known values of the fields which they
+/// reference.
+ARROW_EXPORT
+Result<Expression> ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values,
+                                                Expression);
+
+/// Simplify an expression by replacing subexpressions based on a guarantee:
+/// a boolean expression which is guaranteed to evaluate to `true`. For example, this is
+/// used to remove redundant function calls from a filter expression or to replace a
+/// reference to a constant-value field with a literal.
+ARROW_EXPORT
+Result<Expression> SimplifyWithGuarantee(Expression,
+                                         const Expression& guaranteed_true_predicate);
+
+/// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3])
+///
+/// This isn't usually needed and does not offer any simplification by itself. However,
+/// it can be useful to normalize an expression to paths to make it simpler to work with.
+ARROW_EXPORT Result<Expression> RemoveNamedRefs(Expression expression);
+
+/// @}
+
+// Execution
+
+/// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a
+/// RecordBatch which may have missing or incorrectly ordered columns.
+/// Missing fields will be replaced with null scalars.
+ARROW_EXPORT Result<ExecBatch> MakeExecBatch(const Schema& full_schema,
+                                             const Datum& partial,
+                                             Expression guarantee = literal(true));
+
+/// Execute a scalar expression against the provided state and input ExecBatch. This
+/// expression must be bound.
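As a usage sketch of MakeExecBatch together with ExecuteScalarExpression (declared just below); `bound` is assumed to be an already-bound boolean expression:

```cpp
#include <arrow/api.h>
#include <arrow/compute/expression.h>

namespace cp = arrow::compute;

// Evaluate a bound filter against a RecordBatch: adapt the batch with
// MakeExecBatch, then execute the expression over it.
arrow::Result<arrow::Datum> EvalFilter(
    const cp::Expression& bound,
    const std::shared_ptr<arrow::RecordBatch>& batch) {
  ARROW_ASSIGN_OR_RAISE(cp::ExecBatch input,
                        cp::MakeExecBatch(*batch->schema(), batch));
  return cp::ExecuteScalarExpression(bound, input);
}
```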
+ARROW_EXPORT
+Result<Datum> ExecuteScalarExpression(const Expression&, const ExecBatch& input,
+                                      ExecContext* = NULLPTR);
+
+/// Convenience function for invoking against a RecordBatch
+ARROW_EXPORT
+Result<Datum> ExecuteScalarExpression(const Expression&, const Schema& full_schema,
+                                      const Datum& partial_input, ExecContext* = NULLPTR);
+
+// Serialization
+
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> Serialize(const Expression&);
+
+ARROW_EXPORT
+Result<Expression> Deserialize(std::shared_ptr<Buffer>);
+
+/// \defgroup expression-convenience Helpers for convenient expression creation
+///
+/// @{
+
+ARROW_EXPORT Expression project(std::vector<Expression> values,
+                                std::vector<std::string> names);
+
+ARROW_EXPORT Expression equal(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression less(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression greater(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs);
+
+ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false);
+
+ARROW_EXPORT Expression is_valid(Expression lhs);
+
+ARROW_EXPORT Expression and_(Expression lhs, Expression rhs);
+ARROW_EXPORT Expression and_(const std::vector<Expression>&);
+ARROW_EXPORT Expression or_(Expression lhs, Expression rhs);
+ARROW_EXPORT Expression or_(const std::vector<Expression>&);
+ARROW_EXPORT Expression not_(Expression operand);
+
+/// @}
+
+}  // namespace compute
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h
new file mode 100644
index 0000000000000000000000000000000000000000..be934a3c5abfce9e3d0b55ccbdee410d6dd64156
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h
@@ -0,0 +1,394 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle.
+
+#pragma once
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/compute/kernel.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/compare.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \addtogroup compute-functions
+/// @{
+
+/// \brief Contains the number of required arguments for the function.
+///
+/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity.
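Before the Arity struct, one more expression-level sketch: the Serialize/Deserialize pair above supports a round trip like the following (the helper name is ours):

```cpp
#include <arrow/buffer.h>
#include <arrow/compute/expression.h>
#include <arrow/result.h>

namespace cp = arrow::compute;

// Round-trip an expression through its serialized flatbuffer form.
arrow::Result<cp::Expression> RoundTrip(const cp::Expression& expr) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf, cp::Serialize(expr));
  return cp::Deserialize(std::move(buf));
}
```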
+struct ARROW_EXPORT Arity { + /// \brief A function taking no arguments + static Arity Nullary() { return Arity(0, false); } + + /// \brief A function taking 1 argument + static Arity Unary() { return Arity(1, false); } + + /// \brief A function taking 2 arguments + static Arity Binary() { return Arity(2, false); } + + /// \brief A function taking 3 arguments + static Arity Ternary() { return Arity(3, false); } + + /// \brief A function taking a variable number of arguments + /// + /// \param[in] min_args the minimum number of arguments required when + /// invoking the function + static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); } + + // NOTE: the 0-argument form (default constructor) is required for Cython + explicit Arity(int num_args = 0, bool is_varargs = false) + : num_args(num_args), is_varargs(is_varargs) {} + + /// The number of required arguments (or the minimum number for varargs + /// functions). + int num_args; + + /// If true, then the num_args is the minimum number of required arguments. + bool is_varargs = false; +}; + +struct ARROW_EXPORT FunctionDoc { + /// \brief A one-line summary of the function, using a verb. + /// + /// For example, "Add two numeric arrays or scalars". + std::string summary; + + /// \brief A detailed description of the function, meant to follow the summary. + std::string description; + + /// \brief Symbolic names (identifiers) for the function arguments. + /// + /// Some bindings may use this to generate nicer function signatures. + std::vector arg_names; + + // TODO add argument descriptions? + + /// \brief Name of the options class, if any. + std::string options_class; + + /// \brief Whether options are required for function execution + /// + /// If false, then either the function does not have an options class + /// or there is a usable default options value. + bool options_required; + + FunctionDoc() = default; + + FunctionDoc(std::string summary, std::string description, + std::vector arg_names, std::string options_class = "", + bool options_required = false) + : summary(std::move(summary)), + description(std::move(description)), + arg_names(std::move(arg_names)), + options_class(std::move(options_class)), + options_required(options_required) {} + + static const FunctionDoc& Empty(); +}; + +/// \brief An executor of a function with a preconfigured kernel +class ARROW_EXPORT FunctionExecutor { + public: + virtual ~FunctionExecutor() = default; + /// \brief Initialize or re-initialize the preconfigured kernel + /// + /// This method may be called zero or more times. Depending on how + /// the FunctionExecutor was obtained, it may already have been initialized. + virtual Status Init(const FunctionOptions* options = NULLPTR, + ExecContext* exec_ctx = NULLPTR) = 0; + /// \brief Execute the preconfigured kernel with arguments that must fit it + /// + /// The method requires the arguments be castable to the preconfigured types. + /// + /// \param[in] args Arguments to execute the function on + /// \param[in] length Length of arguments batch or -1 to default it. If the + /// function has no parameters, this determines the batch length, defaulting + /// to 0. Otherwise, if the function is scalar, this must equal the argument + /// batch's inferred length or be -1 to default to it. This is ignored for + /// vector functions. + virtual Result Execute(const std::vector& args, int64_t length = -1) = 0; +}; + +/// \brief Base class for compute functions. 
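A sketch contrasting the one-shot CallFunction path with a reusable FunctionExecutor as described above; it assumes the default registry's "add" function and int64 inputs:

```cpp
#include <arrow/api.h>
#include <arrow/compute/api.h>

namespace cp = arrow::compute;

// One-shot: dispatch and execute in a single call.
arrow::Result<arrow::Datum> AddOnce(const arrow::Datum& a, const arrow::Datum& b) {
  return cp::CallFunction("add", {a, b});
}

// Reusable: dispatch once via GetFunctionExecutor, then Execute per batch
// of the same argument types.
arrow::Status AddMany(const std::vector<std::vector<arrow::Datum>>& batches) {
  ARROW_ASSIGN_OR_RAISE(
      auto executor,
      cp::GetFunctionExecutor("add", {arrow::int64(), arrow::int64()}));
  for (const auto& args : batches) {
    ARROW_ASSIGN_OR_RAISE(arrow::Datum out, executor->Execute(args));
    (void)out;  // ... consume the result ...
  }
  return arrow::Status::OK();
}
```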
Function implementations contain a +/// collection of "kernels" which are implementations of the function for +/// specific argument types. Selecting a viable kernel for executing a function +/// is referred to as "dispatching". +class ARROW_EXPORT Function { + public: + /// \brief The kind of function, which indicates in what contexts it is + /// valid for use. + enum Kind { + /// A function that performs scalar data operations on whole arrays of + /// data. Can generally process Array or Scalar values. The size of the + /// output will be the same as the size (or broadcasted size, in the case + /// of mixing Array and Scalar inputs) of the input. + SCALAR, + + /// A function with array input and output whose behavior depends on the + /// values of the entire arrays passed, rather than the value of each scalar + /// value. + VECTOR, + + /// A function that computes scalar summary statistics from array input. + SCALAR_AGGREGATE, + + /// A function that computes grouped summary statistics from array input + /// and an array of group identifiers. + HASH_AGGREGATE, + + /// A function that dispatches to other functions and does not contain its + /// own kernels. + META + }; + + virtual ~Function() = default; + + /// \brief The name of the kernel. The registry enforces uniqueness of names. + const std::string& name() const { return name_; } + + /// \brief The kind of kernel, which indicates in what contexts it is valid + /// for use. + Function::Kind kind() const { return kind_; } + + /// \brief Contains the number of arguments the function requires, or if the + /// function accepts variable numbers of arguments. + const Arity& arity() const { return arity_; } + + /// \brief Return the function documentation + const FunctionDoc& doc() const { return doc_; } + + /// \brief Returns the number of registered kernels for this function. + virtual int num_kernels() const = 0; + + /// \brief Return a kernel that can execute the function given the exact + /// argument types (without implicit type casts). + /// + /// NB: This function is overridden in CastFunction. + virtual Result DispatchExact(const std::vector& types) const; + + /// \brief Return a best-match kernel that can execute the function given the argument + /// types, after implicit casts are applied. + /// + /// \param[in,out] values Argument types. An element may be modified to + /// indicate that the returned kernel only approximately matches the input + /// value descriptors; callers are responsible for casting inputs to the type + /// required by the kernel. + virtual Result DispatchBest(std::vector* values) const; + + /// \brief Get a function executor with a best-matching kernel + /// + /// The returned executor will by default work with the default FunctionOptions + /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`. + virtual Result> GetBestExecutor( + std::vector inputs) const; + + /// \brief Execute the function eagerly with the passed input arguments with + /// kernel dispatch, batch iteration, and memory allocation details taken + /// care of. + /// + /// If the `options` pointer is null, then `default_options()` will be used. + /// + /// This function can be overridden in subclasses. + virtual Result Execute(const std::vector& args, + const FunctionOptions* options, ExecContext* ctx) const; + + virtual Result Execute(const ExecBatch& batch, const FunctionOptions* options, + ExecContext* ctx) const; + + /// \brief Returns the default options for this function. 
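A dispatch-inspection sketch for the DispatchExact/DispatchBest methods above; it assumes the default registry (which provides "add" in standard builds):

```cpp
#include <arrow/api.h>
#include <arrow/compute/api.h>
#include <arrow/compute/registry.h>

namespace cp = arrow::compute;

// Ask which kernel DispatchBest would select for mixed input types.
// DispatchBest may rewrite `types` to the promoted types; the caller is
// then responsible for casting the actual inputs to match.
arrow::Status InspectDispatch() {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<cp::Function> func,
                        cp::GetFunctionRegistry()->GetFunction("add"));
  std::vector<cp::TypeHolder> types = {arrow::int32(), arrow::float64()};
  ARROW_ASSIGN_OR_RAISE(const cp::Kernel* kernel, func->DispatchBest(&types));
  (void)kernel;
  return arrow::Status::OK();
}
```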
+ /// + /// Whatever option semantics a Function has, implementations must guarantee + /// that default_options() is valid to pass to Execute as options. + const FunctionOptions* default_options() const { return default_options_; } + + virtual Status Validate() const; + + protected: + Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : name_(std::move(name)), + kind_(kind), + arity_(arity), + doc_(std::move(doc)), + default_options_(default_options) {} + + Status CheckArity(size_t num_args) const; + + std::string name_; + Function::Kind kind_; + Arity arity_; + const FunctionDoc doc_; + const FunctionOptions* default_options_ = NULLPTR; +}; + +namespace detail { + +template +class FunctionImpl : public Function { + public: + /// \brief Return pointers to current-available kernels for inspection + std::vector kernels() const { + std::vector result; + for (const auto& kernel : kernels_) { + result.push_back(&kernel); + } + return result; + } + + int num_kernels() const override { return static_cast(kernels_.size()); } + + protected: + FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options) + : Function(std::move(name), kind, arity, std::move(doc), default_options) {} + + std::vector kernels_; +}; + +/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned. +ARROW_EXPORT +const Kernel* DispatchExactImpl(const Function* func, const std::vector&); + +/// \brief Return an error message if no Kernel is found. +ARROW_EXPORT +Status NoMatchingKernel(const Function* func, const std::vector&); + +} // namespace detail + +/// \brief A function that executes elementwise operations on arrays or +/// scalars, and therefore whose results generally do not depend on the order +/// of the values in the arguments. Accepts and returns arrays that are all of +/// the same size. These functions roughly correspond to the functions used in +/// SQL expressions. +class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl { + public: + using KernelType = ScalarKernel; + + ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), Function::SCALAR, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel with given input/output types, no required state + /// initialization, preallocation for fixed-width types, and default null + /// handling (intersect validity bitmaps of inputs). + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarKernel kernel); +}; + +/// \brief A function that executes general array operations that may yield +/// outputs of different sizes or have results that depend on the whole array +/// contents. These functions roughly correspond to the functions found in +/// non-SQL array languages like APL and its derivatives. 
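Before the vector-function counterpart below, a hedged sketch of defining a ScalarFunction and adding one kernel; the function name, exec body, and registration are illustrative only (the exec assumes zero array offset for brevity, and validity is left to the default NullHandling::INTERSECTION):

```cpp
#include <cstring>
#include <memory>

#include <arrow/api.h>
#include <arrow/compute/api.h>
#include <arrow/compute/registry.h>

namespace cp = arrow::compute;

// A trivial "identity" kernel for int32: copy the input values into the
// preallocated output span.
arrow::Status IdentityExec(cp::KernelContext*, const cp::ExecSpan& batch,
                           cp::ExecResult* out) {
  const cp::ArraySpan& in = batch[0].array;
  cp::ArraySpan* out_span = out->array_span_mutable();
  std::memcpy(out_span->buffers[1].data, in.buffers[1].data,
              static_cast<size_t>(in.length) * sizeof(int32_t));
  return arrow::Status::OK();
}

arrow::Status RegisterIdentity() {
  auto func = std::make_shared<cp::ScalarFunction>(
      "example_identity", cp::Arity::Unary(), cp::FunctionDoc::Empty());
  ARROW_RETURN_NOT_OK(
      func->AddKernel({arrow::int32()}, arrow::int32(), IdentityExec));
  return cp::GetFunctionRegistry()->AddFunction(std::move(func));
}
```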
+class ARROW_EXPORT VectorFunction : public detail::FunctionImpl { + public: + using KernelType = VectorKernel; + + VectorFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), Function::VECTOR, arity, + std::move(doc), default_options) {} + + /// \brief Add a simple kernel with given input/output types, no required + /// state initialization, no data preallocation, and no preallocation of the + /// validity bitmap. + Status AddKernel(std::vector in_types, OutputType out_type, + ArrayKernelExec exec, KernelInit init = NULLPTR); + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(VectorKernel kernel); +}; + +class ARROW_EXPORT ScalarAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = ScalarAggregateKernel; + + ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::SCALAR_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(ScalarAggregateKernel kernel); +}; + +class ARROW_EXPORT HashAggregateFunction + : public detail::FunctionImpl { + public: + using KernelType = HashAggregateKernel; + + HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : detail::FunctionImpl(std::move(name), + Function::HASH_AGGREGATE, arity, + std::move(doc), default_options) {} + + /// \brief Add a kernel (function implementation). Returns error if the + /// kernel's signature does not match the function's arity. + Status AddKernel(HashAggregateKernel kernel); +}; + +/// \brief A function that dispatches to other functions. Must implement +/// MetaFunction::ExecuteImpl. +/// +/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution +/// of concrete Function types, but must handle other Datum kinds on its own. +class ARROW_EXPORT MetaFunction : public Function { + public: + int num_kernels() const override { return 0; } + + Result Execute(const std::vector& args, const FunctionOptions* options, + ExecContext* ctx) const override; + + Result Execute(const ExecBatch& batch, const FunctionOptions* options, + ExecContext* ctx) const override; + + protected: + virtual Result ExecuteImpl(const std::vector& args, + const FunctionOptions* options, + ExecContext* ctx) const = 0; + + MetaFunction(std::string name, const Arity& arity, FunctionDoc doc, + const FunctionOptions* default_options = NULLPTR) + : Function(std::move(name), Function::META, arity, std::move(doc), + default_options) {} +}; + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h new file mode 100644 index 0000000000000000000000000000000000000000..88ec2fd2d0679b5c849549179aa652bec9b37b56 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle.
+
+#pragma once
+
+#include "arrow/compute/type_fwd.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \addtogroup compute-functions
+/// @{
+
+/// \brief Extension point for defining options outside libarrow (but
+/// still within this project).
+class ARROW_EXPORT FunctionOptionsType {
+ public:
+  virtual ~FunctionOptionsType() = default;
+
+  virtual const char* type_name() const = 0;
+  virtual std::string Stringify(const FunctionOptions&) const = 0;
+  virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0;
+  virtual Result<std::shared_ptr<Buffer>> Serialize(const FunctionOptions&) const;
+  virtual Result<std::unique_ptr<FunctionOptions>> Deserialize(
+      const Buffer& buffer) const;
+  virtual std::unique_ptr<FunctionOptions> Copy(const FunctionOptions&) const = 0;
+};
+
+/// \brief Base class for specifying options configuring a function's behavior,
+/// such as error handling.
+class ARROW_EXPORT FunctionOptions : public util::EqualityComparable<FunctionOptions> {
+ public:
+  virtual ~FunctionOptions() = default;
+
+  const FunctionOptionsType* options_type() const { return options_type_; }
+  const char* type_name() const { return options_type()->type_name(); }
+
+  bool Equals(const FunctionOptions& other) const;
+  std::string ToString() const;
+  std::unique_ptr<FunctionOptions> Copy() const;
+  /// \brief Serialize an options struct to a buffer.
+  Result<std::shared_ptr<Buffer>> Serialize() const;
+  /// \brief Deserialize an options struct from a buffer.
+  /// Note: this will only look for `type_name` in the default FunctionRegistry;
+  /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then
+  /// call FunctionOptionsType::Deserialize().
+  static Result<std::unique_ptr<FunctionOptions>> Deserialize(
+      const std::string& type_name, const Buffer& buffer);
+
+ protected:
+  explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {}
+  const FunctionOptionsType* options_type_;
+};
+
+ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*);
+
+/// @}
+
+}  // namespace compute
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..1adb3e96c97c8cbaa20f99c29eea41c535aae64d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h
@@ -0,0 +1,752 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compute/exec.h" +#include "arrow/datum.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h. +// No other BSD seems to do so. The name is used as an identifier in MemAllocation enum. +#if defined(__APPLE__) && defined(PREALLOCATE) +#undef PREALLOCATE +#endif + +namespace arrow { +namespace compute { + +class FunctionOptions; + +/// \brief Base class for opaque kernel-specific state. For example, if there +/// is some kind of initialization required. +struct ARROW_EXPORT KernelState { + virtual ~KernelState() = default; +}; + +/// \brief Context/state for the execution of a particular kernel. +class ARROW_EXPORT KernelContext { + public: + // Can pass optional backreference; not used consistently for the + // moment but will be made so in the future + explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR) + : exec_ctx_(exec_ctx), kernel_(kernel) {} + + /// \brief Allocate buffer from the context's memory pool. The contents are + /// not initialized. + Result> Allocate(int64_t nbytes); + + /// \brief Allocate buffer for bitmap from the context's memory pool. Like + /// Allocate, the contents of the buffer are not initialized but the last + /// byte is preemptively zeroed to help avoid ASAN or valgrind issues. + Result> AllocateBitmap(int64_t num_bits); + + /// \brief Assign the active KernelState to be utilized for each stage of + /// kernel execution. Ownership and memory lifetime of the KernelState must + /// be minded separately. + void SetState(KernelState* state) { state_ = state; } + + // Set kernel that is being invoked since some kernel + // implementations will examine the kernel state. + void SetKernel(const Kernel* kernel) { kernel_ = kernel; } + + KernelState* state() { return state_; } + + /// \brief Configuration related to function execution that is to be shared + /// across multiple kernels. + ExecContext* exec_context() { return exec_ctx_; } + + /// \brief The memory pool to use for allocations. For now, it uses the + /// MemoryPool contained in the ExecContext used to create the KernelContext. + MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); } + + const Kernel* kernel() const { return kernel_; } + + private: + ExecContext* exec_ctx_; + KernelState* state_ = NULLPTR; + const Kernel* kernel_ = NULLPTR; +}; + +/// \brief An type-checking interface to permit customizable validation rules +/// for use with InputType and KernelSignature. 
This is for scenarios where the +/// acceptance is not an exact type instance, such as a TIMESTAMP type for a +/// specific TimeUnit, but permitting any time zone. +struct ARROW_EXPORT TypeMatcher { + virtual ~TypeMatcher() = default; + + /// \brief Return true if this matcher accepts the data type. + virtual bool Matches(const DataType& type) const = 0; + + /// \brief A human-interpretable string representation of what the type + /// matcher checks for, usable when printing KernelSignature or formatting + /// error messages. + virtual std::string ToString() const = 0; + + /// \brief Return true if this TypeMatcher contains the same matching rule as + /// the other. Currently depends on RTTI. + virtual bool Equals(const TypeMatcher& other) const = 0; +}; + +namespace match { + +/// \brief Match any DataType instance having the same DataType::id. +ARROW_EXPORT std::shared_ptr SameTypeId(Type::type type_id); + +/// \brief Match any TimestampType instance having the same unit, but the time +/// zones can be different. +ARROW_EXPORT std::shared_ptr TimestampTypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time32TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time64TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr DurationTypeUnit(TimeUnit::type unit); + +// \brief Match any integer type +ARROW_EXPORT std::shared_ptr Integer(); + +// Match types using 32-bit varbinary representation +ARROW_EXPORT std::shared_ptr BinaryLike(); + +// Match types using 64-bit varbinary representation +ARROW_EXPORT std::shared_ptr LargeBinaryLike(); + +// Match any fixed binary type +ARROW_EXPORT std::shared_ptr FixedSizeBinaryLike(); + +// \brief Match any primitive type (boolean or any type representable as a C +// Type) +ARROW_EXPORT std::shared_ptr Primitive(); + +// \brief Match any integer type that can be used as run-end in run-end encoded +// arrays +ARROW_EXPORT std::shared_ptr RunEndInteger(); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr value_type_matcher); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_id a type id that the type of the values field should match +ARROW_EXPORT std::shared_ptr RunEndEncoded(Type::type value_type_id); + +/// \brief Match run-end encoded types that encode specific run-end and value types +/// +/// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr run_end_type_matcher, + std::shared_ptr value_type_matcher); + +} // namespace match + +/// \brief An object used for type-checking arguments to be passed to a kernel +/// and stored in a KernelSignature. The type-checking rule can be supplied +/// either with an exact DataType instance or a custom TypeMatcher. +class ARROW_EXPORT InputType { + public: + /// \brief The kind of type-checking rule that the InputType contains. + enum Kind { + /// \brief Accept any value type. + ANY_TYPE, + + /// \brief A fixed arrow::DataType and will only exact match having this + /// exact type (e.g. same TimestampType unit, same decimal scale and + /// precision, or same nested child types). 
+ EXACT_TYPE, + + /// \brief Uses a TypeMatcher implementation to check the type. + USE_TYPE_MATCHER + }; + + /// \brief Accept any value type + InputType() : kind_(ANY_TYPE) {} + + /// \brief Accept an exact value type. + InputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(EXACT_TYPE), type_(std::move(type)) {} + + /// \brief Use the passed TypeMatcher to type check. + InputType(std::shared_ptr type_matcher) // NOLINT implicit construction + : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {} + + /// \brief Match any type with the given Type::type. Uses a TypeMatcher for + /// its implementation. + InputType(Type::type type_id) // NOLINT implicit construction + : InputType(match::SameTypeId(type_id)) {} + + InputType(const InputType& other) { CopyInto(other); } + + void operator=(const InputType& other) { CopyInto(other); } + + InputType(InputType&& other) { MoveInto(std::forward(other)); } + + void operator=(InputType&& other) { MoveInto(std::forward(other)); } + + // \brief Match any input (array, scalar of any type) + static InputType Any() { return InputType(); } + + /// \brief Return true if this input type matches the same type cases as the + /// other. + bool Equals(const InputType& other) const; + + bool operator==(const InputType& other) const { return this->Equals(other); } + + bool operator!=(const InputType& other) const { return !(*this == other); } + + /// \brief Return hash code. + size_t Hash() const; + + /// \brief Render a human-readable string representation. + std::string ToString() const; + + /// \brief Return true if the Datum matches this argument kind in + /// type (and only allows scalar or array-like Datums). + bool Matches(const Datum& value) const; + + /// \brief Return true if the type matches this InputType + bool Matches(const DataType& type) const; + + /// \brief The type matching rule that this InputType uses. + Kind kind() const { return kind_; } + + /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType + /// must match. Otherwise this function should not be used and will assert in + /// debug builds. + const std::shared_ptr& type() const; + + /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for + /// checking the type of a value. Otherwise this function should not be used + /// and will assert in debug builds. + const TypeMatcher& type_matcher() const; + + private: + void CopyInto(const InputType& other) { + this->kind_ = other.kind_; + this->type_ = other.type_; + this->type_matcher_ = other.type_matcher_; + } + + void MoveInto(InputType&& other) { + this->kind_ = other.kind_; + this->type_ = std::move(other.type_); + this->type_matcher_ = std::move(other.type_matcher_); + } + + Kind kind_; + + // For EXACT_TYPE Kind + std::shared_ptr type_; + + // For USE_TYPE_MATCHER Kind + std::shared_ptr type_matcher_; +}; + +/// \brief Container to capture both exact and input-dependent output types. +class ARROW_EXPORT OutputType { + public: + /// \brief An enum indicating whether the value type is an invariant fixed + /// value or one that's computed by a kernel-defined resolver function. + enum ResolveKind { FIXED, COMPUTED }; + + /// Type resolution function. Given input types, return output type. This + /// function MAY may use the kernel state to decide the output type based on + /// the FunctionOptions. + /// + /// This function SHOULD _not_ be used to check for arity, that is to be + /// performed one or more layers above. 
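A sketch of a computed resolver matching the alias declared just below: resolve the output to the type of the first argument (as elementwise "identity-like" kernels might; the name is ours):

```cpp
#include <arrow/compute/api.h>

namespace cp = arrow::compute;

// An OutputType whose concrete type is computed from the input types.
cp::OutputType kFirstArgType(
    [](cp::KernelContext*, const std::vector<cp::TypeHolder>& types)
        -> arrow::Result<cp::TypeHolder> { return types[0]; });
```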
+ using Resolver = + std::function(KernelContext*, const std::vector&)>; + + /// \brief Output an exact type + OutputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(FIXED), type_(std::move(type)) {} + + /// \brief Output a computed type depending on actual input types + template + OutputType(Fn resolver) // NOLINT implicit construction + : kind_(COMPUTED), resolver_(std::move(resolver)) {} + + OutputType(const OutputType& other) { + this->kind_ = other.kind_; + this->type_ = other.type_; + this->resolver_ = other.resolver_; + } + + OutputType(OutputType&& other) { + this->kind_ = other.kind_; + this->type_ = std::move(other.type_); + this->resolver_ = other.resolver_; + } + + OutputType& operator=(const OutputType&) = default; + OutputType& operator=(OutputType&&) = default; + + /// \brief Return the type of the expected output value of the kernel given + /// the input argument types. The resolver may make use of state information + /// kept in the KernelContext. + Result Resolve(KernelContext* ctx, + const std::vector& args) const; + + /// \brief The exact output value type for the FIXED kind. + const std::shared_ptr& type() const; + + /// \brief For use with COMPUTED resolution strategy. It may be more + /// convenient to invoke this with OutputType::Resolve returned from this + /// method. + const Resolver& resolver() const; + + /// \brief Render a human-readable string representation. + std::string ToString() const; + + /// \brief Return the kind of type resolution of this output type, whether + /// fixed/invariant or computed by a resolver. + ResolveKind kind() const { return kind_; } + + private: + ResolveKind kind_; + + // For FIXED resolution + std::shared_ptr type_; + + // For COMPUTED resolution + Resolver resolver_ = NULLPTR; +}; + +/// \brief Holds the input types and output type of the kernel. +/// +/// VarArgs functions with minimum N arguments should pass up to N input types to be +/// used to validate the input types of a function invocation. The first N-1 types +/// will be matched against the first N-1 arguments, and the last type will be +/// matched against the remaining arguments. +class ARROW_EXPORT KernelSignature { + public: + KernelSignature(std::vector in_types, OutputType out_type, + bool is_varargs = false); + + /// \brief Convenience ctor since make_shared can be awkward + static std::shared_ptr Make(std::vector in_types, + OutputType out_type, + bool is_varargs = false); + + /// \brief Return true if the signature if compatible with the list of input + /// value descriptors. + bool MatchesInputs(const std::vector& types) const; + + /// \brief Returns true if the input types of each signature are + /// equal. Well-formed functions should have a deterministic output type + /// given input types, but currently it is the responsibility of the + /// developer to ensure this. + bool Equals(const KernelSignature& other) const; + + bool operator==(const KernelSignature& other) const { return this->Equals(other); } + + bool operator!=(const KernelSignature& other) const { return !(*this == other); } + + /// \brief Compute a hash code for the signature + size_t Hash() const; + + /// \brief The input types for the kernel. For VarArgs functions, this should + /// generally contain a single validator to use for validating all of the + /// function arguments. + const std::vector& in_types() const { return in_types_; } + + /// \brief The output type for the kernel. 
Use Resolve to return the + /// exact output given input argument types, since many kernels' + /// output types depend on their input types (or their type + /// metadata). + const OutputType& out_type() const { return out_type_; } + + /// \brief Render a human-readable string representation + std::string ToString() const; + + bool is_varargs() const { return is_varargs_; } + + private: + std::vector in_types_; + OutputType out_type_; + bool is_varargs_; + + // For caching the hash code after it's computed the first time + mutable uint64_t hash_code_; +}; + +/// \brief A function may contain multiple variants of a kernel for a given +/// type combination for different SIMD levels. Based on the active system's +/// CPU info or the user's preferences, we can elect to use one over the other. +struct SimdLevel { + enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX }; +}; + +/// \brief The strategy to use for propagating or otherwise populating the +/// validity bitmap of a kernel output. +struct NullHandling { + enum type { + /// Compute the output validity bitmap by intersecting the validity bitmaps + /// of the arguments using bitwise-and operations. This means that values + /// in the output are valid/non-null only if the corresponding values in + /// all input arguments were valid/non-null. Kernel generally need not + /// touch the bitmap thereafter, but a kernel's exec function is permitted + /// to alter the bitmap after the null intersection is computed if it needs + /// to. + INTERSECTION, + + /// Kernel expects a pre-allocated buffer to write the result bitmap + /// into. The preallocated memory is not zeroed (except for the last byte), + /// so the kernel should ensure to completely populate the bitmap. + COMPUTED_PREALLOCATE, + + /// Kernel allocates and sets the validity bitmap of the output. + COMPUTED_NO_PREALLOCATE, + + /// Kernel output is never null and a validity bitmap does not need to be + /// allocated. + OUTPUT_NOT_NULL + }; +}; + +/// \brief The preference for memory preallocation of fixed-width type outputs +/// in kernel execution. +struct MemAllocation { + enum type { + // For data types that support pre-allocation (i.e. fixed-width), the + // kernel expects to be provided a pre-allocated data buffer to write + // into. Non-fixed-width types must always allocate their own data + // buffers. The allocation made for the same length as the execution batch, + // so vector kernels yielding differently sized output should not use this. + // + // It is valid for the data to not be preallocated but the validity bitmap + // is (or is computed using the intersection/bitwise-and method). + // + // For variable-size output types like BinaryType or StringType, or for + // nested types, this option has no effect. + PREALLOCATE, + + // The kernel is responsible for allocating its own data buffer for + // fixed-width type outputs. + NO_PREALLOCATE + }; +}; + +struct Kernel; + +/// \brief Arguments to pass to an KernelInit function. A struct is used to help +/// avoid API breakage should the arguments passed need to be expanded. +struct KernelInitArgs { + /// \brief A pointer to the kernel being initialized. The init function may + /// depend on the kernel's KernelSignature or other data contained there. + const Kernel* kernel; + + /// \brief The types of the input arguments that the kernel is + /// about to be executed against. + const std::vector& inputs; + + /// \brief Opaque options specific to this kernel. May be nullptr for functions + /// that do not require options. 
+ const FunctionOptions* options; +}; + +/// \brief Common initializer function for all kernel types. +using KernelInit = std::function>( + KernelContext*, const KernelInitArgs&)>; + +/// \brief Base type for kernels. Contains the function signature and +/// optionally the state initialization function, along with some common +/// attributes +struct ARROW_EXPORT Kernel { + Kernel() = default; + + Kernel(std::shared_ptr sig, KernelInit init) + : signature(std::move(sig)), init(std::move(init)) {} + + Kernel(std::vector in_types, OutputType out_type, KernelInit init) + : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init)) {} + + /// \brief The "signature" of the kernel containing the InputType input + /// argument validators and OutputType output type resolver. + std::shared_ptr signature; + + /// \brief Create a new KernelState for invocations of this kernel, e.g. to + /// set up any options or state relevant for execution. + KernelInit init; + + /// \brief Create a vector of new KernelState for invocations of this kernel. + static Status InitAll(KernelContext*, const KernelInitArgs&, + std::vector>*); + + /// \brief Indicates whether execution can benefit from parallelization + /// (splitting large chunks into smaller chunks and using multiple + /// threads). Some kernels may not support parallel execution at + /// all. Synchronization and concurrency-related issues are currently the + /// responsibility of the Kernel's implementation. + bool parallelizable = true; + + /// \brief Indicates the level of SIMD instruction support in the host CPU is + /// required to use the function. The intention is for functions to be able to + /// contain multiple kernels with the same signature but different levels of SIMD, + /// so that the most optimized kernel supported on a host's processor can be chosen. + SimdLevel::type simd_level = SimdLevel::NONE; + + // Additional kernel-specific data + std::shared_ptr data; +}; + +/// \brief The scalar kernel execution API that must be implemented for SCALAR +/// kernel types. This includes both stateless and stateful kernels. Kernels +/// depending on some execution state access that state via subclasses of +/// KernelState set on the KernelContext object. Implementations should +/// endeavor to write into pre-allocated memory if they are able, though for +/// some kernels (e.g. in cases when a builder like StringBuilder) must be +/// employed this may not be possible. +using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*); + +/// \brief Kernel data structure for implementations of ScalarFunction. In +/// addition to the members found in Kernel, contains the null handling +/// and memory pre-allocation preferences. +struct ARROW_EXPORT ScalarKernel : public Kernel { + ScalarKernel() = default; + + ScalarKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(sig), init), exec(exec) {} + + ScalarKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {} + + /// \brief Perform a single invocation of this kernel. Depending on the + /// implementation, it may only write into preallocated memory, while in some + /// cases it will allocate its own memory. Any required state is managed + /// through the KernelContext. 
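A configuration sketch for the members that follow: a kernel that produces its own allocations, opting out of the preallocation defaults (the wrapper name and utf8 types are ours):

```cpp
#include <arrow/api.h>
#include <arrow/compute/api.h>

namespace cp = arrow::compute;

// Wrap an exec function in a ScalarKernel that allocates its own output
// buffers and computes its own validity bitmap.
cp::ScalarKernel MakeSelfAllocatingKernel(cp::ArrayKernelExec exec) {
  cp::ScalarKernel kernel({cp::InputType(arrow::utf8())}, arrow::utf8(), exec);
  kernel.null_handling = cp::NullHandling::COMPUTED_NO_PREALLOCATE;
  kernel.mem_allocation = cp::MemAllocation::NO_PREALLOCATE;
  kernel.can_write_into_slices = false;
  return kernel;
}
```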
+ ArrayKernelExec exec; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. + bool can_write_into_slices = true; + + // For scalar functions preallocated data and intersecting arg validity + // bitmaps is a reasonable default + NullHandling::type null_handling = NullHandling::INTERSECTION; + MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE; +}; + +// ---------------------------------------------------------------------- +// VectorKernel (for VectorFunction) + +/// \brief Kernel data structure for implementations of VectorFunction. In +/// contains an optional finalizer function, the null handling and memory +/// pre-allocation preferences (which have different defaults from +/// ScalarKernel), and some other execution-related options. +struct ARROW_EXPORT VectorKernel : public Kernel { + /// \brief See VectorKernel::finalize member for usage + using FinalizeFunc = std::function*)>; + + /// \brief Function for executing a stateful VectorKernel against a + /// ChunkedArray input. Does not need to be defined for all VectorKernels + using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out); + + VectorKernel() = default; + + VectorKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + VectorKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(sig), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + /// \brief Perform a single invocation of this kernel. Any required state is + /// managed through the KernelContext. + ArrayKernelExec exec; + + /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined + ChunkedExec exec_chunked = NULLPTR; + + /// \brief For VectorKernel, convert intermediate results into finalized + /// results. Mutates input argument. Some kernels may accumulate state + /// (example: hashing-related functions) through processing chunked inputs, and + /// then need to attach some accumulated state to each of the outputs of + /// processing each chunk of data. + FinalizeFunc finalize; + + /// Since vector kernels generally are implemented rather differently from + /// scalar/elementwise kernels (and they may not even yield arrays of the same + /// size), so we make the developer opt-in to any memory preallocation rather + /// than having to turn it off. + NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE; + MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. + bool can_write_into_slices = true; + + /// Some vector kernels can do chunkwise execution using ExecSpanIterator, + /// in some cases accumulating some state. 
Other kernels (like Take) need to + /// be passed whole arrays and don't work on ChunkedArray inputs + bool can_execute_chunkwise = true; + + /// Some kernels (like unique and value_counts) yield non-chunked output from + /// chunked-array inputs. This option controls how the results are boxed when + /// returned from ExecVectorFunction + /// + /// true -> ChunkedArray + /// false -> Array + bool output_chunked = true; +}; + +// ---------------------------------------------------------------------- +// ScalarAggregateKernel (for ScalarAggregateFunction) + +using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*); +// Finalize returns Datum to permit multiple return values +using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// ScalarAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * consume: processes an ExecSpan and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. +/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT ScalarAggregateKernel : public Kernel { + ScalarAggregateKernel(std::shared_ptr sig, KernelInit init, + ScalarAggregateConsume consume, ScalarAggregateMerge merge, + ScalarAggregateFinalize finalize, const bool ordered) + : Kernel(std::move(sig), std::move(init)), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + ScalarAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, ScalarAggregateConsume consume, + ScalarAggregateMerge merge, ScalarAggregateFinalize finalize, + const bool ordered) + : ScalarAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), consume, merge, finalize, ordered) {} + + /// \brief Merge a vector of KernelStates into a single KernelState. + /// The merged state will be returned and will be set on the KernelContext. + static Result> MergeAll( + const ScalarAggregateKernel* kernel, KernelContext* ctx, + std::vector> states); + + ScalarAggregateConsume consume; + ScalarAggregateMerge merge; + ScalarAggregateFinalize finalize; + /// \brief Whether this kernel requires ordering + /// Some aggregations, such as, "first", requires some kind of input order. The + /// order can be implicit, e.g., the order of the input data, or explicit, e.g. + /// the ordering specified with a window aggregation. + /// The caller of the aggregate kernel is responsible for passing data in some + /// defined order to the kernel. The flag here is a way for the kernel to tell + /// the caller that data passed to the kernel must be defined in some order. 
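To tie the four aggregate components together, a hedged sketch of a trivial row-counting aggregate (the state type and function names are ours):

```cpp
#include <cstdint>
#include <memory>

#include <arrow/api.h>
#include <arrow/compute/api.h>

namespace cp = arrow::compute;

struct CountState : cp::KernelState {
  int64_t count = 0;
};

// init: allocate fresh state for an invocation.
arrow::Result<std::unique_ptr<cp::KernelState>> CountInit(
    cp::KernelContext*, const cp::KernelInitArgs&) {
  return std::make_unique<CountState>();
}

// consume: fold a batch into the state held by the KernelContext.
arrow::Status CountConsume(cp::KernelContext* ctx, const cp::ExecSpan& batch) {
  static_cast<CountState*>(ctx->state())->count += batch.length;
  return arrow::Status::OK();
}

// merge: combine a source state into the destination state.
arrow::Status CountMerge(cp::KernelContext*, cp::KernelState&& src,
                         cp::KernelState* dst) {
  static_cast<CountState*>(dst)->count += static_cast<CountState&>(src).count;
  return arrow::Status::OK();
}

// finalize: emit the accumulated result as a Datum (an int64 scalar here).
arrow::Status CountFinalize(cp::KernelContext* ctx, arrow::Datum* out) {
  *out = arrow::Datum(static_cast<CountState*>(ctx->state())->count);
  return arrow::Status::OK();
}
```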
+ bool ordered = false; +}; + +// ---------------------------------------------------------------------- +// HashAggregateKernel (for HashAggregateFunction) + +using HashAggregateResize = Status (*)(KernelContext*, int64_t); +using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&); + +// Finalize returns Datum to permit multiple return values +using HashAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// HashAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * resize: ensure that the KernelState can accommodate the specified number of groups. +/// * consume: processes an ExecSpan (which includes the argument as well +/// as an array of group identifiers) and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. +/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT HashAggregateKernel : public Kernel { + HashAggregateKernel() = default; + + HashAggregateKernel(std::shared_ptr sig, KernelInit init, + HashAggregateResize resize, HashAggregateConsume consume, + HashAggregateMerge merge, HashAggregateFinalize finalize, + const bool ordered) + : Kernel(std::move(sig), std::move(init)), + resize(resize), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + HashAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, HashAggregateConsume consume, + HashAggregateResize resize, HashAggregateMerge merge, + HashAggregateFinalize finalize, const bool ordered) + : HashAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), resize, consume, merge, finalize, ordered) {} + + HashAggregateResize resize; + HashAggregateConsume consume; + HashAggregateMerge merge; + HashAggregateFinalize finalize; + /// @brief whether the summarizer requires ordering + /// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel + /// for detailed doc of this variable. + bool ordered = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_hash.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_hash.h new file mode 100644 index 0000000000000000000000000000000000000000..1173df5ed103e8de063c8eb8a9bfd45c1d798890 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_hash.h @@ -0,0 +1,223 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#if defined(ARROW_HAVE_RUNTIME_AVX2) +#include +#endif + +#include + +#include "arrow/compute/light_array.h" +#include "arrow/compute/util.h" + +namespace arrow { +namespace compute { + +// Forward declarations only needed for making test functions a friend of the classes in +// this file. +// +enum class BloomFilterBuildStrategy; + +// Implementations are based on xxh3 32-bit algorithm description from: +// https://github.com/Cyan4973/xxHash/blob/dev/doc/xxhash_spec.md +// +class ARROW_EXPORT Hashing32 { + friend class TestVectorHash; + template + friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector&, + int64_t, int, T*); + friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool); + + public: + static void HashMultiColumn(const std::vector& cols, LightContext* ctx, + uint32_t* out_hash); + + static Status HashBatch(const ExecBatch& key_batch, uint32_t* hashes, + std::vector& column_arrays, + int64_t hardware_flags, util::TempVectorStack* temp_stack, + int64_t start_row, int64_t num_rows); + + static void HashFixed(int64_t hardware_flags, bool combine_hashes, uint32_t num_keys, + uint64_t key_length, const uint8_t* keys, uint32_t* hashes, + uint32_t* temp_hashes_for_combine); + + private: + static const uint32_t PRIME32_1 = 0x9E3779B1; + static const uint32_t PRIME32_2 = 0x85EBCA77; + static const uint32_t PRIME32_3 = 0xC2B2AE3D; + static const uint32_t PRIME32_4 = 0x27D4EB2F; + static const uint32_t PRIME32_5 = 0x165667B1; + static const uint32_t kCombineConst = 0x9e3779b9UL; + static const int64_t kStripeSize = 4 * sizeof(uint32_t); + + static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows, + const uint32_t* offsets, const uint8_t* concatenated_keys, + uint32_t* hashes, uint32_t* temp_hashes_for_combine); + + static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows, + const uint64_t* offsets, const uint8_t* concatenated_keys, + uint32_t* hashes, uint32_t* temp_hashes_for_combine); + + static inline uint32_t Avalanche(uint32_t acc) { + acc ^= (acc >> 15); + acc *= PRIME32_2; + acc ^= (acc >> 13); + acc *= PRIME32_3; + acc ^= (acc >> 16); + return acc; + } + static inline uint32_t Round(uint32_t acc, uint32_t input); + static inline uint32_t CombineAccumulators(uint32_t acc1, uint32_t acc2, uint32_t acc3, + uint32_t acc4); + static inline uint32_t CombineHashesImp(uint32_t previous_hash, uint32_t hash) { + uint32_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) + + (previous_hash >> 2)); + return next_hash; + } + static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key, + uint32_t* out_acc1, uint32_t* out_acc2, + uint32_t* out_acc3, uint32_t* out_acc4); + static inline void ProcessLastStripe(uint32_t mask1, uint32_t mask2, uint32_t mask3, + uint32_t mask4, const uint8_t* last_stripe, + uint32_t* acc1, uint32_t* acc2, uint32_t* acc3, + uint32_t* acc4); + static inline void StripeMask(int i, uint32_t* mask1, uint32_t* mask2, uint32_t* mask3, + uint32_t* mask4); + template + static void HashFixedLenImp(uint32_t num_rows, uint64_t key_length, const uint8_t* keys, + uint32_t* hashes); + template + static void HashVarLenImp(uint32_t num_rows, const T* offsets, + const uint8_t* concatenated_keys, uint32_t* hashes); + template + static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys, + uint32_t* 
hashes); + static void HashBit(bool combine_hashes, int64_t bit_offset, uint32_t num_keys, + const uint8_t* keys, uint32_t* hashes); + template + static void HashIntImp(uint32_t num_keys, const T* keys, uint32_t* hashes); + static void HashInt(bool combine_hashes, uint32_t num_keys, uint64_t key_length, + const uint8_t* keys, uint32_t* hashes); + +#if defined(ARROW_HAVE_RUNTIME_AVX2) + static inline __m256i Avalanche_avx2(__m256i hash); + static inline __m256i CombineHashesImp_avx2(__m256i previous_hash, __m256i hash); + template + static void AvalancheAll_avx2(uint32_t num_rows, uint32_t* hashes, + const uint32_t* hashes_temp_for_combine); + static inline __m256i Round_avx2(__m256i acc, __m256i input); + static inline uint64_t CombineAccumulators_avx2(__m256i acc); + static inline __m256i StripeMask_avx2(int i, int j); + template + static inline __m256i ProcessStripes_avx2(int64_t num_stripes_A, int64_t num_stripes_B, + __m256i mask_last_stripe, const uint8_t* keys, + int64_t offset_A, int64_t offset_B); + template + static uint32_t HashFixedLenImp_avx2(uint32_t num_rows, uint64_t key_length, + const uint8_t* keys, uint32_t* hashes, + uint32_t* hashes_temp_for_combine); + static uint32_t HashFixedLen_avx2(bool combine_hashes, uint32_t num_rows, + uint64_t key_length, const uint8_t* keys, + uint32_t* hashes, uint32_t* hashes_temp_for_combine); + template + static uint32_t HashVarLenImp_avx2(uint32_t num_rows, const T* offsets, + const uint8_t* concatenated_keys, uint32_t* hashes, + uint32_t* hashes_temp_for_combine); + static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows, + const uint32_t* offsets, + const uint8_t* concatenated_keys, uint32_t* hashes, + uint32_t* hashes_temp_for_combine); + static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows, + const uint64_t* offsets, + const uint8_t* concatenated_keys, uint32_t* hashes, + uint32_t* hashes_temp_for_combine); +#endif +}; + +class ARROW_EXPORT Hashing64 { + friend class TestVectorHash; + template + friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector&, + int64_t, int, T*); + friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool); + + public: + static void HashMultiColumn(const std::vector& cols, LightContext* ctx, + uint64_t* hashes); + + static Status HashBatch(const ExecBatch& key_batch, uint64_t* hashes, + std::vector& column_arrays, + int64_t hardware_flags, util::TempVectorStack* temp_stack, + int64_t start_row, int64_t num_rows); + + static void HashFixed(bool combine_hashes, uint32_t num_keys, uint64_t key_length, + const uint8_t* keys, uint64_t* hashes); + + private: + static const uint64_t PRIME64_1 = 0x9E3779B185EBCA87ULL; + static const uint64_t PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; + static const uint64_t PRIME64_3 = 0x165667B19E3779F9ULL; + static const uint64_t PRIME64_4 = 0x85EBCA77C2B2AE63ULL; + static const uint64_t PRIME64_5 = 0x27D4EB2F165667C5ULL; + static const uint32_t kCombineConst = 0x9e3779b9UL; + static const int64_t kStripeSize = 4 * sizeof(uint64_t); + + static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint32_t* offsets, + const uint8_t* concatenated_keys, uint64_t* hashes); + + static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint64_t* offsets, + const uint8_t* concatenated_keys, uint64_t* hashes); + + static inline uint64_t Avalanche(uint64_t acc); + static inline uint64_t Round(uint64_t acc, uint64_t input); + static inline uint64_t CombineAccumulators(uint64_t acc1, uint64_t acc2, uint64_t 
acc3, + uint64_t acc4); + static inline uint64_t CombineHashesImp(uint64_t previous_hash, uint64_t hash) { + uint64_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) + + (previous_hash >> 2)); + return next_hash; + } + static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key, + uint64_t* out_acc1, uint64_t* out_acc2, + uint64_t* out_acc3, uint64_t* out_acc4); + static inline void ProcessLastStripe(uint64_t mask1, uint64_t mask2, uint64_t mask3, + uint64_t mask4, const uint8_t* last_stripe, + uint64_t* acc1, uint64_t* acc2, uint64_t* acc3, + uint64_t* acc4); + static inline void StripeMask(int i, uint64_t* mask1, uint64_t* mask2, uint64_t* mask3, + uint64_t* mask4); + template + static void HashFixedLenImp(uint32_t num_rows, uint64_t key_length, const uint8_t* keys, + uint64_t* hashes); + template + static void HashVarLenImp(uint32_t num_rows, const T* offsets, + const uint8_t* concatenated_keys, uint64_t* hashes); + template + static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys, + uint64_t* hashes); + static void HashBit(bool combine_hashes, int64_t bit_offset, uint32_t num_keys, + const uint8_t* keys, uint64_t* hashes); + template + static void HashIntImp(uint32_t num_keys, const T* keys, uint64_t* hashes); + static void HashInt(bool combine_hashes, uint32_t num_keys, uint64_t key_length, + const uint8_t* keys, uint64_t* hashes); +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_map.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_map.h new file mode 100644 index 0000000000000000000000000000000000000000..8e06dc83483aa6a3f4f83eb10a3e537d08d63d76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_map.h @@ -0,0 +1,288 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/compute/util.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace compute { + +// SwissTable is a variant of a hash table implementation. +// This implementation is vectorized, that is: main interface methods take arrays of input +// values and output arrays of result values. +// +// A detailed explanation of this data structure (including concepts such as blocks, +// slots, stamps) and operations provided by this class is given in the document: +// arrow/compute/exec/doc/key_map.md. 
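+//
+// A sketch of the typical lookup/insert flow, for illustration only (the
+// hashes/bitvector/id buffers and the equal/append callbacks here are
+// hypothetical; the real callers live in the grouper and hash-join code):
+//
+//   table.early_filter(num_keys, hashes, match_bitvector, local_slots);
+//   table.find(num_keys, hashes, match_bitvector, local_slots, group_ids,
+//              temp_stack, equal_impl, callback_ctx);
+//   // ids of the keys that did not match any existing entry:
+//   RETURN_NOT_OK(table.map_new_keys(num_new, ids, hashes, group_ids,
+//                                    temp_stack, equal_impl, append_impl,
+//                                    callback_ctx));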
+// +class ARROW_EXPORT SwissTable { + friend class SwissTableMerge; + + public: + SwissTable() = default; + ~SwissTable() { cleanup(); } + + using EqualImpl = + std::function; + using AppendImpl = + std::function; + + Status init(int64_t hardware_flags, MemoryPool* pool, int log_blocks = 0, + bool no_hash_array = false); + + void cleanup(); + + void early_filter(const int num_keys, const uint32_t* hashes, + uint8_t* out_match_bitvector, uint8_t* out_local_slots) const; + + void find(const int num_keys, const uint32_t* hashes, uint8_t* inout_match_bitvector, + const uint8_t* local_slots, uint32_t* out_group_ids, + util::TempVectorStack* temp_stack, const EqualImpl& equal_impl, + void* callback_ctx) const; + + Status map_new_keys(uint32_t num_ids, uint16_t* ids, const uint32_t* hashes, + uint32_t* group_ids, util::TempVectorStack* temp_stack, + const EqualImpl& equal_impl, const AppendImpl& append_impl, + void* callback_ctx); + + int minibatch_size() const { return 1 << log_minibatch_; } + + uint32_t num_inserted() const { return num_inserted_; } + + int64_t hardware_flags() const { return hardware_flags_; } + + MemoryPool* pool() const { return pool_; } + + int log_blocks() const { return log_blocks_; } + + void num_inserted(uint32_t i) { num_inserted_ = i; } + + uint8_t* blocks() const { return blocks_->mutable_data(); } + + uint32_t* hashes() const { + return reinterpret_cast(hashes_->mutable_data()); + } + + /// \brief Extract group id for a given slot in a given block. + /// + inline uint64_t extract_group_id(const uint8_t* block_ptr, int slot, + uint64_t group_id_mask) const; + + inline void insert_into_empty_slot(uint32_t slot_id, uint32_t hash, uint32_t group_id); + + static int num_groupid_bits_from_log_blocks(int log_blocks) { + int required_bits = log_blocks + 3; + return required_bits <= 8 ? 8 + : required_bits <= 16 ? 16 + : required_bits <= 32 ? 32 + : 64; + } + + // Use 32-bit hash for now + static constexpr int bits_hash_ = 32; + + private: + // Lookup helpers + + /// \brief Scan bytes in block in reverse and stop as soon + /// as a position of interest is found. + /// + /// Positions of interest: + /// a) slot with a matching stamp is encountered, + /// b) first empty slot is encountered, + /// c) we reach the end of the block. + /// + /// Optionally an index of the first slot to start the search from can be specified. + /// In this case slots before it will be ignored. + /// + /// \param[in] block 8 byte block of hash table + /// \param[in] stamp 7 bits of hash used as a stamp + /// \param[in] start_slot Index of the first slot in the block to start search from. We + /// assume that this index always points to a non-empty slot, equivalently + /// that it comes before any empty slots. (Used only by one template + /// variant.) + /// \param[out] out_slot index corresponding to the discovered position of interest (8 + /// represents end of block). + /// \param[out] out_match_found an integer flag (0 or 1) indicating if we reached an + /// empty slot (0) or not (1). Therefore 1 can mean that either an actual match was + /// found (case a) above) or we reached the end of a full block (case c) above).
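+  ///
+  /// For example, if the stamp is present at slot 2 and no earlier slot is
+  /// empty, the search reports *out_slot == 2 and *out_match_found == 1; if
+  /// slot 1 is the first empty slot and no earlier slot matches, it reports
+  /// *out_slot == 1 and *out_match_found == 0.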
+ /// + template + inline void search_block(uint64_t block, int stamp, int start_slot, int* out_slot, + int* out_match_found) const; + + void extract_group_ids(const int num_keys, const uint16_t* optional_selection, + const uint32_t* hashes, const uint8_t* local_slots, + uint32_t* out_group_ids) const; + + template + void extract_group_ids_imp(const int num_keys, const uint16_t* selection, + const uint32_t* hashes, const uint8_t* local_slots, + uint32_t* out_group_ids, int elements_offset, + int element_multiplier) const; + + inline uint64_t next_slot_to_visit(uint64_t block_index, int slot, + int match_found) const; + + inline uint64_t num_groups_for_resize() const; + + inline uint64_t wrap_global_slot_id(uint64_t global_slot_id) const; + + void init_slot_ids(const int num_keys, const uint16_t* selection, + const uint32_t* hashes, const uint8_t* local_slots, + const uint8_t* match_bitvector, uint32_t* out_slot_ids) const; + + void init_slot_ids_for_new_keys(uint32_t num_ids, const uint16_t* ids, + const uint32_t* hashes, uint32_t* slot_ids) const; + + // Quickly filter out keys that have no matches based only on hash value and the + // corresponding starting 64-bit block of slot status bytes. May return false positives. + // + void early_filter_imp(const int num_keys, const uint32_t* hashes, + uint8_t* out_match_bitvector, uint8_t* out_local_slots) const; +#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2) + // The functions below use BMI2 instructions, be careful before calling! + int early_filter_imp_avx2_x8(const int num_hashes, const uint32_t* hashes, + uint8_t* out_match_bitvector, + uint8_t* out_local_slots) const; + int early_filter_imp_avx2_x32(const int num_hashes, const uint32_t* hashes, + uint8_t* out_match_bitvector, + uint8_t* out_local_slots) const; + int extract_group_ids_avx2(const int num_keys, const uint32_t* hashes, + const uint8_t* local_slots, uint32_t* out_group_ids, + int byte_offset, int byte_multiplier, int byte_size) const; +#endif + + void run_comparisons(const int num_keys, const uint16_t* optional_selection_ids, + const uint8_t* optional_selection_bitvector, + const uint32_t* groupids, int* out_num_not_equal, + uint16_t* out_not_equal_selection, const EqualImpl& equal_impl, + void* callback_ctx) const; + + inline bool find_next_stamp_match(const uint32_t hash, const uint32_t in_slot_id, + uint32_t* out_slot_id, uint32_t* out_group_id) const; + + // Slow processing of input keys in the most generic case. + // Handles inserting new keys. + // Preexisting keys will be handled correctly, although the intended use is for this + // call to follow a call to find() method, which would only pass on new keys that were + // not present in the hash table. + // + Status map_new_keys_helper(const uint32_t* hashes, uint32_t* inout_num_selected, + uint16_t* inout_selection, bool* out_need_resize, + uint32_t* out_group_ids, uint32_t* out_next_slot_ids, + util::TempVectorStack* temp_stack, + const EqualImpl& equal_impl, const AppendImpl& append_impl, + void* callback_ctx); + + // Resize small hash tables when 50% full (up to 8KB). + // Resize large hash tables when 75% full. + Status grow_double(); + + // Number of hash bits stored in slots in a block. + // The highest bits of hash determine block id. + // The next set of highest bits is a "stamp" stored in a slot in a block. 
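+  //
+  // For example, with log_blocks_ = 10 and 32-bit hashes, the top 10 bits of
+  // a hash select the block and the 7 bits below them form the stamp (this is
+  // the same arithmetic used in insert_into_empty_slot below):
+  //
+  //   block_id = hash >> (bits_hash_ - log_blocks_);
+  //   stamp    = (hash >> (bits_hash_ - log_blocks_ - bits_stamp_)) & 0x7f;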
+ static constexpr int bits_stamp_ = 7; + + // Padding bytes added at the end of buffers for ease of SIMD access + static constexpr int padding_ = 64; + + int log_minibatch_; + // Base 2 log of the number of blocks + int log_blocks_ = 0; + // Number of keys inserted into hash table + uint32_t num_inserted_ = 0; + + // Data for blocks. + // Each block has 8 status bytes for 8 slots, followed by 8 bit-packed group ids for + // these slots. In the 8B status word, the order of bytes is reversed. Group ids are in + // normal order. There is 64B padding at the end. + // + // byte 0 - bucket 7 | byte 1 - bucket 6 | ... + // --------------------------------------------------- + // | Empty bit* | Empty bit | + // --------------------------------------------------- + // | 7-bit hash | 7-bit hash | + // --------------------------------------------------- + // * Empty bucket has value 0x80. Non-empty bucket has highest bit set to 0. + // + std::shared_ptr blocks_; + + // Array of hashes of values inserted into slots. + // Undefined if the corresponding slot is empty. + // There is 64B padding at the end. + std::shared_ptr hashes_; + + int64_t hardware_flags_; + MemoryPool* pool_; +}; + +uint64_t SwissTable::extract_group_id(const uint8_t* block_ptr, int slot, + uint64_t group_id_mask) const { + // Group id values for all 8 slots in the block are bit-packed and follow the status + // bytes. We assume here that the number of bits is rounded up to 8, 16, 32 or 64. In + // that case we can extract the group id using aligned 64-bit word access. + int num_group_id_bits = static_cast(ARROW_POPCOUNT64(group_id_mask)); + assert(num_group_id_bits == 8 || num_group_id_bits == 16 || num_group_id_bits == 32 || + num_group_id_bits == 64); + + int bit_offset = slot * num_group_id_bits; + const uint64_t* group_id_bytes = + reinterpret_cast(block_ptr) + 1 + (bit_offset >> 6); + uint64_t group_id = (*group_id_bytes >> (bit_offset & 63)) & group_id_mask; + + return group_id; +} + +void SwissTable::insert_into_empty_slot(uint32_t slot_id, uint32_t hash, + uint32_t group_id) { + const uint64_t num_groupid_bits = num_groupid_bits_from_log_blocks(log_blocks_); + + // We assume here that the number of bits is rounded up to 8, 16, 32 or 64. + // In that case we can insert group id value using aligned 64-bit word access.
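+  // For example, with log_blocks_ = 2 the required width is
+  // log_blocks_ + 3 = 5 bits, which num_groupid_bits_from_log_blocks rounds
+  // up to 8, so each block then occupies 8 + 8 = 16 bytes.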
+ assert(num_groupid_bits == 8 || num_groupid_bits == 16 || num_groupid_bits == 32 || + num_groupid_bits == 64); + + const uint64_t num_block_bytes = (8 + num_groupid_bits); + constexpr uint64_t stamp_mask = 0x7f; + + int start_slot = (slot_id & 7); + int stamp = + static_cast((hash >> (bits_hash_ - log_blocks_ - bits_stamp_)) & stamp_mask); + uint64_t block_id = slot_id >> 3; + uint8_t* blockbase = blocks_->mutable_data() + num_block_bytes * block_id; + + blockbase[7 - start_slot] = static_cast(stamp); + int groupid_bit_offset = static_cast(start_slot * num_groupid_bits); + + // Block status bytes should start at an address aligned to 8 bytes + assert((reinterpret_cast(blockbase) & 7) == 0); + uint64_t* ptr = reinterpret_cast(blockbase) + 1 + (groupid_bit_offset >> 6); + *ptr |= (static_cast(group_id) << (groupid_bit_offset & 63)); +} + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/light_array.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/light_array.h new file mode 100644 index 0000000000000000000000000000000000000000..67de71bf56c928615054ad50cf0fc6836d4a0e84 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/light_array.h @@ -0,0 +1,451 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/array.h" +#include "arrow/compute/exec.h" +#include "arrow/compute/util.h" +#include "arrow/type.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/logging.h" + +/// This file contains lightweight containers for Arrow buffers. These containers +/// make compromises in terms of strong ownership and the range of data types supported +/// in order to gain performance and reduce overhead. + +namespace arrow { +namespace compute { + +/// \brief Context needed by various execution engine operations +/// +/// In the execution engine this context is provided by either the node or the +/// plan and the context exists for the lifetime of the plan. Defining this here +/// allows us to take advantage of these resources without coupling the logic with +/// the execution engine. +struct LightContext { + bool has_avx2() const { return (hardware_flags & arrow::internal::CpuInfo::AVX2) > 0; } + int64_t hardware_flags; + util::TempVectorStack* stack; +}; + +/// \brief Description of the layout of a "key" column +/// +/// A "key" column is a non-nested, non-union column. +/// Every key column has either 0 (null), 2 (e.g. int32) or 3 (e.g. string) buffers +/// and no children.
+/// +/// This metadata object is a zero-allocation analogue of arrow::DataType +struct ARROW_EXPORT KeyColumnMetadata { + KeyColumnMetadata() = default; + KeyColumnMetadata(bool is_fixed_length_in, uint32_t fixed_length_in, + bool is_null_type_in = false) + : is_fixed_length(is_fixed_length_in), + is_null_type(is_null_type_in), + fixed_length(fixed_length_in) {} + /// \brief True if the column is not a varying-length binary type + /// + /// If this is true the column will have a validity buffer and + /// a data buffer and the third buffer will be unused. + bool is_fixed_length; + /// \brief True if this column is the null type + bool is_null_type; + /// \brief The number of bytes for each item + /// + /// Zero has a special meaning, indicating a bit vector with one bit per value if it + /// isn't a null type column. + /// + /// For a varying-length binary column this represents the number of bytes per offset. + uint32_t fixed_length; +}; + +/// \brief A lightweight view into a "key" array +/// +/// A "key" column is a non-nested, non-union column \see KeyColumnMetadata +/// +/// This metadata object is a zero-allocation analogue of arrow::ArrayData +class ARROW_EXPORT KeyColumnArray { + public: + /// \brief Create an uninitialized KeyColumnArray + KeyColumnArray() = default; + /// \brief Create a read-only view from buffers + /// + /// This is a view only and does not take ownership of the buffers. The lifetime + /// of the buffers must exceed the lifetime of this view + KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length, + const uint8_t* validity_buffer, const uint8_t* fixed_length_buffer, + const uint8_t* var_length_buffer, int bit_offset_validity = 0, + int bit_offset_fixed = 0); + /// \brief Create a mutable view from buffers + /// + /// This is a view only and does not take ownership of the buffers. The lifetime + /// of the buffers must exceed the lifetime of this view + KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length, + uint8_t* validity_buffer, uint8_t* fixed_length_buffer, + uint8_t* var_length_buffer, int bit_offset_validity = 0, + int bit_offset_fixed = 0); + /// \brief Create a sliced view of `this` + /// + /// The number of rows used in offset must be divisible by 8 + /// in order to not split bit vectors within a single byte. + KeyColumnArray Slice(int64_t offset, int64_t length) const; + /// \brief Create a copy of `this` with a buffer from `other` + /// + /// The copy will be identical to `this` except the buffer at buffer_id_to_replace + /// will be replaced by the corresponding buffer in `other`. + KeyColumnArray WithBufferFrom(const KeyColumnArray& other, + int buffer_id_to_replace) const; + + /// \brief Create a copy of `this` with new metadata + KeyColumnArray WithMetadata(const KeyColumnMetadata& metadata) const; + + // Constants used for accessing buffers using data() and mutable_data(). 
+ static constexpr int kValidityBuffer = 0; + static constexpr int kFixedLengthBuffer = 1; + static constexpr int kVariableLengthBuffer = 2; + + /// \brief Return one of the underlying mutable buffers + uint8_t* mutable_data(int i) { + ARROW_DCHECK(i >= 0 && i < kMaxBuffers); + return mutable_buffers_[i]; + } + /// \brief Return one of the underlying read-only buffers + const uint8_t* data(int i) const { + ARROW_DCHECK(i >= 0 && i < kMaxBuffers); + return buffers_[i]; + } + /// \brief Return a mutable version of the offsets buffer + /// + /// Only valid if this is a view into a varbinary type + uint32_t* mutable_offsets() { + DCHECK(!metadata_.is_fixed_length); + DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t)); + return reinterpret_cast(mutable_data(kFixedLengthBuffer)); + } + /// \brief Return a read-only version of the offsets buffer + /// + /// Only valid if this is a view into a varbinary type + const uint32_t* offsets() const { + DCHECK(!metadata_.is_fixed_length); + DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t)); + return reinterpret_cast(data(kFixedLengthBuffer)); + } + /// \brief Return a mutable version of the large-offsets buffer + /// + /// Only valid if this is a view into a large varbinary type + uint64_t* mutable_large_offsets() { + DCHECK(!metadata_.is_fixed_length); + DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t)); + return reinterpret_cast(mutable_data(kFixedLengthBuffer)); + } + /// \brief Return a read-only version of the large-offsets buffer + /// + /// Only valid if this is a view into a large varbinary type + const uint64_t* large_offsets() const { + DCHECK(!metadata_.is_fixed_length); + DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t)); + return reinterpret_cast(data(kFixedLengthBuffer)); + } + /// \brief Return the type metadata + const KeyColumnMetadata& metadata() const { return metadata_; } + /// \brief Return the length (in rows) of the array + int64_t length() const { return length_; } + /// \brief Return the bit offset into the corresponding vector + /// + /// if i == 1 then this must be a bool array + int bit_offset(int i) const { + ARROW_DCHECK(i >= 0 && i < kMaxBuffers); + return bit_offset_[i]; + } + + private: + static constexpr int kMaxBuffers = 3; + const uint8_t* buffers_[kMaxBuffers]; + uint8_t* mutable_buffers_[kMaxBuffers]; + KeyColumnMetadata metadata_; + int64_t length_; + // Starting bit offset within the first byte (between 0 and 7) + // to be used when accessing buffers that store bit vectors. + int bit_offset_[kMaxBuffers - 1]; + + bool is_bool_type() const { + return metadata_.is_fixed_length && metadata_.fixed_length == 0 && + !metadata_.is_null_type; + } + + bool is_fixed_width_types() const { + return metadata_.is_fixed_length && metadata_.fixed_length != 0 && + !metadata_.is_null_type; + } + + bool is_binary_type() const { + return !metadata_.is_fixed_length && metadata_.fixed_length == sizeof(uint32_t) && + !metadata_.is_null_type; + } + + bool is_large_binary_type() const { + return !metadata_.is_fixed_length && metadata_.fixed_length == sizeof(uint64_t) && + !metadata_.is_null_type; + } + + bool is_null_type() const { + return metadata_.is_fixed_length && metadata_.fixed_length == 0 && + metadata_.is_null_type; + } +}; + +/// \brief Create KeyColumnMetadata from a DataType +/// +/// If `type` is a dictionary type then this will return the KeyColumnMetadata for +/// the indices type +/// +/// This should only be called on "key" columns. Calling this with +/// a non-key column will return Status::TypeError. 
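+///
+/// For illustration, a sketch of the expected results (hypothetical usage,
+/// not part of this header):
+///
+/// \code
+/// // int32(): fixed length, 4 bytes per value
+/// ARROW_ASSIGN_OR_RAISE(KeyColumnMetadata m, ColumnMetadataFromDataType(int32()));
+/// // m.is_fixed_length == true, m.fixed_length == 4
+/// // utf8(): varying length, 4 bytes per offset
+/// ARROW_ASSIGN_OR_RAISE(KeyColumnMetadata m2, ColumnMetadataFromDataType(utf8()));
+/// // m2.is_fixed_length == false, m2.fixed_length == sizeof(uint32_t)
+/// \endcode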
+ARROW_EXPORT Result ColumnMetadataFromDataType( + const std::shared_ptr& type); + +/// \brief Create KeyColumnArray from ArrayData +/// +/// If `type` is a dictionary type then this will return the KeyColumnArray for +/// the indices array +/// +/// The caller should ensure this is only called on "key" columns. +/// \see ColumnMetadataFromDataType for details +ARROW_EXPORT Result ColumnArrayFromArrayData( + const std::shared_ptr& array_data, int64_t start_row, int64_t num_rows); + +/// \brief Create KeyColumnArray from ArrayData and KeyColumnMetadata +/// +/// If `type` is a dictionary type then this will return the KeyColumnArray for +/// the indices array +/// +/// The caller should ensure this is only called on "key" columns. +/// \see ColumnMetadataFromDataType for details +ARROW_EXPORT KeyColumnArray ColumnArrayFromArrayDataAndMetadata( + const std::shared_ptr& array_data, const KeyColumnMetadata& metadata, + int64_t start_row, int64_t num_rows); + +/// \brief Create KeyColumnMetadata instances from an ExecBatch +/// +/// column_metadatas will be resized to fit +/// +/// All columns in `batch` must be eligible "key" columns and have an array shape +/// \see ColumnMetadataFromDataType for more details +ARROW_EXPORT Status ColumnMetadatasFromExecBatch( + const ExecBatch& batch, std::vector* column_metadatas); + +/// \brief Create KeyColumnArray instances from a slice of an ExecBatch +/// +/// column_arrays will be resized to fit +/// +/// All columns in `batch` must be eligible "key" columns and have an array shape +/// \see ColumnArrayFromArrayData for more details +ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch, int64_t start_row, + int64_t num_rows, + std::vector* column_arrays); + +/// \brief Create KeyColumnArray instances from an ExecBatch +/// +/// column_arrays will be resized to fit +/// +/// All columns in `batch` must be eligible "key" columns and have an array shape +/// \see ColumnArrayFromArrayData for more details +ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch, + std::vector* column_arrays); + +/// A lightweight resizable array for "key" columns +/// +/// Unlike KeyColumnArray this instance owns its buffers +/// +/// Resizing is handled by arrow::ResizableBuffer and a doubling approach is +/// used so that resizes will always grow up to the next power of 2 +class ARROW_EXPORT ResizableArrayData { + public: + /// \brief Create an uninitialized instance + /// + /// Init must be called before calling any other operations + ResizableArrayData() + : log_num_rows_min_(0), + pool_(NULLPTR), + num_rows_(0), + num_rows_allocated_(0), + var_len_buf_size_(0) {} + + ~ResizableArrayData() { Clear(true); } + + /// \brief Initialize the array + /// \param data_type The data type this array is holding data for. 
+ /// \param pool The pool to make allocations on + /// \param log_num_rows_min All resize operations will allocate at least enough + /// space for (1 << log_num_rows_min) rows + void Init(const std::shared_ptr& data_type, MemoryPool* pool, + int log_num_rows_min); + + /// \brief Resets the array back to an empty state + /// \param release_buffers If true then allocated memory is released and the + /// next resize operation will have to reallocate memory + void Clear(bool release_buffers); + + /// \brief Resize the fixed length buffers + /// + /// The buffers will be resized to hold at least `num_rows_new` rows of data + Status ResizeFixedLengthBuffers(int num_rows_new); + + /// \brief Resize the varying length buffer if this array is a variable binary type + /// + /// This must be called after offsets have been populated and the buffer will be + /// resized to hold at least as much data as the offsets require + /// + /// Does nothing if the array is not a variable binary type + Status ResizeVaryingLengthBuffer(); + + /// \brief The current length (in rows) of the array + int num_rows() const { return num_rows_; } + + /// \brief A non-owning view into this array + KeyColumnArray column_array() const; + + /// \brief A lightweight descriptor of the data held by this array + Result column_metadata() const { + return ColumnMetadataFromDataType(data_type_); + } + + /// \brief Convert the data to an arrow::ArrayData + /// + /// This is a zero copy operation and the created ArrayData will reference the + /// buffers held by this instance. + std::shared_ptr array_data() const; + + // Constants used for accessing buffers using mutable_data(). + static constexpr int kValidityBuffer = 0; + static constexpr int kFixedLengthBuffer = 1; + static constexpr int kVariableLengthBuffer = 2; + + /// \brief A raw pointer to the requested buffer + /// + /// If i is 0 (kValidityBuffer) then this returns the validity buffer + /// If i is 1 (kFixedLengthBuffer) then this returns the buffer used for values (if this + /// is a fixed length data type) or offsets (if this is a variable binary type) + /// If i is 2 (kVariableLengthBuffer) then this returns the buffer used for variable + /// length binary data + uint8_t* mutable_data(int i) { return buffers_[i]->mutable_data(); } + + private: + static constexpr int64_t kNumPaddingBytes = 64; + int log_num_rows_min_; + std::shared_ptr data_type_; + MemoryPool* pool_; + int num_rows_; + int num_rows_allocated_; + int64_t var_len_buf_size_; + static constexpr int kMaxBuffers = 3; + std::shared_ptr buffers_[kMaxBuffers]; +}; + +/// \brief A builder to concatenate batches of data into a larger batch +/// +/// Will only store num_rows_max() rows +class ARROW_EXPORT ExecBatchBuilder { + public: + /// \brief Add rows from `source` into `target` column + /// + /// If `target` is uninitialized or cleared it will be initialized to use + /// the given pool. + static Status AppendSelected(const std::shared_ptr& source, + ResizableArrayData* target, int num_rows_to_append, + const uint16_t* row_ids, MemoryPool* pool); + + /// \brief Add nulls into `target` column + /// + /// If `target` is uninitialized or cleared it will be initialized to use + /// the given pool. 
+ static Status AppendNulls(const std::shared_ptr& type, + ResizableArrayData& target, int num_rows_to_append, + MemoryPool* pool); + + /// \brief Add selected rows from `batch` + /// + /// If `col_ids` is null then `num_cols` should be no greater than + /// batch.num_values() and the first `num_cols` columns of batch will be appended. + /// + /// All columns in `batch` must have array shape + Status AppendSelected(MemoryPool* pool, const ExecBatch& batch, int num_rows_to_append, + const uint16_t* row_ids, int num_cols, + const int* col_ids = NULLPTR); + + /// \brief Add all-null rows + Status AppendNulls(MemoryPool* pool, + const std::vector>& types, + int num_rows_to_append); + + /// \brief Create an ExecBatch with the data that has been appended so far + /// and clear this builder to be used again + /// + /// Should only be called if num_rows() returns non-zero. + ExecBatch Flush(); + + int num_rows() const { return values_.empty() ? 0 : values_[0].num_rows(); } + + static int num_rows_max() { return 1 << kLogNumRows; } + + private: + static constexpr int kLogNumRows = 15; + + // Calculate how many rows to skip from the tail of the + // sequence of selected rows, such that the total size of skipped rows is at + // least equal to the size specified by the caller. + // + // Skipping of the tail rows + // is used to allow for faster processing by the caller of remaining rows + // without checking buffer bounds (useful with SIMD or fixed size memory loads + // and stores). + // + // The sequence of row_ids provided must be non-decreasing. In case of consecutive rows + // with the same row id, they are skipped all at once because they occupy the same + // space. + // + static int NumRowsToSkip(const std::shared_ptr& column, int num_rows, + const uint16_t* row_ids, int num_tail_bytes_to_skip); + + // The supplied lambda will be called for each row in the given list of rows. + // The arguments given to it will be: + // - index of a row (within the set of selected rows), + // - pointer to the value, + // - byte length of the value. + // + // The information about nulls (validity bitmap) is not used in this call and + // has to be processed separately. + // + template + static void Visit(const std::shared_ptr& column, int num_rows, + const uint16_t* row_ids, PROCESS_VALUE_FN process_value_fn); + + template + static void CollectBitsImp(const uint8_t* input_bits, int64_t input_bits_offset, + uint8_t* output_bits, int64_t output_bits_offset, + int num_rows, const uint16_t* row_ids); + static void CollectBits(const uint8_t* input_bits, int64_t input_bits_offset, + uint8_t* output_bits, int64_t output_bits_offset, int num_rows, + const uint16_t* row_ids); + + std::vector values_; +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h new file mode 100644 index 0000000000000000000000000000000000000000..61caa2b570dd31dc988d34406f9b05c3573333e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/type.h" +#include "arrow/util/compare.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +enum class SortOrder { + /// Arrange values in increasing order + Ascending, + /// Arrange values in decreasing order + Descending, +}; + +enum class NullPlacement { + /// Place nulls and NaNs before any non-null values. + /// NaNs will come after nulls. + AtStart, + /// Place nulls and NaNs after any non-null values. + /// NaNs will come before nulls. + AtEnd, +}; + +/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices +class ARROW_EXPORT SortKey : public util::EqualityComparable { + public: + explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending) + : target(std::move(target)), order(order) {} + + bool Equals(const SortKey& other) const; + std::string ToString() const; + + /// A FieldRef targeting the sort column. + FieldRef target; + /// How to order by this sort key. + SortOrder order; +}; + +class ARROW_EXPORT Ordering : public util::EqualityComparable { + public: + Ordering(std::vector sort_keys, + NullPlacement null_placement = NullPlacement::AtStart) + : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {} + /// true if data ordered by other is also ordered by this + /// + /// For example, if data is ordered by [a, b, c] then it is also ordered + /// by [a, b] but not by [b, c] or [a, b, c, d]. + /// + /// [a, b].IsSuborderOf([a, b, c]) - true + /// [a, b, c].IsSuborderOf([a, b, c]) - true + /// [b, c].IsSuborderOf([a, b, c]) - false + /// [a, b, c, d].IsSuborderOf([a, b, c]) - false + /// + /// The implicit ordering is not a suborder of any other ordering and + /// no other ordering is a suborder of it. The implicit ordering is not a + /// suborder of itself. + /// + /// The unordered ordering is a suborder of all other orderings but no + /// other ordering is a suborder of it. The unordered ordering is a suborder + /// of itself. + /// + /// The unordered ordering is a suborder of the implicit ordering. + bool IsSuborderOf(const Ordering& other) const; + + bool Equals(const Ordering& other) const; + std::string ToString() const; + + bool is_implicit() const { return is_implicit_; } + bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); } + + const std::vector& sort_keys() const { return sort_keys_; } + NullPlacement null_placement() const { return null_placement_; } + + static const Ordering& Implicit() { + static const Ordering kImplicit(true); + return kImplicit; + } + + static const Ordering& Unordered() { + static const Ordering kUnordered(false); + // It is also possible to get an unordered ordering by passing in an empty vector + // using the normal constructor. This is ok and useful when ordering comes from user + // input. 
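+    // e.g. Ordering({}) is also unordered (its is_unordered() returns true).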
+ return kUnordered; + } + + private: + explicit Ordering(bool is_implicit) + : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {} + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys_; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement_; + bool is_implicit_ = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h new file mode 100644 index 0000000000000000000000000000000000000000..afd9f2007b6cc868565f8d2ce91ca89c618900bd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +class Function; +class FunctionOptionsType; + +/// \brief A mutable central function registry for built-in functions as well +/// as user-defined functions. Functions are implementations of +/// arrow::compute::Function. +/// +/// Generally, each function contains kernels which are implementations of a +/// function for a specific argument signature. After looking up a function in +/// the registry, one can either execute it eagerly with Function::Execute or +/// use one of the function's dispatch methods to pick a suitable kernel for +/// lower-level function execution. +class ARROW_EXPORT FunctionRegistry { + public: + ~FunctionRegistry(); + + /// \brief Construct a new registry. + /// + /// Most users only need to use the global registry. + static std::unique_ptr Make(); + + /// \brief Construct a new nested registry with the given parent. + /// + /// Most users only need to use the global registry. The returned registry never changes + /// its parent, even when an operation allows overwriting. + static std::unique_ptr Make(FunctionRegistry* parent); + + /// \brief Check whether a new function can be added to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status CanAddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Add a new function to the registry. + /// + /// \returns Status::KeyError if a function with the same name is already registered. + Status AddFunction(std::shared_ptr function, bool allow_overwrite = false); + + /// \brief Check whether an alias can be added for the given function name. 
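+  ///
+  /// For example, CanAddAlias("my_add", "add") returns OK when "add" is
+  /// registered ("add" is a built-in function name; "my_add" is a
+  /// hypothetical alias used here for illustration).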
+ /// + /// \returns Status::KeyError if the function with the given name is not registered. + Status CanAddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Add alias for the given function name. + /// + /// \returns Status::KeyError if the function with the given name is not registered. + Status AddAlias(const std::string& target_name, const std::string& source_name); + + /// \brief Check whether a new function options type can be added to the registry. + /// + /// \return Status::KeyError if a function options type with the same name is already + /// registered. + Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Add a new function options type to the registry. + /// + /// \returns Status::KeyError if a function options type with the same name is already + /// registered. + Status AddFunctionOptionsType(const FunctionOptionsType* options_type, + bool allow_overwrite = false); + + /// \brief Retrieve a function by name from the registry. + Result> GetFunction(const std::string& name) const; + + /// \brief Return vector of all entry names in the registry. + /// + /// Helpful for displaying a manifest of available functions. + std::vector GetFunctionNames() const; + + /// \brief Retrieve a function options type by name from the registry. + Result GetFunctionOptionsType( + const std::string& name) const; + + /// \brief The number of currently registered functions. + int num_functions() const; + + private: + FunctionRegistry(); + + // Use PIMPL pattern to not have std::unordered_map here + class FunctionRegistryImpl; + std::unique_ptr impl_; + + explicit FunctionRegistry(FunctionRegistryImpl* impl); +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h new file mode 100644 index 0000000000000000000000000000000000000000..628a9c14f3e4402300a3daafdc2308379af5103a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/compute/kernel.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +/// \brief A segment +/// A segment group is a chunk of continuous rows that have the same segment key. (For +/// example, in ordered time series processing, segment key can be "date", and a segment +/// group can be all the rows that belong to the same date.) A segment group can span +/// across multiple exec batches. 
A segment is a chunk of continuous rows that has the +/// same segment key within a given batch. When a segment group spans across batches, it +/// will have multiple segments. A segment never spans across batches. The segment data +/// structure only makes sense when used along with an exec batch. +struct ARROW_EXPORT Segment { + /// \brief the offset into the batch where the segment starts + int64_t offset; + /// \brief the length of the segment + int64_t length; + /// \brief whether the segment may be extended by a next one + bool is_open; + /// \brief whether the segment extends a preceding one + bool extends; +}; + +inline bool operator==(const Segment& segment1, const Segment& segment2) { + return segment1.offset == segment2.offset && segment1.length == segment2.length && + segment1.is_open == segment2.is_open && segment1.extends == segment2.extends; +} +inline bool operator!=(const Segment& segment1, const Segment& segment2) { + return !(segment1 == segment2); +} + +/// \brief a helper class to divide a batch into segments of equal values +/// +/// For example, given a batch with two columns: +/// +/// A A +/// A A +/// A B +/// A B +/// A A +/// +/// Then the batch could be divided into 3 segments. The first would be rows 0 & 1, +/// the second would be rows 2 & 3, and the third would be row 4. +/// +/// Further, a segmenter keeps track of the last value seen. This allows it to calculate +/// segments which span batches. In our above example the last segment we emit would set +/// the "open" flag, which indicates that the segment may extend into the next batch. +/// +/// If the next call to the segmenter starts with `A A` then that segment would set the +/// "extends" flag, which indicates that the segment continues the last open segment. +class ARROW_EXPORT RowSegmenter { + public: + virtual ~RowSegmenter() = default; + + /// \brief Construct a Segmenter which segments on the specified key types + /// + /// \param[in] key_types the specified key types + /// \param[in] nullable_keys whether values of the specified keys may be null + /// \param[in] ctx the execution context to use + static Result> Make( + const std::vector& key_types, bool nullable_keys, ExecContext* ctx); + + /// \brief Return the key types of this segmenter + virtual const std::vector& key_types() const = 0; + + /// \brief Reset this segmenter + /// + /// A segmenter normally extends (see `Segment`) a segment from one batch to the next. + /// If segment-extension is undesirable, for example when each batch is processed + /// independently, then `Reset` should be invoked before processing the next batch. + virtual Status Reset() = 0; + + /// \brief Get the next segment for the given batch starting from the given offset + virtual Result GetNextSegment(const ExecSpan& batch, int64_t offset) = 0; +}; + +/// Consumes batches of keys and yields batches of the group ids. +class ARROW_EXPORT Grouper { + public: + virtual ~Grouper() = default; + + /// Construct a Grouper which receives the specified key types + static Result> Make(const std::vector& key_types, + ExecContext* ctx = default_exec_context()); + + /// Consume a batch of keys, producing the corresponding group ids as an integer array, + /// over a slice defined by an offset and length, which defaults to the batch length. + /// Currently only uint32 indices will be produced; eventually the bit width will only + /// be as wide as necessary. + virtual Result Consume(const ExecSpan& batch, int64_t offset = 0, + int64_t length = -1) = 0; + + /// Get current unique keys.
May be called multiple times. + virtual Result GetUniques() = 0; + + /// Get the current number of groups. + virtual uint32_t num_groups() const = 0; + + /// \brief Assemble lists of indices of identical elements. + /// + /// \param[in] ids An unsigned, all-valid integral array which will be + /// used as grouping criteria. + /// \param[in] num_groups An upper bound for the elements of ids + /// \param[in] ctx Execution context to use during the operation + /// \return A num_groups-long ListArray where the slot at i contains a + /// list of indices where i appears in ids. + /// + /// MakeGroupings([ + /// 2, + /// 2, + /// 5, + /// 5, + /// 2, + /// 3 + /// ], 8) == [ + /// [], + /// [], + /// [0, 1, 4], + /// [5], + /// [], + /// [2, 3], + /// [], + /// [] + /// ] + static Result> MakeGroupings( + const UInt32Array& ids, uint32_t num_groups, + ExecContext* ctx = default_exec_context()); + + /// \brief Produce a ListArray whose slots are selections of `array` which correspond to + /// the provided groupings. + /// + /// For example, + /// ApplyGroupings([ + /// [], + /// [], + /// [0, 1, 4], + /// [5], + /// [], + /// [2, 3], + /// [], + /// [] + /// ], [2, 2, 5, 5, 2, 3]) == [ + /// [], + /// [], + /// [2, 2, 2], + /// [3], + /// [], + /// [5, 5], + /// [], + /// [] + /// ] + static Result> ApplyGroupings( + const ListArray& groupings, const Array& array, + ExecContext* ctx = default_exec_context()); +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..89f32ceb0f906e0d50bf063da22f33c3a856fe5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { + +struct Datum; +struct TypeHolder; + +namespace compute { + +class Function; +class ScalarAggregateFunction; +class FunctionExecutor; +class FunctionOptions; +class FunctionRegistry; + +/// \brief Return the process-global function registry. 
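+///
+/// A minimal lookup-and-execute sketch ("add" is a built-in function name;
+/// `a` and `b` are hypothetical Datum arguments):
+///
+/// \code
+/// auto* registry = arrow::compute::GetFunctionRegistry();
+/// ARROW_ASSIGN_OR_RAISE(auto add_fn, registry->GetFunction("add"));
+/// ARROW_ASSIGN_OR_RAISE(Datum sum,
+///                       add_fn->Execute({a, b}, /*options=*/NULLPTR,
+///                                       /*ctx=*/NULLPTR));
+/// \endcode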
+// Defined in registry.cc +ARROW_EXPORT FunctionRegistry* GetFunctionRegistry(); + +class CastOptions; + +struct ExecBatch; +class ExecContext; +class KernelContext; + +struct Kernel; +struct ScalarKernel; +struct ScalarAggregateKernel; +struct VectorKernel; + +struct KernelState; + +class Expression; + +ARROW_EXPORT ExecContext* default_exec_context(); +ARROW_EXPORT ExecContext* threaded_exec_context(); + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h new file mode 100644 index 0000000000000000000000000000000000000000..e2a2e71b8521dfa055a2010e4cfd2e151cdc6090 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h @@ -0,0 +1,290 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/mutex.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/type_fwd.h" + +#if defined(__clang__) || defined(__GNUC__) +#define BYTESWAP(x) __builtin_bswap64(x) +#define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31))) +#define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63))) +#define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +#elif defined(_MSC_VER) +#include +#define BYTESWAP(x) _byteswap_uint64(x) +#define ROTL(x, n) _rotl((x), (n)) +#define ROTL64(x, n) _rotl64((x), (n)) +#if defined(_M_X64) || defined(_M_I86) +#include // https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx +#define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +#else +#define PREFETCH(ptr) (void)(ptr) /* disabled */ +#endif +#endif + +namespace arrow { +namespace util { + +// Some platforms typedef int64_t as long int instead of long long int, +// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics +// which need long long. +// We use the cast to the type below in these intrinsics to make the code +// compile in all cases. +// +using int64_for_gather_t = const long long int; // NOLINT runtime-int + +// All MiniBatch... classes use TempVectorStack for vector allocations and can +// only work with vectors up to 1024 elements. +// +// They should only be allocated on the stack to guarantee the right sequence +// of allocation and deallocation of vectors from TempVectorStack. 
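+//
+// A typical stack-scoped use, as a sketch (`stack` is assumed to point to a
+// TempVectorStack that was initialized with enough capacity):
+//
+//   {
+//     TempVectorHolder<uint16_t> ids(stack, MiniBatch::kMiniBatchLength);
+//     uint16_t* ids_buf = ids.mutable_data();
+//     // ... fill and use up to 1024 elements ...
+//   }  // the vector is returned to the stack here, in LIFO order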
+//
+class MiniBatch {
+ public:
+  static constexpr int kLogMiniBatchLength = 10;
+  static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength;
+};
+
+/// Storage used to allocate temporary vectors of a batch size.
+/// Temporary vectors should resemble allocating temporary variables on the stack
+/// but in the context of vectorized processing where we need to store a vector of
+/// temporaries instead of a single value.
+class ARROW_EXPORT TempVectorStack {
+  template
+  friend class TempVectorHolder;
+
+ public:
+  Status Init(MemoryPool* pool, int64_t size) {
+    num_vectors_ = 0;
+    top_ = 0;
+    buffer_size_ = PaddedAllocationSize(size) + kPadding + 2 * sizeof(uint64_t);
+    // Allocate the full padded size computed above, not just the requested size,
+    // so that the guard words and SIMD padding fit inside the buffer.
+    ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateResizableBuffer(buffer_size_, pool));
+    // Poison the buffer so that accidental reads of uninitialized memory are
+    // easier to detect.
+    std::memset(buffer->mutable_data(), 0xFF, buffer_size_);
+    buffer_ = std::move(buffer);
+    return Status::OK();
+  }
+
+ private:
+  int64_t PaddedAllocationSize(int64_t num_bytes) {
+    // Round up allocation size to multiple of 8 bytes
+    // to avoid returning temp vectors with unaligned address.
+    //
+    // Also add padding at the end to facilitate loads and stores
+    // using SIMD when number of vector elements is not divisible
+    // by the number of SIMD lanes.
+    //
+    return ::arrow::bit_util::RoundUp(num_bytes, sizeof(int64_t)) + kPadding;
+  }
+  void alloc(uint32_t num_bytes, uint8_t** data, int* id);
+  void release(int id, uint32_t num_bytes);
+  static constexpr uint64_t kGuard1 = 0x3141592653589793ULL;
+  static constexpr uint64_t kGuard2 = 0x0577215664901532ULL;
+  static constexpr int64_t kPadding = 64;
+  int num_vectors_;
+  int64_t top_;
+  std::unique_ptr buffer_;
+  int64_t buffer_size_;
+};
+
+template
+class TempVectorHolder {
+  friend class TempVectorStack;
+
+ public:
+  ~TempVectorHolder() { stack_->release(id_, num_elements_ * sizeof(T)); }
+  T* mutable_data() { return reinterpret_cast(data_); }
+  TempVectorHolder(TempVectorStack* stack, uint32_t num_elements) {
+    stack_ = stack;
+    num_elements_ = num_elements;
+    stack_->alloc(num_elements * sizeof(T), &data_, &id_);
+  }
+
+ private:
+  TempVectorStack* stack_;
+  uint8_t* data_;
+  int id_;
+  uint32_t num_elements_;
+};
+
+namespace bit_util {
+
+ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags,
+                                  const int num_bits, const uint8_t* bits,
+                                  int* num_indexes, uint16_t* indexes,
+                                  int bit_offset = 0);
+
+ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags,
+                                      const int num_bits, const uint8_t* bits,
+                                      const uint16_t* input_indexes, int* num_indexes,
+                                      uint16_t* indexes, int bit_offset = 0);
+
+// Input and output indexes may be pointing to the same data (in-place filtering).
+ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits,
+                                     const uint8_t* bits, int* num_indexes_bit0,
+                                     uint16_t* indexes_bit0, uint16_t* indexes_bit1,
+                                     int bit_offset = 0);
+
+// Bit 1 is replaced with byte 0xFF.
+ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits,
+                                const uint8_t* bits, uint8_t* bytes, int bit_offset = 0);
+
+// Return highest bit of each byte.
+ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits,
+                                const uint8_t* bytes, uint8_t* bits, int bit_offset = 0);
+
+ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes,
+                                     uint32_t num_bytes);
+
+#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2)
+// The functions below use BMI2 instructions, be careful before calling!
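// For illustration only, a hedged sketch of a typical call site: dispatch on
// CpuInfo's hardware flags before invoking the avx2 variants declared below
// (the exact dispatch policy here is an assumption, not upstream documentation):
//
//   int64_t flags = ::arrow::internal::CpuInfo::GetInstance()->hardware_flags();
//   if (flags & ::arrow::internal::CpuInfo::AVX2) {
//     avx2::bits_to_bytes_avx2(num_bits, bits, bytes);
//   } else {
//     bits_to_bytes(/*hardware_flags=*/0, num_bits, bits, bytes, bit_offset);
//   }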
+ +namespace avx2 { +ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, + const uint16_t* input_indexes, + int* num_indexes, uint16_t* indexes); +ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits, + const uint8_t* bits, int* num_indexes, + uint16_t* indexes, uint16_t base_index = 0); +ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits, + uint8_t* bytes); +ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes, + uint8_t* bits); +ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes); +} // namespace avx2 + +#endif + +} // namespace bit_util +} // namespace util + +namespace compute { + +/// Modify an Expression with pre-order and post-order visitation. +/// `pre` will be invoked on each Expression. `pre` will visit Calls before their +/// arguments, `post_call` will visit Calls (and no other Expressions) after their +/// arguments. Visitors should return the Identical expression to indicate no change; this +/// will prevent unnecessary construction in the common case where a modification is not +/// possible/necessary/... +/// +/// If an argument was modified, `post_call` visits a reconstructed Call with the modified +/// arguments but also receives a pointer to the unmodified Expression as a second +/// argument. If no arguments were modified the unmodified Expression* will be nullptr. +template +Result ModifyExpression(Expression expr, const PreVisit& pre, + const PostVisitCall& post_call) { + ARROW_ASSIGN_OR_RAISE(expr, Result(pre(std::move(expr)))); + + auto call = expr.call(); + if (!call) return expr; + + bool at_least_one_modified = false; + std::vector modified_arguments; + + for (size_t i = 0; i < call->arguments.size(); ++i) { + ARROW_ASSIGN_OR_RAISE(auto modified_argument, + ModifyExpression(call->arguments[i], pre, post_call)); + + if (Identical(modified_argument, call->arguments[i])) { + continue; + } + + if (!at_least_one_modified) { + modified_arguments = call->arguments; + at_least_one_modified = true; + } + + modified_arguments[i] = std::move(modified_argument); + } + + if (at_least_one_modified) { + // reconstruct the call expression with the modified arguments + auto modified_call = *call; + modified_call.arguments = std::move(modified_arguments); + return post_call(Expression(std::move(modified_call)), &expr); + } + + return post_call(std::move(expr), NULLPTR); +} + +// Helper class to calculate the modified number of rows to process using SIMD. +// +// Some array elements at the end will be skipped in order to avoid buffer +// overrun, when doing memory loads and stores using larger word size than a +// single array element. 
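Before the SIMD tail helper that follows, a brief hedged illustration of the ModifyExpression visitor defined above; the "wrap" function name is purely hypothetical:

    // Sketch only: rewrite an expression bottom-up, unwrapping calls to a
    // hypothetical function named "wrap".
    arrow::Result<arrow::compute::Expression> StripWrap(
        arrow::compute::Expression expr) {
      namespace cp = arrow::compute;
      return cp::ModifyExpression(
          std::move(expr),
          /*pre=*/[](cp::Expression e) { return e; },
          /*post_call=*/[](cp::Expression e, const cp::Expression* unmodified) {
            // `unmodified` is nullptr when no argument was changed.
            auto call = e.call();
            if (call != nullptr && call->function_name == "wrap") {
              return call->arguments[0];
            }
            return e;
          });
    }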
+// +class TailSkipForSIMD { + public: + static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows, + int bit_offset) { + int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset); + int64_t num_bytes_safe = + std::max(static_cast(0LL), num_bytes - num_bytes_accessed_together + 1); + int64_t num_rows_safe = + std::max(static_cast(0LL), 8 * num_bytes_safe - bit_offset); + return std::min(num_rows_safe, num_rows); + } + static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + int64_t length) { + int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together); + int64_t num_rows_safe = + std::max(static_cast(0LL), num_rows - num_rows_to_skip); + return num_rows_safe; + } + static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows, + const uint32_t* offsets) { + // Do not process rows that could read past the end of the buffer using N + // byte loads/stores. + // + int64_t num_rows_safe = num_rows; + while (num_rows_safe > 0 && + offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) { + --num_rows_safe; + } + return num_rows_safe; + } + static int FixSelection(int64_t num_rows_safe, int num_selected, + const uint16_t* selection) { + int num_selected_safe = num_selected; + while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) { + --num_selected_safe; + } + return num_selected_safe; + } +}; + +} // namespace compute +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h new file mode 100644 index 0000000000000000000000000000000000000000..6d76dcef727e7643ba559d8802665755a4f8a870 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h @@ -0,0 +1,275 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +/// Logic for automatically determining the structure of multi-file +/// dataset with possible partitioning according to available +/// partitioning + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/dataset/partition.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/dataset/visibility.h" +#include "arrow/filesystem/type_fwd.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace dataset { + +/// \defgroup dataset-discovery Discovery API +/// +/// @{ + +struct InspectOptions { + /// See `fragments` property. + static constexpr int kInspectAllFragments = -1; + + /// Indicate how many fragments should be inspected to infer the unified dataset + /// schema. 
Limiting the number of fragments accessed improves the latency of
+  /// the discovery process when dealing with a high number of fragments and/or
+  /// high latency file systems.
+  ///
+  /// The default value of `1` inspects the schema of the first (in no particular
+  /// order) fragment only. If the dataset has a uniform schema for all fragments,
+  /// this default is the optimal value. In order to inspect all fragments and
+  /// robustly unify their potentially varying schemas, set this option to
+  /// `kInspectAllFragments`. A value of `0` disables inspection of fragments
+  /// altogether so only the partitioning schema will be inspected.
+  int fragments = 1;
+
+  /// Control how to unify types. By default, types are merged strictly (the
+  /// type must match exactly, except nulls can be merged with other types).
+  Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();
+};
+
+struct FinishOptions {
+  /// Finalize the dataset with this given schema. If the schema is not
+  /// provided, it will be inferred via Inspect; see the `inspect_options`
+  /// property.
+  std::shared_ptr schema = NULLPTR;
+
+  /// If the schema is not provided, it will be discovered by passing the
+  /// following options to `DatasetFactory::Inspect`.
+  InspectOptions inspect_options{};
+
+  /// Indicate whether the given Schema, when specified, should be validated
+  /// against the fragments' schemas. `inspect_options` will control how many
+  /// fragments are checked.
+  bool validate_fragments = false;
+};
+
+/// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected
+/// schema before materializing said Dataset.
+class ARROW_DS_EXPORT DatasetFactory {
+ public:
+  /// \brief Get the schemas of the Fragments and Partitioning.
+  virtual Result>> InspectSchemas(
+      InspectOptions options) = 0;
+
+  /// \brief Get unified schema for the resulting Dataset.
+  Result> Inspect(InspectOptions options = {});
+
+  /// \brief Create a Dataset
+  Result> Finish();
+  /// \brief Create a Dataset with the given schema (see \a FinishOptions::schema)
+  Result> Finish(std::shared_ptr schema);
+  /// \brief Create a Dataset with the given options
+  virtual Result> Finish(FinishOptions options) = 0;
+
+  /// \brief Optional root partition for the resulting Dataset.
+  const compute::Expression& root_partition() const { return root_partition_; }
+  /// \brief Set the root partition for the resulting Dataset.
+  Status SetRootPartition(compute::Expression partition) {
+    root_partition_ = std::move(partition);
+    return Status::OK();
+  }
+
+  virtual ~DatasetFactory() = default;
+
+ protected:
+  DatasetFactory();
+
+  compute::Expression root_partition_;
+};
+
+/// @}
+
+/// \brief DatasetFactory provides a way to inspect/discover a Dataset's
+/// expected schema before materialization.
+/// \ingroup dataset-implementations
+class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory {
+ public:
+  static Result> Make(
+      std::vector> factories);
+
+  /// \brief Return the list of child DatasetFactory
+  const std::vector>& factories() const {
+    return factories_;
+  }
+
+  /// \brief Get the schemas of the Datasets.
+  ///
+  /// The options are applied to each child factory rather than globally.
+  /// This will not respect `options.fragments` exactly, but will respect the
+  /// spirit of peeking the first fragments or all of them.
+  Result>> InspectSchemas(
+      InspectOptions options) override;
+
+  /// \brief Create a Dataset.
+  Result> Finish(FinishOptions options) override;
+
+ protected:
+  explicit UnionDatasetFactory(std::vector> factories);
+
+  std::vector> factories_;
+};
+
+/// \ingroup dataset-filesystem
+struct FileSystemFactoryOptions {
+  /// Either an explicit Partitioning or a PartitioningFactory to discover one.
+  ///
+  /// If a factory is provided, it will be used to infer a schema for partition fields
+  /// based on file and directory paths, and then construct a Partitioning. The default
+  /// is a Partitioning which will yield no partition information.
+  ///
+  /// The (explicit or discovered) partitioning will be applied to discovered files
+  /// and the resulting partition information embedded in the Dataset.
+  PartitioningOrFactory partitioning{Partitioning::Default()};
+
+  /// For the purposes of applying the partitioning, paths will be stripped
+  /// of the partition_base_dir. Files not matching the partition_base_dir
+  /// prefix will be skipped for partition discovery. The ignored files will still
+  /// be part of the Dataset, but will not have partition information.
+  ///
+  /// Example:
+  /// partition_base_dir = "/dataset";
+  ///
+  /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
+  ///
+  /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
+  ///
+  /// This is useful for partitionings that parse directory paths where ordering
+  /// is important, e.g. DirectoryPartitioning.
+  std::string partition_base_dir;
+
+  /// Invalid files (via selector or explicitly) will be excluded by checking
+  /// with the FileFormat::IsSupported method. This will incur IO for each file,
+  /// serially and in a single thread. Disabling this feature will skip the
+  /// IO, but unsupported files may be present in the Dataset
+  /// (resulting in an error at scan time).
+  bool exclude_invalid_files = false;
+
+  /// When discovering from a Selector (and not from an explicit file list), ignore
+  /// files and directories matching any of these prefixes.
+  ///
+  /// Example (with selector = "/dataset/**"):
+  /// selector_ignore_prefixes = {"_", ".DS_STORE" };
+  ///
+  /// - "/dataset/data.csv" -> not ignored
+  /// - "/dataset/_metadata" -> ignored
+  /// - "/dataset/.DS_STORE" -> ignored
+  /// - "/dataset/_hidden/dat" -> ignored
+  /// - "/dataset/nested/.DS_STORE" -> ignored
+  std::vector selector_ignore_prefixes = {
+      ".",
+      "_",
+  };
+};
+
+/// \brief FileSystemDatasetFactory creates a Dataset from a vector of
+/// fs::FileInfo or a fs::FileSelector.
+/// \ingroup dataset-filesystem
+class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory {
+ public:
+  /// \brief Build a FileSystemDatasetFactory from an explicit list of
+  /// paths.
+  ///
+  /// \param[in] filesystem passed to FileSystemDataset
+  /// \param[in] paths passed to FileSystemDataset
+  /// \param[in] format passed to FileSystemDataset
+  /// \param[in] options see FileSystemFactoryOptions for more information.
+  static Result> Make(
+      std::shared_ptr filesystem, const std::vector& paths,
+      std::shared_ptr format, FileSystemFactoryOptions options);
+
+  /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector.
+  ///
+  /// The selector will expand to a vector of FileInfo. The expansion/crawling
+  /// is performed in this function call. Thus, the finalized Dataset works
+  /// with a snapshot of the filesystem.
+  //
+  /// If options.partition_base_dir is not provided, it will be overwritten
+  /// with selector.base_dir.
+ /// + /// \param[in] filesystem passed to FileSystemDataset + /// \param[in] selector used to crawl and search files + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make( + std::shared_ptr filesystem, fs::FileSelector selector, + std::shared_ptr format, FileSystemFactoryOptions options); + + /// \brief Build a FileSystemDatasetFactory from an uri including filesystem + /// information. + /// + /// \param[in] uri passed to FileSystemDataset + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make(std::string uri, + std::shared_ptr format, + FileSystemFactoryOptions options); + + /// \brief Build a FileSystemDatasetFactory from an explicit list of + /// file information. + /// + /// \param[in] filesystem passed to FileSystemDataset + /// \param[in] files passed to FileSystemDataset + /// \param[in] format passed to FileSystemDataset + /// \param[in] options see FileSystemFactoryOptions for more information. + static Result> Make( + std::shared_ptr filesystem, const std::vector& files, + std::shared_ptr format, FileSystemFactoryOptions options); + + Result>> InspectSchemas( + InspectOptions options) override; + + Result> Finish(FinishOptions options) override; + + protected: + FileSystemDatasetFactory(std::vector files, + std::shared_ptr filesystem, + std::shared_ptr format, + FileSystemFactoryOptions options); + + Result> PartitionSchema(); + + std::vector files_; + std::shared_ptr fs_; + std::shared_ptr format_; + FileSystemFactoryOptions options_; +}; + +} // namespace dataset +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h new file mode 100644 index 0000000000000000000000000000000000000000..732be5f928f58c9c2e3291eeef155c73bc2eda8e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
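Tying the discovery API of the preceding header together, a minimal hedged sketch (the path, file format, and option choices are assumptions, not upstream guidance):

    #include "arrow/dataset/discovery.h"
    #include "arrow/dataset/file_parquet.h"  // ParquetFileFormat (assumed format)
    #include "arrow/filesystem/localfs.h"

    arrow::Result<std::shared_ptr<arrow::dataset::Dataset>> DiscoverDataset() {
      namespace ds = arrow::dataset;
      auto filesystem = std::make_shared<arrow::fs::LocalFileSystem>();
      arrow::fs::FileSelector selector;
      selector.base_dir = "/dataset";  // hypothetical root directory
      selector.recursive = true;
      ds::FileSystemFactoryOptions options;
      options.selector_ignore_prefixes = {".", "_"};
      ARROW_ASSIGN_OR_RAISE(
          auto factory, ds::FileSystemDatasetFactory::Make(
                            filesystem, selector,
                            std::make_shared<ds::ParquetFileFormat>(), options));
      // Inspect the first fragment's schema, then materialize the Dataset.
      ARROW_ASSIGN_OR_RAISE(auto schema, factory->Inspect());
      return factory->Finish(schema);
    }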
+ +#pragma once + +#include "arrow/util/config.h" // IWYU pragma: export + +#include "arrow/filesystem/filesystem.h" // IWYU pragma: export +#include "arrow/filesystem/hdfs.h" // IWYU pragma: export +#ifdef ARROW_GCS +#include "arrow/filesystem/gcsfs.h" // IWYU pragma: export +#endif +#include "arrow/filesystem/localfs.h" // IWYU pragma: export +#include "arrow/filesystem/mockfs.h" // IWYU pragma: export +#ifdef ARROW_S3 +#include "arrow/filesystem/s3fs.h" // IWYU pragma: export +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h new file mode 100644 index 0000000000000000000000000000000000000000..55f89ba4776e20f68253dcb7a81a3823fe775941 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h @@ -0,0 +1,237 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Azure::Core::Credentials { +class TokenCredential; +} + +namespace Azure::Storage { +class StorageSharedKeyCredential; +} + +namespace Azure::Storage::Blobs { +class BlobServiceClient; +} + +namespace Azure::Storage::Files::DataLake { +class DataLakeFileSystemClient; +class DataLakeServiceClient; +} // namespace Azure::Storage::Files::DataLake + +namespace arrow::fs { + +class TestAzureFileSystem; + +/// Options for the AzureFileSystem implementation. +/// +/// By default, authentication is handled by the Azure SDK's credential chain +/// which may read from multiple environment variables, such as: +/// - `AZURE_TENANT_ID` +/// - `AZURE_CLIENT_ID` +/// - `AZURE_CLIENT_SECRET` +/// - `AZURE_AUTHORITY_HOST` +/// - `AZURE_CLIENT_CERTIFICATE_PATH` +/// - `AZURE_FEDERATED_TOKEN_FILE` +/// +/// Functions are provided for explicit configuration of credentials if that is preferred. +struct ARROW_EXPORT AzureOptions { + /// \brief The name of the Azure Storage Account being accessed. + /// + /// All service URLs will be constructed using this storage account name. + /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate + /// this account. + std::string account_name; + + /// \brief hostname[:port] of the Azure Blob Storage Service. + /// + /// If the hostname is a relative domain name (one that starts with a '.'), then storage + /// account URLs will be constructed by prepending the account name to the hostname. + /// If the hostname is a fully qualified domain name, then the hostname will be used + /// as-is and the account name will follow the hostname in the URL path. 
+  ///
+  /// Default: ".blob.core.windows.net"
+  std::string blob_storage_authority = ".blob.core.windows.net";
+
+  /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service.
+  ///
+  /// If the hostname is a relative domain name (one that starts with a '.'), then storage
+  /// account URLs will be constructed by prepending the account name to the hostname.
+  /// If the hostname is a fully qualified domain name, then the hostname will be used
+  /// as-is and the account name will follow the hostname in the URL path.
+  ///
+  /// Default: ".dfs.core.windows.net"
+  std::string dfs_storage_authority = ".dfs.core.windows.net";
+
+  /// \brief Azure Blob Storage connection transport.
+  ///
+  /// Default: "https"
+  std::string blob_storage_scheme = "https";
+
+  /// \brief Azure Data Lake Storage Gen 2 connection transport.
+  ///
+  /// Default: "https"
+  std::string dfs_storage_scheme = "https";
+
+  // TODO(GH-38598): Add support for more auth methods.
+  // std::string connection_string;
+  // std::string sas_token;
+
+  /// \brief Default metadata for OpenOutputStream.
+  ///
+  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
+  std::shared_ptr default_metadata;
+
+ private:
+  enum class CredentialKind {
+    kDefault,
+    kAnonymous,
+    kStorageSharedKey,
+    kClientSecret,
+    kManagedIdentity,
+    kWorkloadIdentity,
+  } credential_kind_ = CredentialKind::kDefault;
+
+  std::shared_ptr
+      storage_shared_key_credential_;
+  mutable std::shared_ptr token_credential_;
+
+ public:
+  AzureOptions();
+  ~AzureOptions();
+
+  Status ConfigureDefaultCredential();
+  Status ConfigureAnonymousCredential();
+  Status ConfigureAccountKeyCredential(const std::string& account_key);
+  Status ConfigureClientSecretCredential(const std::string& tenant_id,
+                                         const std::string& client_id,
+                                         const std::string& client_secret);
+  Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string());
+  Status ConfigureWorkloadIdentityCredential();
+
+  bool Equals(const AzureOptions& other) const;
+
+  std::string AccountBlobUrl(const std::string& account_name) const;
+  std::string AccountDfsUrl(const std::string& account_name) const;
+
+  Result>
+  MakeBlobServiceClient() const;
+
+  Result>
+  MakeDataLakeServiceClient() const;
+};
+
+/// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and
+/// Azure Data Lake Storage Gen2 (ADLS Gen2) [2].
+///
+/// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that
+/// support high throughput analytic workloads, built on Azure Blob Storage. All the data
+/// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account.
+/// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop
+/// compatibility. ADLS Gen1 exists as a separate service that will be retired on
+/// 2024-02-29; new ADLS accounts use Gen2 instead.
+///
+/// ADLS Gen2 and Blob APIs can operate on the same data, but there are
+/// some limitations [3]. The ones that are relevant to this
+/// implementation are listed here:
+///
+/// - You can't use both Blob APIs and ADLS APIs to write to the same instance of a file.
+///   If you write to a file by using ADLS APIs then that file's blocks won't be visible
+///   to calls to the GetBlockList Blob API. The only exception is when you're
+///   overwriting.
+/// - When you use the ListBlobs operation without specifying a delimiter, the results
+///   include both directories and blobs. If you choose to use a delimiter, use only a
+///   forward slash (/) -- the only supported delimiter.
+/// - If you use the DeleteBlob API to delete a directory, that directory is deleted only
+///   if it's empty. This means that you can't use the Blob API to delete directories
+///   recursively.
+///
+/// [1]: https://azure.microsoft.com/en-us/products/storage/blobs
+/// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage
+/// [3]:
+/// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues
+class ARROW_EXPORT AzureFileSystem : public FileSystem {
+ private:
+  class Impl;
+  std::unique_ptr impl_;
+
+  explicit AzureFileSystem(std::unique_ptr&& impl);
+
+  friend class TestAzureFileSystem;
+  void ForceCachedHierarchicalNamespaceSupport(int hns_support);
+
+ public:
+  ~AzureFileSystem() override = default;
+
+  static Result> Make(
+      const AzureOptions& options, const io::IOContext& = io::default_io_context());
+
+  std::string type_name() const override { return "abfs"; }
+
+  /// Return the original Azure options used when constructing the filesystem
+  const AzureOptions& options() const;
+
+  bool Equals(const FileSystem& other) const override;
+
+  Result GetFileInfo(const std::string& path) override;
+
+  Result GetFileInfo(const FileSelector& select) override;
+
+  Status CreateDir(const std::string& path, bool recursive = true) override;
+
+  Status DeleteDir(const std::string& path) override;
+
+  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
+
+  Status DeleteRootDirContents() override;
+
+  Status DeleteFile(const std::string& path) override;
+
+  Status Move(const std::string& src, const std::string& dest) override;
+
+  Status CopyFile(const std::string& src, const std::string& dest) override;
+
+  Result> OpenInputStream(
+      const std::string& path) override;
+
+  Result> OpenInputStream(const FileInfo& info) override;
+
+  Result> OpenInputFile(
+      const std::string& path) override;
+
+  Result> OpenInputFile(
+      const FileInfo& info) override;
+
+  Result> OpenOutputStream(
+      const std::string& path,
+      const std::shared_ptr& metadata = {}) override;
+
+  Result> OpenAppendStream(
+      const std::string& path,
+      const std::shared_ptr& metadata = {}) override;
+};
+
+}  // namespace arrow::fs
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h
new file mode 100644
index 0000000000000000000000000000000000000000..559f1335f12c13aba2761b0b46e867cfce5cebdf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h
@@ -0,0 +1,565 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
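As a usage sketch for the Azure filesystem declared in the preceding header (the account name is a placeholder; credential resolution is delegated to the SDK's default chain):

    #include "arrow/filesystem/azurefs.h"

    arrow::Result<std::shared_ptr<arrow::fs::AzureFileSystem>> MakeAzureFs() {
      arrow::fs::AzureOptions options;
      options.account_name = "myaccount";  // hypothetical storage account
      // Let the Azure SDK credential chain resolve credentials (env vars etc.).
      ARROW_RETURN_NOT_OK(options.ConfigureDefaultCredential());
      return arrow::fs::AzureFileSystem::Make(options);
    }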
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/filesystem/type_fwd.h" +#include "arrow/io/interfaces.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow { +namespace fs { + +// A system clock time point expressed as a 64-bit (or more) number of +// nanoseconds since the epoch. +using TimePoint = + std::chrono::time_point; + +ARROW_EXPORT std::string ToString(FileType); + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, FileType); + +static const int64_t kNoSize = -1; +static const TimePoint kNoTime = TimePoint(TimePoint::duration(-1)); + +/// \brief FileSystem entry info +struct ARROW_EXPORT FileInfo : public util::EqualityComparable { + FileInfo() = default; + FileInfo(FileInfo&&) = default; + FileInfo& operator=(FileInfo&&) = default; + FileInfo(const FileInfo&) = default; + FileInfo& operator=(const FileInfo&) = default; + + explicit FileInfo(std::string path, FileType type = FileType::Unknown) + : path_(std::move(path)), type_(type) {} + + /// The file type + FileType type() const { return type_; } + void set_type(FileType type) { type_ = type; } + + /// The full file path in the filesystem + const std::string& path() const { return path_; } + void set_path(std::string path) { path_ = std::move(path); } + + /// The file base name (component after the last directory separator) + std::string base_name() const; + + // The directory base name (component before the file base name). + std::string dir_name() const; + + /// The size in bytes, if available + /// + /// Only regular files are guaranteed to have a size. + int64_t size() const { return size_; } + void set_size(int64_t size) { size_ = size; } + + /// The file extension (excluding the dot) + std::string extension() const; + + /// The time of last modification, if available + TimePoint mtime() const { return mtime_; } + void set_mtime(TimePoint mtime) { mtime_ = mtime; } + + bool IsFile() const { return type_ == FileType::File; } + bool IsDirectory() const { return type_ == FileType::Directory; } + + bool Equals(const FileInfo& other) const { + return type() == other.type() && path() == other.path() && size() == other.size() && + mtime() == other.mtime(); + } + + std::string ToString() const; + + /// Function object implementing less-than comparison and hashing by + /// path, to support sorting infos, using them as keys, and other + /// interactions with the STL. + struct ByPath { + bool operator()(const FileInfo& l, const FileInfo& r) const { + return l.path() < r.path(); + } + + size_t operator()(const FileInfo& i) const { + return std::hash{}(i.path()); + } + }; + + protected: + std::string path_; + FileType type_ = FileType::Unknown; + int64_t size_ = kNoSize; + TimePoint mtime_ = kNoTime; +}; + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, const FileInfo&); + +/// \brief File selector for filesystem APIs +struct ARROW_EXPORT FileSelector { + /// The directory in which to select files. + /// If the path exists but doesn't point to a directory, this should be an error. + std::string base_dir; + /// The behavior if `base_dir` isn't found in the filesystem. If false, + /// an error is returned. If true, an empty selection is returned. + bool allow_not_found; + /// Whether to recurse into subdirectories. 
+ bool recursive; + /// The maximum number of subdirectories to recurse into. + int32_t max_recursion; + + FileSelector() : allow_not_found(false), recursive(false), max_recursion(INT32_MAX) {} +}; + +/// \brief FileSystem, path pair +struct ARROW_EXPORT FileLocator { + std::shared_ptr filesystem; + std::string path; +}; + +using FileInfoVector = std::vector; +using FileInfoGenerator = std::function()>; + +} // namespace fs + +template <> +struct IterationTraits { + static fs::FileInfoVector End() { return {}; } + static bool IsEnd(const fs::FileInfoVector& val) { return val.empty(); } +}; + +namespace fs { + +/// \brief Abstract file system API +class ARROW_EXPORT FileSystem : public std::enable_shared_from_this { + public: + virtual ~FileSystem(); + + virtual std::string type_name() const = 0; + + /// EXPERIMENTAL: The IOContext associated with this filesystem. + const io::IOContext& io_context() const { return io_context_; } + + /// Normalize path for the given filesystem + /// + /// The default implementation of this method is a no-op, but subclasses + /// may allow normalizing irregular path forms (such as Windows local paths). + virtual Result NormalizePath(std::string path); + + /// \brief Ensure a URI (or path) is compatible with the given filesystem and return the + /// path + /// + /// \param uri_string A URI representing a resource in the given filesystem. + /// + /// This method will check to ensure the given filesystem is compatible with the + /// URI. This can be useful when the user provides both a URI and a filesystem or + /// when a user provides multiple URIs that should be compatible with the same + /// filesystem. + /// + /// uri_string can be an absolute path instead of a URI. In that case it will ensure + /// the filesystem (if supplied) is the local filesystem (or some custom filesystem that + /// is capable of reading local paths) and will normalize the path's file separators. + /// + /// Note, this method only checks to ensure the URI scheme is valid. It will not detect + /// inconsistencies like a mismatching region or endpoint override. + /// + /// \return The path inside the filesystem that is indicated by the URI. + virtual Result PathFromUri(const std::string& uri_string) const; + + virtual bool Equals(const FileSystem& other) const = 0; + + virtual bool Equals(const std::shared_ptr& other) const { + return Equals(*other); + } + + /// Get info for the given target. + /// + /// Any symlink is automatically dereferenced, recursively. + /// A nonexistent or unreachable file returns an Ok status and + /// has a FileType of value NotFound. An error status indicates + /// a truly exceptional condition (low-level I/O error, etc.). + virtual Result GetFileInfo(const std::string& path) = 0; + /// Same, for many targets at once. + virtual Result GetFileInfo(const std::vector& paths); + /// Same, according to a selector. + /// + /// The selector's base directory will not be part of the results, even if + /// it exists. + /// If it doesn't exist, see `FileSelector::allow_not_found`. + virtual Result GetFileInfo(const FileSelector& select) = 0; + + /// Async version of GetFileInfo + virtual Future GetFileInfoAsync(const std::vector& paths); + + /// Streaming async version of GetFileInfo + /// + /// The returned generator is not async-reentrant, i.e. you need to wait for + /// the returned future to complete before calling the generator again. + virtual FileInfoGenerator GetFileInfoGenerator(const FileSelector& select); + + /// Create a directory and subdirectories. 
+ /// + /// This function succeeds if the directory already exists. + virtual Status CreateDir(const std::string& path, bool recursive = true) = 0; + + /// Delete a directory and its contents, recursively. + virtual Status DeleteDir(const std::string& path) = 0; + + /// Delete a directory's contents, recursively. + /// + /// Like DeleteDir, but doesn't delete the directory itself. + /// Passing an empty path ("" or "/") is disallowed, see DeleteRootDirContents. + virtual Status DeleteDirContents(const std::string& path, + bool missing_dir_ok = false) = 0; + + /// Async version of DeleteDirContents. + virtual Future<> DeleteDirContentsAsync(const std::string& path, + bool missing_dir_ok = false); + + /// EXPERIMENTAL: Delete the root directory's contents, recursively. + /// + /// Implementations may decide to raise an error if this operation is + /// too dangerous. + // NOTE: may decide to remove this if it's deemed not useful + virtual Status DeleteRootDirContents() = 0; + + /// Delete a file. + virtual Status DeleteFile(const std::string& path) = 0; + /// Delete many files. + /// + /// The default implementation issues individual delete operations in sequence. + virtual Status DeleteFiles(const std::vector& paths); + + /// Move / rename a file or directory. + /// + /// If the destination exists: + /// - if it is a non-empty directory, an error is returned + /// - otherwise, if it has the same type as the source, it is replaced + /// - otherwise, behavior is unspecified (implementation-dependent). + virtual Status Move(const std::string& src, const std::string& dest) = 0; + + /// Copy a file. + /// + /// If the destination exists and is a directory, an error is returned. + /// Otherwise, it is replaced. + virtual Status CopyFile(const std::string& src, const std::string& dest) = 0; + + /// Open an input stream for sequential reading. + virtual Result> OpenInputStream( + const std::string& path) = 0; + /// Open an input stream for sequential reading. + /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputStream(const FileInfo& info); + + /// Open an input file for random access reading. + virtual Result> OpenInputFile( + const std::string& path) = 0; + /// Open an input file for random access reading. + /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputFile( + const FileInfo& info); + + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const std::string& path); + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const FileInfo& info); + + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const std::string& path); + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const FileInfo& info); + + /// Open an output stream for sequential writing. + /// + /// If the target already exists, existing data is truncated. + virtual Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) = 0; + Result> OpenOutputStream(const std::string& path); + + /// Open an output stream for appending. + /// + /// If the target doesn't exist, a new empty file is created. 
+  ///
+  /// Note: some filesystem implementations do not support efficient appending
+  /// to an existing file, in which case this method will return NotImplemented.
+  /// Consider writing to multiple files (using e.g. the dataset layer) instead.
+  virtual Result> OpenAppendStream(
+      const std::string& path,
+      const std::shared_ptr& metadata) = 0;
+  Result> OpenAppendStream(const std::string& path);
+
+ protected:
+  explicit FileSystem(io::IOContext io_context = io::default_io_context())
+      : io_context_(std::move(io_context)) {}
+
+  io::IOContext io_context_;
+  // Whether metadata operations (such as GetFileInfo or OpenInputStream)
+  // are cheap enough that the default async variants don't bother with
+  // a thread pool.
+  bool default_async_is_sync_ = true;
+};
+
+/// \brief A FileSystem implementation that delegates to another
+/// implementation after prepending a fixed base path.
+///
+/// This is useful to expose a logical view of a subtree of a filesystem,
+/// for example a directory in a LocalFileSystem.
+/// This works on abstract paths, i.e. paths using forward slashes and
+/// a single root "/". Windows paths are not guaranteed to work.
+/// This makes no security guarantee. For example, symlinks may make it
+/// possible to "escape" the subtree and access other parts of the
+/// underlying filesystem.
+class ARROW_EXPORT SubTreeFileSystem : public FileSystem {
+ public:
+  // This constructor may abort if base_path is invalid.
+  explicit SubTreeFileSystem(const std::string& base_path,
+                             std::shared_ptr base_fs);
+  ~SubTreeFileSystem() override;
+
+  std::string type_name() const override { return "subtree"; }
+  std::string base_path() const { return base_path_; }
+  std::shared_ptr base_fs() const { return base_fs_; }
+
+  Result NormalizePath(std::string path) override;
+  Result PathFromUri(const std::string& uri_string) const override;
+
+  bool Equals(const FileSystem& other) const override;
+
+  /// \cond FALSE
+  using FileSystem::GetFileInfo;
+  /// \endcond
+  Result GetFileInfo(const std::string& path) override;
+  Result GetFileInfo(const FileSelector& select) override;
+
+  FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;
+
+  Status CreateDir(const std::string& path, bool recursive = true) override;
+
+  Status DeleteDir(const std::string& path) override;
+  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
+  Status DeleteRootDirContents() override;
+
+  Status DeleteFile(const std::string& path) override;
+
+  Status Move(const std::string& src, const std::string& dest) override;
+
+  Status CopyFile(const std::string& src, const std::string& dest) override;
+
+  Result> OpenInputStream(
+      const std::string& path) override;
+  Result> OpenInputStream(const FileInfo& info) override;
+  Result> OpenInputFile(
+      const std::string& path) override;
+  Result> OpenInputFile(
+      const FileInfo& info) override;
+
+  Future> OpenInputStreamAsync(
+      const std::string& path) override;
+  Future> OpenInputStreamAsync(
+      const FileInfo& info) override;
+  Future> OpenInputFileAsync(
+      const std::string& path) override;
+  Future> OpenInputFileAsync(
+      const FileInfo& info) override;
+
+  Result> OpenOutputStream(
+      const std::string& path,
+      const std::shared_ptr& metadata = {}) override;
+  Result> OpenAppendStream(
+      const std::string& path,
+      const std::shared_ptr& metadata = {}) override;
+
+ protected:
+  SubTreeFileSystem() {}
+
+  const std::string base_path_;
+  std::shared_ptr base_fs_;
+
+  Result PrependBase(const std::string& s) const;
+ 
Result PrependBaseNonEmpty(const std::string& s) const; + Result StripBase(const std::string& s) const; + Status FixInfo(FileInfo* info) const; + + static Result NormalizeBasePath( + std::string base_path, const std::shared_ptr& base_fs); +}; + +/// \brief A FileSystem implementation that delegates to another +/// implementation but inserts latencies at various points. +class ARROW_EXPORT SlowFileSystem : public FileSystem { + public: + SlowFileSystem(std::shared_ptr base_fs, + std::shared_ptr latencies); + SlowFileSystem(std::shared_ptr base_fs, double average_latency); + SlowFileSystem(std::shared_ptr base_fs, double average_latency, + int32_t seed); + + std::string type_name() const override { return "slow"; } + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + using FileSystem::GetFileInfo; + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive = true) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + protected: + std::shared_ptr base_fs_; + std::shared_ptr latencies_; +}; + +/// \defgroup filesystem-factories Functions for creating FileSystem instances +/// +/// @{ + +/// \brief Create a new FileSystem by URI +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[out] out_path (optional) Path inside the filesystem. +/// \return out_fs FileSystem instance. +ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[in] io_context an IOContext which will be associated with the filesystem +/// \param[out] out_path (optional) Path inside the filesystem. +/// \return out_fs FileSystem instance. +ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. 
+ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. +ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// @} + +/// \brief Copy files, including from one FileSystem to another +/// +/// If a source and destination are resident in the same FileSystem FileSystem::CopyFile +/// will be used, otherwise the file will be opened as a stream in both FileSystems and +/// chunks copied from the source to the destination. No directories will be created. +ARROW_EXPORT +Status CopyFiles(const std::vector& sources, + const std::vector& destinations, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +/// \brief Copy selected files, including from one FileSystem to another +/// +/// Directories will be created under the destination base directory as needed. +ARROW_EXPORT +Status CopyFiles(const std::shared_ptr& source_fs, + const FileSelector& source_sel, + const std::shared_ptr& destination_fs, + const std::string& destination_base_dir, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +struct FileSystemGlobalOptions { + /// Path to a single PEM file holding all TLS CA certificates + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_file_path; + + /// Path to a directory holding TLS CA certificates in individual PEM files + /// named along the OpenSSL "hashed" format. + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_dir_path; +}; + +/// EXPERIMENTAL: optional global initialization routine +/// +/// This is for environments (such as manylinux) where the path +/// to TLS CA certificates needs to be configured at runtime. +ARROW_EXPORT +Status Initialize(const FileSystemGlobalOptions& options); + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h new file mode 100644 index 0000000000000000000000000000000000000000..e4a1edfd6bfb9b52e8663306e283862062449cbc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h @@ -0,0 +1,247 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
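A short, hedged sketch of the URI-based factory declared in filesystem.h above (the URI and the read-everything pattern are illustrative):

    #include "arrow/filesystem/filesystem.h"

    arrow::Result<std::shared_ptr<arrow::Buffer>> ReadAllFromUri(
        const std::string& uri) {
      std::string path;
      // e.g. uri = "gs://bucket/object"; `path` then receives "bucket/object".
      ARROW_ASSIGN_OR_RAISE(auto fs, arrow::fs::FileSystemFromUri(uri, &path));
      ARROW_ASSIGN_OR_RAISE(auto file, fs->OpenInputFile(path));
      ARROW_ASSIGN_OR_RAISE(int64_t size, file->GetSize());
      return file->ReadAt(0, size);
    }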
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "arrow/filesystem/filesystem.h"
+#include "arrow/util/uri.h"
+
+namespace arrow {
+namespace fs {
+namespace internal {
+
+// Opaque wrapper for GCS's library credentials to avoid exposing in Arrow headers.
+struct GcsCredentialsHolder;
+
+}  // namespace internal
+
+class GcsFileSystem;
+
+/// \brief Container for GCS Credentials and information necessary to recreate them.
+class ARROW_EXPORT GcsCredentials {
+ public:
+  bool Equals(const GcsCredentials& other) const;
+  bool anonymous() const { return anonymous_; }
+  const std::string& access_token() const { return access_token_; }
+  TimePoint expiration() const { return expiration_; }
+  const std::string& target_service_account() const { return target_service_account_; }
+  const std::string& json_credentials() const { return json_credentials_; }
+  const std::shared_ptr& holder() const {
+    return holder_;
+  }
+
+ private:
+  GcsCredentials() = default;
+  bool anonymous_ = false;
+  std::string access_token_;
+  TimePoint expiration_;
+  std::string target_service_account_;
+  std::string json_credentials_;
+  std::shared_ptr holder_;
+  friend class GcsFileSystem;
+  friend struct GcsOptions;
+};
+
+/// Options for the GcsFileSystem implementation.
+struct ARROW_EXPORT GcsOptions {
+  /// \brief Equivalent to GcsOptions::Defaults().
+  GcsOptions();
+  GcsCredentials credentials;
+
+  std::string endpoint_override;
+  std::string scheme;
+  /// \brief Location to use for creating buckets.
+  std::string default_bucket_location;
+
+  /// \brief If set, controls the total time allowed for retrying underlying
+  /// errors.
+  ///
+  /// The default policy is to retry for up to 15 minutes.
+  std::optional retry_limit_seconds;
+
+  /// \brief Default metadata for OpenOutputStream.
+  ///
+  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
+  std::shared_ptr default_metadata;
+
+  /// \brief The project to use for creating buckets.
+  ///
+  /// If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
+  /// variable. Most I/O operations do not need a project id; only applications
+  /// that create new buckets need one.
+  std::optional project_id;
+
+  bool Equals(const GcsOptions& other) const;
+
+  /// \brief Initialize with Google Default Credentials
+  ///
+  /// Create options configured to use [Application Default Credentials][aip/4110]. The
+  /// details of this mechanism are too involved to describe here, but suffice it to say
+  /// that applications can override any defaults using an environment variable
+  /// (`GOOGLE_APPLICATION_CREDENTIALS`), that the defaults work with most Google
+  /// Cloud Platform deployment environments (GCE, GKE, Cloud Run, etc.), and that they
+  /// have the same behavior as the `gcloud` CLI tool on your workstation.
+  ///
+  /// \see https://cloud.google.com/docs/authentication
+  ///
+  /// [aip/4110]: https://google.aip.dev/auth/4110
+  static GcsOptions Defaults();
+
+  /// \brief Initialize with anonymous credentials
+  static GcsOptions Anonymous();
+
+  /// \brief Initialize with access token
+  ///
+  /// These credentials are useful when using an out-of-band mechanism to fetch access
+  /// tokens. Note that access tokens are time limited; you will need to manually
+  /// refresh the tokens created by the out-of-band mechanism.
+  static GcsOptions FromAccessToken(const std::string& access_token,
+                                    TimePoint expiration);
+
+  /// \brief Initialize with service account impersonation
+  ///
+  /// Service account impersonation allows one principal (a user or service account) to
+  /// impersonate a service account. It requires that the calling principal has the
+  /// necessary permissions *on* the service account.
+  static GcsOptions FromImpersonatedServiceAccount(
+      const GcsCredentials& base_credentials, const std::string& target_service_account);
+
+  /// Creates service account credentials from a JSON object in string form.
+  ///
+  /// The @p json_object is expected to be in the format described by [aip/4112]. Such an
+  /// object contains the identity of a service account, as well as a private key that
+  /// can be used to sign tokens, proving that the caller holds the private key.
+  ///
+  /// In GCP one can create several "keys" for each service account, and these keys are
+  /// downloaded as a JSON "key file". The contents of such a file are in the format
+  /// required by this function. Remember that key files and their contents should be
+  /// treated like any other secret with security implications; think of them as
+  /// passwords (because they are!) and don't store or output them where unauthorized
+  /// persons may read them.
+  ///
+  /// Most applications should probably use default credentials, maybe pointing them to a
+  /// file with these contents. Using this function may be useful when the json object is
+  /// obtained from a Cloud Secret Manager or a similar service.
+  ///
+  /// [aip/4112]: https://google.aip.dev/auth/4112
+  static GcsOptions FromServiceAccountCredentials(const std::string& json_object);
+
+  /// Initialize from URIs such as "gs://bucket/object".
+  static Result FromUri(const arrow::internal::Uri& uri,
+                        std::string* out_path);
+  static Result FromUri(const std::string& uri, std::string* out_path);
+};
+
+/// \brief GCS-backed FileSystem implementation.
+///
+/// GCS (Google Cloud Storage - https://cloud.google.com/storage) is a scalable object
+/// storage system for any amount of data. The main abstractions in GCS are buckets and
+/// objects. A bucket is a namespace for objects; buckets can store any number of
+/// objects, and tens of millions or even billions are not uncommon. Each object
+/// contains a single blob of data, up to 5TiB in size. Buckets are typically configured
+/// to keep a single version of each object, but versioning can be enabled. Versioning
+/// is important because objects are immutable: once created, one cannot append data to
+/// the object or modify the object data in any way.
+///
+/// GCS buckets are in a global namespace: if a Google Cloud customer creates a bucket
+/// named `foo`, no other customer can create a bucket with the same name. Note that a
+/// principal (a user or service account) may only list the buckets they are entitled to,
+/// and then only within a project. It is not possible to list "all" the buckets.
+///
+/// Within each bucket, objects are in a flat namespace. GCS does not have folders or
+/// directories. However, following some conventions it is possible to emulate
+/// directories. To that end, this class uses the following conventions:
+///
+/// - All buckets are treated as directories at the "root"
+/// - Creating a root directory results in a new bucket being created, this may be slower
+///   than most GCS operations.
+/// - The class creates marker objects for a directory, using a metadata attribute to
+///   annotate the file.
+/// - GCS can list all the objects with a given prefix; this is used to emulate listing
+/// of directories.
+/// - In object lists GCS can summarize all the objects with a common prefix as a single
+/// entry; this is used to emulate non-recursive lists. Note that GCS list time is
+/// proportional to the number of objects in the prefix. Listing recursively takes
+/// almost the same time as a non-recursive list.
+///
+class ARROW_EXPORT GcsFileSystem : public FileSystem {
+ public:
+ ~GcsFileSystem() override = default;
+
+ std::string type_name() const override;
+ const GcsOptions& options() const;
+
+ bool Equals(const FileSystem& other) const override;
+ Result PathFromUri(const std::string& uri_string) const override;
+
+ Result GetFileInfo(const std::string& path) override;
+ Result GetFileInfo(const FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive) override;
+
+ Status DeleteDir(const std::string& path) override;
+
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
+
+ /// This is not implemented in GcsFileSystem, as it would be too dangerous.
+ Status DeleteRootDirContents() override;
+
+ Status DeleteFile(const std::string& path) override;
+
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result> OpenInputStream(
+ const std::string& path) override;
+ Result> OpenInputStream(const FileInfo& info) override;
+
+ Result> OpenInputFile(
+ const std::string& path) override;
+ Result> OpenInputFile(
+ const FileInfo& info) override;
+
+ Result> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr& metadata) override;
+
+ ARROW_DEPRECATED(
+ "Deprecated. "
+ "OpenAppendStream is unsupported on the GCS FileSystem.")
+ Result> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr& metadata) override;
+
+ /// Create a GcsFileSystem instance from the given options.
+ // TODO(ARROW-16884): make this return Result for consistency
+ static std::shared_ptr Make(
+ const GcsOptions& options, const io::IOContext& = io::default_io_context());
+
+ private:
+ explicit GcsFileSystem(const GcsOptions& options, const io::IOContext& io_context);
+
+ class Impl;
+ std::shared_ptr impl_;
+};
+
+} // namespace fs
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..798aac0ea9075fb15161187d7c0aac242d7ae2d2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/io/hdfs.h" +#include "arrow/util/uri.h" + +namespace arrow { +namespace fs { + +/// Options for the HDFS implementation. +struct ARROW_EXPORT HdfsOptions { + HdfsOptions() = default; + ~HdfsOptions() = default; + + /// Hdfs configuration options, contains host, port, driver + io::HdfsConnectionConfig connection_config; + + /// Used by Hdfs OpenWritable Interface. + int32_t buffer_size = 0; + int16_t replication = 3; + int64_t default_block_size = 0; + + void ConfigureEndPoint(std::string host, int port); + void ConfigureReplication(int16_t replication); + void ConfigureUser(std::string user_name); + void ConfigureBufferSize(int32_t buffer_size); + void ConfigureBlockSize(int64_t default_block_size); + void ConfigureKerberosTicketCachePath(std::string path); + void ConfigureExtraConf(std::string key, std::string val); + + bool Equals(const HdfsOptions& other) const; + + static Result FromUri(const ::arrow::internal::Uri& uri); + static Result FromUri(const std::string& uri); +}; + +/// HDFS-backed FileSystem implementation. +/// +/// implementation notes: +/// - This is a wrapper of arrow/io/hdfs, so we can use FileSystem API to handle hdfs. +class ARROW_EXPORT HadoopFileSystem : public FileSystem { + public: + ~HadoopFileSystem() override; + + std::string type_name() const override { return "hdfs"; } + HdfsOptions options() const; + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::GetFileInfo; + /// \endcond + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive = true) override; + + Status DeleteDir(const std::string& path) override; + + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + /// Create a HdfsFileSystem instance from the given options. 
+ static Result> Make( + const HdfsOptions& options, const io::IOContext& = io::default_io_context()); + + protected: + HadoopFileSystem(const HdfsOptions& options, const io::IOContext&); + + class Impl; + std::unique_ptr impl_; +}; + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h new file mode 100644 index 0000000000000000000000000000000000000000..108530c2b277872fc470f8135c2c4af05818ba04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" + +namespace arrow { +namespace internal { + +class Uri; + +} + +namespace fs { + +/// Options for the LocalFileSystem implementation. +struct ARROW_EXPORT LocalFileSystemOptions { + static constexpr int32_t kDefaultDirectoryReadahead = 16; + static constexpr int32_t kDefaultFileInfoBatchSize = 1000; + + /// Whether OpenInputStream and OpenInputFile return a mmap'ed file, + /// or a regular one. + bool use_mmap = false; + + /// Options related to `GetFileInfoGenerator` interface. + + /// EXPERIMENTAL: The maximum number of directories processed in parallel + /// by `GetFileInfoGenerator`. + int32_t directory_readahead = kDefaultDirectoryReadahead; + + /// EXPERIMENTAL: The maximum number of entries aggregated into each + /// FileInfoVector chunk by `GetFileInfoGenerator`. + /// + /// Since each FileInfo entry needs a separate `stat` system call, a + /// directory with a very large number of files may take a lot of time to + /// process entirely. By generating a FileInfoVector after this chunk + /// size is reached, we ensure FileInfo entries can start being consumed + /// from the FileInfoGenerator with less initial latency. + int32_t file_info_batch_size = kDefaultFileInfoBatchSize; + + /// \brief Initialize with defaults + static LocalFileSystemOptions Defaults(); + + bool Equals(const LocalFileSystemOptions& other) const; + + static Result FromUri(const ::arrow::internal::Uri& uri, + std::string* out_path); +}; + +/// \brief A FileSystem implementation accessing files on the local machine. +/// +/// This class handles only `/`-separated paths. If desired, conversion +/// from Windows backslash-separated paths should be done by the caller. +/// Details such as symlinks are abstracted away (symlinks are always +/// followed, except when deleting an entry). 
+class ARROW_EXPORT LocalFileSystem : public FileSystem { + public: + explicit LocalFileSystem(const io::IOContext& = io::default_io_context()); + explicit LocalFileSystem(const LocalFileSystemOptions&, + const io::IOContext& = io::default_io_context()); + ~LocalFileSystem() override; + + std::string type_name() const override { return "local"; } + + Result NormalizePath(std::string path) override; + Result PathFromUri(const std::string& uri_string) const override; + + bool Equals(const FileSystem& other) const override; + + LocalFileSystemOptions options() const { return options_; } + + /// \cond FALSE + using FileSystem::GetFileInfo; + /// \endcond + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive = true) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + protected: + LocalFileSystemOptions options_; +}; + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h new file mode 100644 index 0000000000000000000000000000000000000000..32d06e5910dfec578d2debd04a51f91bc34145c0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
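+
+// A hedged usage sketch of the MockFileSystem declared below (illustrative
+// only, not part of the vendored header; `mock_now` stands in for any
+// TimePoint value):
+//
+//   arrow::fs::internal::MockFileSystem fs(mock_now);
+//   ARROW_RETURN_NOT_OK(fs.CreateDir("dir"));
+//   ARROW_RETURN_NOT_OK(fs.CreateFile("dir/a.txt", "contents"));
+//   for (const auto& file : fs.AllFiles()) {  // lexicographic path order
+//     std::cout << file.full_path << "\n";
+//   }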
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow { +namespace fs { +namespace internal { + +struct MockDirInfo { + std::string full_path; + TimePoint mtime; + + bool operator==(const MockDirInfo& other) const { + return mtime == other.mtime && full_path == other.full_path; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockDirInfo&); +}; + +struct MockFileInfo { + std::string full_path; + TimePoint mtime; + std::string_view data; + + bool operator==(const MockFileInfo& other) const { + return mtime == other.mtime && full_path == other.full_path && data == other.data; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockFileInfo&); +}; + +/// A mock FileSystem implementation that holds its contents in memory. +/// +/// Useful for validating the FileSystem API, writing conformance suite, +/// and bootstrapping FileSystem-based APIs. +class ARROW_EXPORT MockFileSystem : public FileSystem { + public: + explicit MockFileSystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~MockFileSystem() override; + + std::string type_name() const override { return "mock"; } + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + // XXX It's not very practical to have to explicitly declare inheritance + // of default overrides. + using FileSystem::GetFileInfo; + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive = true) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + // Contents-dumping helpers to ease testing. + // Output is lexicographically-ordered by full path. + std::vector AllDirs(); + std::vector AllFiles(); + + // Create a File with a content from a string. + Status CreateFile(const std::string& path, std::string_view content, + bool recursive = true); + + // Create a MockFileSystem out of (empty) FileInfo. The content of every + // file is empty and of size 0. All directories will be created recursively. 
+ static Result> Make(TimePoint current_time, + const std::vector& infos); + + class Impl; + + protected: + std::unique_ptr impl_; +}; + +class ARROW_EXPORT MockAsyncFileSystem : public MockFileSystem { + public: + explicit MockAsyncFileSystem(TimePoint current_time, + const io::IOContext& io_context = io::default_io_context()) + : MockFileSystem(current_time, io_context) { + default_async_is_sync_ = false; + } + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; +}; + +} // namespace internal +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h new file mode 100644 index 0000000000000000000000000000000000000000..1da7afd3f9381a8c022107ef6a17db5fd334b8ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h @@ -0,0 +1,174 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/type_fwd.h" + +namespace arrow { +namespace fs { +namespace internal { + +constexpr char kSep = '/'; + +// Computations on abstract paths (not local paths with system-dependent behaviour). +// Abstract paths are typically used in URIs. + +// Split an abstract path into its individual components. +ARROW_EXPORT +std::vector SplitAbstractPath(const std::string& path, char sep = kSep); + +// Slice the individual components of an abstract path and combine them +// +// If offset or length are negative then an empty string is returned +// If offset is >= the number of components then an empty string is returned +// If offset + length is >= the number of components then length is truncated +ARROW_EXPORT +std::string SliceAbstractPath(const std::string& path, int offset, int length, + char sep = kSep); + +// Return the extension of the file +ARROW_EXPORT std::string GetAbstractPathExtension(const std::string& s); + +// Return the depth (number of components) of an abstract path +// +// Trailing slashes do not count towards depth +// Leading slashes do not count towards depth +// +// The root path ("/") has depth 0 +ARROW_EXPORT int GetAbstractPathDepth(std::string_view path); + +// Return the parent directory and basename of an abstract path. Both values may be +// empty. +ARROW_EXPORT +std::pair GetAbstractPathParent(const std::string& s); + +// Validate the components of an abstract path. +ARROW_EXPORT +Status ValidateAbstractPathParts(const std::vector& parts); + +// Append a non-empty stem to an abstract path. 
+ARROW_EXPORT
+std::string ConcatAbstractPath(std::string_view base, std::string_view stem);
+
+// Make path relative to base if it starts with base; otherwise error out.
+ARROW_EXPORT
+Result MakeAbstractPathRelative(const std::string& base,
+ const std::string& path);
+
+ARROW_EXPORT
+std::string EnsureLeadingSlash(std::string_view s);
+
+ARROW_EXPORT
+std::string_view RemoveLeadingSlash(std::string_view s);
+
+ARROW_EXPORT
+std::string EnsureTrailingSlash(std::string_view s);
+
+/// \brief Remove the trailing slash (if any) from the given path
+/// \param s the input path
+/// \param preserve_root if true, allow a path of just "/" to remain unchanged
+ARROW_EXPORT
+std::string_view RemoveTrailingSlash(std::string_view s, bool preserve_root = false);
+
+ARROW_EXPORT
+Status AssertNoTrailingSlash(std::string_view s);
+
+inline bool HasTrailingSlash(std::string_view s) {
+ return !s.empty() && s.back() == kSep;
+}
+
+inline bool HasLeadingSlash(std::string_view s) {
+ return !s.empty() && s.front() == kSep;
+}
+
+ARROW_EXPORT
+bool IsAncestorOf(std::string_view ancestor, std::string_view descendant);
+
+ARROW_EXPORT
+std::optional RemoveAncestor(std::string_view ancestor,
+ std::string_view descendant);
+
+/// Return a vector of ancestors between a base path and a descendant.
+/// For example,
+///
+/// AncestorsFromBasePath("a/b", "a/b/c/d/e") -> ["a/b/c", "a/b/c/d"]
+ARROW_EXPORT
+std::vector AncestorsFromBasePath(std::string_view base_path,
+ std::string_view descendant);
+
+/// Given a vector of paths of directories which must be created, produce the minimal
+/// subset for passing to CreateDir(recursive=true) by removing redundant parent
+/// directories.
+ARROW_EXPORT
+std::vector MinimalCreateDirSet(std::vector dirs);
+
+// Join the components of an abstract path.
+template
+std::string JoinAbstractPath(StringIt it, StringIt end, char sep = kSep) {
+ std::string path;
+ for (; it != end; ++it) {
+ if (it->empty()) continue;
+
+ if (!path.empty()) {
+ path += sep;
+ }
+ path += *it;
+ }
+ return path;
+}
+
+template
+std::string JoinAbstractPath(const StringRange& range, char sep = kSep) {
+ return JoinAbstractPath(range.begin(), range.end(), sep);
+}
+
+/// Convert slashes to backslashes on all platforms. Mostly useful for testing.
+ARROW_EXPORT
+std::string ToBackslashes(std::string_view s);
+
+/// Ensure a local path is abstract by converting backslashes to regular slashes
+/// on Windows. Return the path unchanged on other systems.
+ARROW_EXPORT
+std::string ToSlashes(std::string_view s);
+
+ARROW_EXPORT
+bool IsEmptyPath(std::string_view s);
+
+ARROW_EXPORT
+bool IsLikelyUri(std::string_view s);
+
+class ARROW_EXPORT Globber {
+ public:
+ ~Globber();
+ explicit Globber(std::string pattern);
+ bool Matches(const std::string& path);
+
+ protected:
+ struct Impl;
+ std::unique_ptr impl_;
+};
+
+} // namespace internal
+} // namespace fs
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..e270a6e1c469abdc8905b6f00da6510bbb585258
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h
@@ -0,0 +1,101 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.
See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+
+#include
+
+#include "arrow/filesystem/s3fs.h"
+#include "arrow/status.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/util.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace fs {
+
+// A Minio test server, managed as a child process
+
+class MinioTestServer {
+ public:
+ MinioTestServer();
+ ~MinioTestServer();
+
+ Status Start();
+
+ Status Stop();
+
+ std::string connect_string() const;
+
+ std::string access_key() const;
+
+ std::string secret_key() const;
+
+ private:
+ struct Impl;
+ std::unique_ptr impl_;
+};
+
+// A Minio "environment" that spawns Minio processes in advance, so as
+// to hide process launch latencies during testing.
+
+class MinioTestEnvironment : public ::testing::Environment {
+ public:
+ MinioTestEnvironment();
+ ~MinioTestEnvironment();
+
+ void SetUp() override;
+
+ Result> GetOneServer();
+
+ protected:
+ struct Impl;
+ std::unique_ptr impl_;
+};
+
+// A global test "environment", to ensure that the S3 API is initialized before
+// running unit tests.
+
+class S3Environment : public ::testing::Environment {
+ public:
+ // We set this environment variable to speed up tests by ensuring
+ // DefaultAWSCredentialsProviderChain does not query the (inaccessible)
+ // EC2 metadata endpoint.
+ // This must be done before spawning any Minio child process to avoid any race
+ // condition accessing environment variables.
+ S3Environment() : ec2_metadata_disabled_guard_("AWS_EC2_METADATA_DISABLED", "true") {}
+
+ void SetUp() override {
+ // Change this to increase logging during tests
+ S3GlobalOptions options;
+ options.log_level = S3LogLevel::Fatal;
+ ASSERT_OK(InitializeS3(options));
+ }
+
+ void TearDown() override { ASSERT_OK(FinalizeS3()); }
+
+ private:
+ EnvVarGuard ec2_metadata_disabled_guard_;
+};
+
+} // namespace fs
+} // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h
new file mode 100644
index 0000000000000000000000000000000000000000..13a0abde3231891fd309c7793793c6013886403c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h
@@ -0,0 +1,396 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Aws { +namespace Auth { + +class AWSCredentialsProvider; +class STSAssumeRoleCredentialsProvider; + +} // namespace Auth +namespace STS { +class STSClient; +} +} // namespace Aws + +namespace arrow { +namespace fs { + +/// Options for using a proxy for S3 +struct ARROW_EXPORT S3ProxyOptions { + std::string scheme; + std::string host; + int port = -1; + std::string username; + std::string password; + + /// Initialize from URI such as http://username:password@host:port + /// or http://host:port + static Result FromUri(const std::string& uri); + static Result FromUri(const ::arrow::internal::Uri& uri); + + bool Equals(const S3ProxyOptions& other) const; +}; + +enum class S3CredentialsKind : int8_t { + /// Anonymous access (no credentials used) + Anonymous, + /// Use default AWS credentials, configured through environment variables + Default, + /// Use explicitly-provided access key pair + Explicit, + /// Assume role through a role ARN + Role, + /// Use web identity token to assume role, configured through environment variables + WebIdentity +}; + +/// Pure virtual class for describing custom S3 retry strategies +class ARROW_EXPORT S3RetryStrategy { + public: + virtual ~S3RetryStrategy() = default; + + /// Simple struct where each field corresponds to a field in Aws::Client::AWSError + struct AWSErrorDetail { + /// Corresponds to AWSError::GetErrorType() + int error_type; + /// Corresponds to AWSError::GetMessage() + std::string message; + /// Corresponds to AWSError::GetExceptionName() + std::string exception_name; + /// Corresponds to AWSError::ShouldRetry() + bool should_retry; + }; + /// Returns true if the S3 request resulting in the provided error should be retried. + virtual bool ShouldRetry(const AWSErrorDetail& error, int64_t attempted_retries) = 0; + /// Returns the time in milliseconds the S3 client should sleep for until retrying. + virtual int64_t CalculateDelayBeforeNextRetry(const AWSErrorDetail& error, + int64_t attempted_retries) = 0; + /// Returns a stock AWS Default retry strategy. + static std::shared_ptr GetAwsDefaultRetryStrategy( + int64_t max_attempts); + /// Returns a stock AWS Standard retry strategy. + static std::shared_ptr GetAwsStandardRetryStrategy( + int64_t max_attempts); +}; + +/// Options for the S3FileSystem implementation. +struct ARROW_EXPORT S3Options { + /// \brief AWS region to connect to. + /// + /// If unset, the AWS SDK will choose a default value. The exact algorithm + /// depends on the SDK version. Before 1.8, the default is hardcoded + /// to "us-east-1". Since 1.8, several heuristics are used to determine + /// the region (environment variables, configuration profile, EC2 metadata + /// server). + std::string region; + + /// \brief Socket connection timeout, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 1 second). 
+ double connect_timeout = -1; + + /// \brief Socket read timeout on Windows and macOS, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 3 seconds). + /// This option is ignored on non-Windows, non-macOS systems. + double request_timeout = -1; + + /// If non-empty, override region with a connect string such as "localhost:9000" + // XXX perhaps instead take a URL like "http://localhost:9000"? + std::string endpoint_override; + /// S3 connection transport, default "https" + std::string scheme = "https"; + + /// ARN of role to assume + std::string role_arn; + /// Optional identifier for an assumed role session. + std::string session_name; + /// Optional external identifier to pass to STS when assuming a role + std::string external_id; + /// Frequency (in seconds) to refresh temporary credentials from assumed role + int load_frequency = 900; + + /// If connection is through a proxy, set options here + S3ProxyOptions proxy_options; + + /// AWS credentials provider + std::shared_ptr credentials_provider; + + /// Type of credentials being used. Set along with credentials_provider. + S3CredentialsKind credentials_kind = S3CredentialsKind::Default; + + /// Whether to use virtual addressing of buckets + /// + /// If true, then virtual addressing is always enabled. + /// If false, then virtual addressing is only enabled if `endpoint_override` is empty. + /// + /// This can be used for non-AWS backends that only support virtual hosted-style access. + bool force_virtual_addressing = false; + + /// Whether OutputStream writes will be issued in the background, without blocking. + bool background_writes = true; + + /// Whether to allow creation of buckets + /// + /// When S3FileSystem creates new buckets, it does not pass any non-default settings. + /// In AWS S3, the bucket and all objects will be not publicly visible, and there + /// will be no bucket policies and no resource tags. To have more control over how + /// buckets are created, use a different API to create them. + bool allow_bucket_creation = false; + + /// Whether to allow deletion of buckets + bool allow_bucket_deletion = false; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// Optional retry strategy to determine which error types should be retried, and the + /// delay between retries. + std::shared_ptr retry_strategy; + + S3Options(); + + /// Configure with the default AWS credentials provider chain. + void ConfigureDefaultCredentials(); + + /// Configure with anonymous credentials. This will only let you access public buckets. + void ConfigureAnonymousCredentials(); + + /// Configure with explicit access and secret key. + void ConfigureAccessKey(const std::string& access_key, const std::string& secret_key, + const std::string& session_token = ""); + + /// Configure with credentials from an assumed role. 
+ void ConfigureAssumeRoleCredentials( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// Configure with credentials from role assumed using a web identity token + void ConfigureAssumeRoleWithWebIdentityCredentials(); + + std::string GetAccessKey() const; + std::string GetSecretKey() const; + std::string GetSessionToken() const; + + bool Equals(const S3Options& other) const; + + /// \brief Initialize with default credentials provider chain + /// + /// This is recommended if you use the standard AWS environment variables + /// and/or configuration file. + static S3Options Defaults(); + + /// \brief Initialize with anonymous credentials. + /// + /// This will only let you access public buckets. + static S3Options Anonymous(); + + /// \brief Initialize with explicit access and secret key. + /// + /// Optionally, a session token may also be provided for temporary credentials + /// (from STS). + static S3Options FromAccessKey(const std::string& access_key, + const std::string& secret_key, + const std::string& session_token = ""); + + /// \brief Initialize from an assumed role. + static S3Options FromAssumeRole( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// \brief Initialize from an assumed role with web-identity. + /// Uses the AWS SDK which uses environment variables to + /// generate temporary credentials. + static S3Options FromAssumeRoleWithWebIdentity(); + + static Result FromUri(const ::arrow::internal::Uri& uri, + std::string* out_path = NULLPTR); + static Result FromUri(const std::string& uri, + std::string* out_path = NULLPTR); +}; + +/// S3-backed FileSystem implementation. +/// +/// Some implementation notes: +/// - buckets are special and the operations available on them may be limited +/// or more expensive than desired. +class ARROW_EXPORT S3FileSystem : public FileSystem { + public: + ~S3FileSystem() override; + + std::string type_name() const override { return "s3"; } + + /// Return the original S3 options when constructing the filesystem + S3Options options() const; + /// Return the actual region this filesystem connects to + std::string region() const; + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::GetFileInfo; + /// \endcond + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive = true) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + Future<> DeleteDirContentsAsync(const std::string& path, + bool missing_dir_ok = false) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + /// Create a sequential input stream for reading from a S3 object. + /// + /// NOTE: Reads from the stream will be synchronous and unbuffered. 
+ /// You may want to wrap the stream in a BufferedInputStream or use
+ /// a custom readahead strategy to avoid idle waits.
+ Result> OpenInputStream(
+ const std::string& path) override;
+ /// Create a sequential input stream for reading from a S3 object.
+ ///
+ /// This override avoids a HEAD request by assuming the FileInfo
+ /// contains correct information.
+ Result> OpenInputStream(const FileInfo& info) override;
+
+ /// Create a random access file for reading from a S3 object.
+ ///
+ /// See OpenInputStream for performance notes.
+ Result> OpenInputFile(
+ const std::string& path) override;
+ /// Create a random access file for reading from a S3 object.
+ ///
+ /// This override avoids a HEAD request by assuming the FileInfo
+ /// contains correct information.
+ Result> OpenInputFile(
+ const FileInfo& info) override;
+
+ /// Create a sequential output stream for writing to a S3 object.
+ ///
+ /// NOTE: Writes to the stream will be buffered. Depending on
+ /// S3Options.background_writes, they can be synchronous or not.
+ /// It is recommended to enable background_writes unless you prefer
+ /// implementing your own background execution strategy.
+ Result> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr& metadata = {}) override;
+
+ Result> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr& metadata = {}) override;
+
+ /// Create a S3FileSystem instance from the given options.
+ static Result> Make(
+ const S3Options& options, const io::IOContext& = io::default_io_context());
+
+ protected:
+ explicit S3FileSystem(const S3Options& options, const io::IOContext&);
+
+ class Impl;
+ std::shared_ptr impl_;
+};
+
+enum class S3LogLevel : int8_t { Off, Fatal, Error, Warn, Info, Debug, Trace };
+
+struct ARROW_EXPORT S3GlobalOptions {
+ S3LogLevel log_level;
+ /// The number of threads to configure when creating AWS' I/O event loop
+ ///
+ /// Defaults to 1 as recommended by AWS' documentation when the number of
+ /// connections is expected to be, at most, in the hundreds.
+ ///
+ /// For more details see Aws::Crt::Io::EventLoopGroup
+ int num_event_loop_threads = 1;
+
+ /// \brief Initialize with default options
+ ///
+ /// For log_level, this method first tries to extract a suitable value from the
+ /// environment variable ARROW_S3_LOG_LEVEL.
+ static S3GlobalOptions Defaults();
+};
+
+/// \brief Initialize the S3 APIs with the specified set of options.
+///
+/// It is required to call this function at least once before using S3FileSystem.
+///
+/// Once this function is called, you MUST call FinalizeS3 before the end of the
+/// application in order to avoid a segmentation fault at shutdown.
+ARROW_EXPORT
+Status InitializeS3(const S3GlobalOptions& options);
+
+/// \brief Ensure the S3 APIs are initialized, but only if not already done.
+///
+/// If necessary, this will call InitializeS3() with some default options.
+ARROW_EXPORT
+Status EnsureS3Initialized();
+
+/// Whether S3 was initialized, and not finalized.
+ARROW_EXPORT
+bool IsS3Initialized();
+
+/// Whether S3 was finalized.
+ARROW_EXPORT
+bool IsS3Finalized();
+
+/// \brief Shut down the S3 APIs.
+///
+/// This can wait for some concurrent S3 calls to finish so as to avoid
+/// race conditions.
+/// After this function has been called, all S3 calls will fail with an error.
+///
+/// Calls to InitializeS3() and FinalizeS3() should be serialized by the
+/// application (this also applies to EnsureS3Initialized() and
+/// EnsureS3Finalized()).
+ARROW_EXPORT +Status FinalizeS3(); + +/// \brief Ensure the S3 APIs are shutdown, but only if not already done. +/// +/// If necessary, this will call FinalizeS3(). +ARROW_EXPORT +Status EnsureS3Finalized(); + +ARROW_EXPORT +Result ResolveS3BucketRegion(const std::string& bucket); + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..62b488e159a248186279cd3940bb5782a2d31f72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h @@ -0,0 +1,252 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/filesystem/mockfs.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/counting_semaphore.h" + +namespace arrow { +namespace fs { + +static constexpr double kTimeSlack = 2.0; // In seconds + +static inline FileInfo File(std::string path) { + return FileInfo(std::move(path), FileType::File); +} + +static inline FileInfo Dir(std::string path) { + return FileInfo(std::move(path), FileType::Directory); +} + +// A subclass of MockFileSystem that blocks operations until an unlock method is +// called. +// +// This is intended for testing fine-grained ordering of filesystem operations. +// +// N.B. Only OpenOutputStream supports gating at the moment but this is simply because +// it is all that has been needed so far. Feel free to add support for more methods +// as required. 
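+// A minimal usage sketch (illustrative only; `mock_time` stands in for any
+// TimePoint value, and error handling is elided):
+//
+//   GatedMockFilesystem fs(mock_time);
+//   std::thread writer([&fs] {
+//     auto stream = fs.OpenOutputStream("data.bin");  // gated until unlocked
+//   });
+//   ASSERT_OK(fs.WaitForOpenOutputStream(1));  // one call is now waiting
+//   ASSERT_OK(fs.UnlockOpenOutputStream(1));   // release it
+//   writer.join();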
+class ARROW_TESTING_EXPORT GatedMockFilesystem : public internal::MockFileSystem { + public: + GatedMockFilesystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~GatedMockFilesystem() override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + // Wait until at least num_waiters are waiting on OpenOutputStream + Status WaitForOpenOutputStream(uint32_t num_waiters); + // Unlock `num_waiters` individual calls to OpenOutputStream + Status UnlockOpenOutputStream(uint32_t num_waiters); + + private: + util::CountingSemaphore open_output_sem_; +}; + +ARROW_TESTING_EXPORT +void CreateFile(FileSystem* fs, const std::string& path, const std::string& data); + +// Sort a vector of FileInfo by lexicographic path order +ARROW_TESTING_EXPORT +void SortInfos(FileInfoVector* infos); + +// Create a copy of a FileInfo vector sorted by lexicographic path order +ARROW_TESTING_EXPORT +FileInfoVector SortedInfos(const FileInfoVector& infos); + +ARROW_TESTING_EXPORT +void CollectFileInfoGenerator(FileInfoGenerator gen, FileInfoVector* out_infos); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileContents(FileSystem* fs, const std::string& path, + const std::string& expected_data); + +template +void AssertDurationBetween(Duration d, double min_secs, double max_secs) { + auto seconds = std::chrono::duration_cast>(d); + ASSERT_GE(seconds.count(), min_secs); + ASSERT_LE(seconds.count(), max_secs); +} + +// Generic tests for FileSystem implementations. +// To use this class, subclass both from it and ::testing::Test, +// implement GetEmptyFileSystem(), and use GENERIC_FS_TEST_FUNCTIONS() +// to define the various tests. 
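+// For instance (a sketch of that pattern; the class and filesystem chosen
+// here are illustrative):
+//
+//   class TestMockFS : public GenericFileSystemTest, public ::testing::Test {
+//    protected:
+//     std::shared_ptr<FileSystem> GetEmptyFileSystem() override {
+//       return std::make_shared<internal::MockFileSystem>(TimePoint{});
+//     }
+//   };
+//   GENERIC_FS_TEST_FUNCTIONS(TestMockFS)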
+class ARROW_TESTING_EXPORT GenericFileSystemTest { + public: + virtual ~GenericFileSystemTest(); + + void TestEmpty(); + void TestNormalizePath(); + void TestCreateDir(); + void TestDeleteDir(); + void TestDeleteDirContents(); + void TestDeleteRootDirContents(); + void TestDeleteFile(); + void TestDeleteFiles(); + void TestMoveFile(); + void TestMoveDir(); + void TestCopyFile(); + void TestGetFileInfo(); + void TestGetFileInfoVector(); + void TestGetFileInfoSelector(); + void TestGetFileInfoSelectorWithRecursion(); + void TestGetFileInfoAsync(); + void TestGetFileInfoGenerator(); + void TestOpenOutputStream(); + void TestOpenAppendStream(); + void TestOpenInputStream(); + void TestOpenInputStreamWithFileInfo(); + void TestOpenInputStreamAsync(); + void TestOpenInputFile(); + void TestOpenInputFileWithFileInfo(); + void TestOpenInputFileAsync(); + void TestSpecialChars(); + + protected: + // This function should return the filesystem under test. + virtual std::shared_ptr GetEmptyFileSystem() = 0; + + // Override the following functions to specify deviations from expected + // filesystem semantics. + // - Whether the filesystem may "implicitly" create intermediate directories + virtual bool have_implicit_directories() const { return false; } + // - Whether the filesystem may allow writing a file "over" a directory + virtual bool allow_write_file_over_dir() const { return false; } + // - Whether the filesystem allows reading a directory + virtual bool allow_read_dir_as_file() const { return false; } + // - Whether the filesystem allows moving a directory + virtual bool allow_move_dir() const { return true; } + // - Whether the filesystem allows moving a directory "over" a non-empty destination + virtual bool allow_move_dir_over_non_empty_dir() const { return false; } + // - Whether the filesystem allows appending to a file + virtual bool allow_append_to_file() const { return true; } + // - Whether the filesystem allows appending to a nonexistent file + virtual bool allow_append_to_new_file() const { return true; } + // - Whether the filesystem supports directory modification times + virtual bool have_directory_mtimes() const { return true; } + // - Whether some directory tree deletion tests may fail randomly + virtual bool have_flaky_directory_tree_deletion() const { return false; } + // - Whether the filesystem stores some metadata alongside files + virtual bool have_file_metadata() const { return false; } + + void TestEmpty(FileSystem* fs); + void TestNormalizePath(FileSystem* fs); + void TestCreateDir(FileSystem* fs); + void TestDeleteDir(FileSystem* fs); + void TestDeleteDirContents(FileSystem* fs); + void TestDeleteRootDirContents(FileSystem* fs); + void TestDeleteFile(FileSystem* fs); + void TestDeleteFiles(FileSystem* fs); + void TestMoveFile(FileSystem* fs); + void TestMoveDir(FileSystem* fs); + void TestCopyFile(FileSystem* fs); + void TestGetFileInfo(FileSystem* fs); + void TestGetFileInfoVector(FileSystem* fs); + void TestGetFileInfoSelector(FileSystem* fs); + void TestGetFileInfoSelectorWithRecursion(FileSystem* fs); + void TestGetFileInfoAsync(FileSystem* fs); + void TestGetFileInfoGenerator(FileSystem* fs); + void TestOpenOutputStream(FileSystem* fs); + void TestOpenAppendStream(FileSystem* fs); + void TestOpenInputStream(FileSystem* fs); + void TestOpenInputStreamWithFileInfo(FileSystem* fs); + void TestOpenInputStreamAsync(FileSystem* fs); + void TestOpenInputFile(FileSystem* fs); + void TestOpenInputFileWithFileInfo(FileSystem* fs); + void TestOpenInputFileAsync(FileSystem* 
fs); + void TestSpecialChars(FileSystem* fs); +}; + +#define GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NAME) \ + TEST_MACRO(TEST_CLASS, NAME) { this->Test##NAME(); } + +#define GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_MACRO, TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, Empty) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NormalizePath) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CreateDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteRootDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFiles) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CopyFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoVector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelectorWithRecursion) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoGenerator) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenOutputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenAppendStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, SpecialChars) + +#define GENERIC_FS_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_F, TEST_CLASS) + +#define GENERIC_FS_TYPED_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TYPED_TEST, TEST_CLASS) + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..892f7ad2e1b16e9e426b6b7c82f24ca26094de38 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace fs { + +/// \brief FileSystem entry type +enum class FileType : int8_t { + /// Entry is not found + NotFound, + /// Entry exists but its type is unknown + /// + /// This can designate a special file such as a Unix socket or character + /// device, or Windows NUL / CON / ... + Unknown, + /// Entry is a regular file + File, + /// Entry is a directory + Directory +}; + +struct FileInfo; + +struct FileSelector; + +class FileSystem; +class SubTreeFileSystem; +class SlowFileSystem; +class LocalFileSystem; +class S3FileSystem; +class GcsFileSystem; + +} // namespace fs +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h new file mode 100644 index 0000000000000000000000000000000000000000..da88ee22f8291f81da3046e3c6e5844a5021be4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
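+
+// A hedged sketch of the writer API declared below (illustrative; assumes an
+// existing arrow::Table `table` and an open arrow::io::OutputStream* `dst`):
+//
+//   auto props = arrow::ipc::feather::WriteProperties::Defaults();
+//   props.compression = arrow::Compression::ZSTD;  // requires ZSTD support
+//   ARROW_RETURN_NOT_OK(arrow::ipc::feather::WriteTable(table, dst, props));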
+ +// Public API for the "Feather" file format, originally created at +// http://github.com/wesm/feather + +#pragma once + +#include +#include +#include +#include + +#include "arrow/ipc/options.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Schema; +class Status; +class Table; + +namespace io { + +class OutputStream; +class RandomAccessFile; + +} // namespace io + +namespace ipc { +namespace feather { + +static constexpr const int kFeatherV1Version = 2; +static constexpr const int kFeatherV2Version = 3; + +// ---------------------------------------------------------------------- +// Metadata accessor classes + +/// \class Reader +/// \brief An interface for reading columns from Feather files +class ARROW_EXPORT Reader { + public: + virtual ~Reader() = default; + + /// \brief Open a Feather file from a RandomAccessFile interface + /// + /// \param[in] source a RandomAccessFile instance + /// \return the table reader + static Result> Open( + const std::shared_ptr& source); + + /// \brief Open a Feather file from a RandomAccessFile interface + /// with IPC Read options + /// + /// \param[in] source a RandomAccessFile instance + /// \param[in] options IPC Read options + /// \return the table reader + static Result> Open( + const std::shared_ptr& source, const IpcReadOptions& options); + + /// \brief Return the version number of the Feather file + virtual int version() const = 0; + + virtual std::shared_ptr schema() const = 0; + + /// \brief Read all columns from the file as an arrow::Table. + /// + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(std::shared_ptr* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. + /// + /// \param[in] indices the column indices to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector& indices, std::shared_ptr
* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. + /// + /// \param[in] names the column names to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector& names, + std::shared_ptr
* out) = 0; +}; + +struct ARROW_EXPORT WriteProperties { + static WriteProperties Defaults(); + + static WriteProperties DefaultsV1() { + WriteProperties props = Defaults(); + props.version = kFeatherV1Version; + return props; + } + + /// Feather file version number + /// + /// version 2: "Feather V1" Apache Arrow <= 0.16.0 + /// version 3: "Feather V2" Apache Arrow > 0.16.0 + int version = kFeatherV2Version; + + // Parameters for Feather V2 only + + /// Number of rows per intra-file chunk. Use smaller chunksize when you need + /// faster random row access + int64_t chunksize = 1LL << 16; + + /// Compression type to use. Only UNCOMPRESSED, LZ4_FRAME, and ZSTD are + /// supported. The default compression returned by Defaults() is LZ4 if the + /// project is built with support for it, otherwise + /// UNCOMPRESSED. UNCOMPRESSED is set as the object default here so that if + /// WriteProperties::Defaults() is not used, the default constructor for + /// WriteProperties will work regardless of the options used to build the C++ + /// project. + Compression::type compression = Compression::UNCOMPRESSED; + + /// Compressor-specific compression level + int compression_level = ::arrow::util::kUseDefaultCompressionLevel; +}; + +ARROW_EXPORT +Status WriteTable(const Table& table, io::OutputStream* dst, + const WriteProperties& properties = WriteProperties::Defaults()); + +} // namespace feather +} // namespace ipc +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h new file mode 100644 index 0000000000000000000000000000000000000000..3a730ee6a3f1963e2f7a486f8fac3ab4472ddf74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
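+
+// A hedged usage sketch of the helpers declared below (illustrative; this is
+// an internal test-support API and may change without notice):
+//
+//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> arr,
+//                         arrow::ipc::internal::json::ArrayFromJSON(
+//                             arrow::int32(), "[1, 2, null, 4]"));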
+ +// Implement a simple JSON representation format for arrays + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; + +namespace ipc { +namespace internal { +namespace json { + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const std::string& json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const char* json); + +ARROW_EXPORT +Status ChunkedArrayFromJSON(const std::shared_ptr& type, + const std::vector& json_strings, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictArrayFromJSON(const std::shared_ptr&, std::string_view indices_json, + std::string_view dictionary_json, std::shared_ptr* out); + +ARROW_EXPORT +Status ScalarFromJSON(const std::shared_ptr&, std::string_view json, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictScalarFromJSON(const std::shared_ptr&, std::string_view index_json, + std::string_view dictionary_json, std::shared_ptr* out); + +} // namespace json +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd72ce993ed28ddfd1f894af35eeefbbdce6050 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h @@ -0,0 +1,565 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// C++ object model and user API for interprocess schema messaging + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +struct IpcWriteOptions; + +// Read interface classes. 
We do not fully deserialize the flatbuffers so that
+// individual field metadata can be retrieved from a very large schema without
+// deserializing the entire schema.
+
+/// \class Message
+/// \brief An IPC message including metadata and body
+class ARROW_EXPORT Message {
+ public:
+  /// \brief Construct message, but do not validate
+  ///
+  /// Use at your own risk; Message::Open has more metadata validation
+  Message(std::shared_ptr<Buffer> metadata, std::shared_ptr<Buffer> body);
+
+  ~Message();
+
+  /// \brief Create and validate a Message instance from two buffers
+  ///
+  /// \param[in] metadata a buffer containing the Flatbuffer metadata
+  /// \param[in] body a buffer containing the message body, which may be null
+  /// \return the created message
+  static Result<std::unique_ptr<Message>> Open(std::shared_ptr<Buffer> metadata,
+                                               std::shared_ptr<Buffer> body);
+
+  /// \brief Read message body and create Message given Flatbuffer metadata
+  /// \param[in] metadata containing a serialized Message flatbuffer
+  /// \param[in] stream an InputStream
+  /// \return the created Message
+  ///
+  /// \note If stream supports zero-copy, this is zero-copy
+  static Result<std::unique_ptr<Message>> ReadFrom(std::shared_ptr<Buffer> metadata,
+                                                   io::InputStream* stream);
+
+  /// \brief Read message body from position in file, and create Message given
+  /// the Flatbuffer metadata
+  /// \param[in] offset the position in the file where the message body starts.
+  /// \param[in] metadata containing a serialized Message flatbuffer
+  /// \param[in] file the seekable file interface to read from
+  /// \return the created Message
+  ///
+  /// \note If file supports zero-copy, this is zero-copy
+  static Result<std::unique_ptr<Message>> ReadFrom(const int64_t offset,
+                                                   std::shared_ptr<Buffer> metadata,
+                                                   io::RandomAccessFile* file);
+
+  /// \brief Return true if message type and contents are equal
+  ///
+  /// \param other another message
+  /// \return true if contents equal
+  bool Equals(const Message& other) const;
+
+  /// \brief the Message metadata
+  ///
+  /// \return buffer
+  std::shared_ptr<Buffer> metadata() const;
+
+  /// \brief Custom metadata serialized in metadata Flatbuffer. Returns nullptr
+  /// when none set
+  const std::shared_ptr<const KeyValueMetadata>& custom_metadata() const;
+
+  /// \brief the Message body, if any
+  ///
+  /// \return buffer is null if no body
+  std::shared_ptr<Buffer> body() const;
+
+  /// \brief The expected body length according to the metadata, for
+  /// verification purposes
+  int64_t body_length() const;
+
+  /// \brief The Message type
+  MessageType type() const;
+
+  /// \brief The Message metadata version
+  MetadataVersion metadata_version() const;
+
+  const void* header() const;
+
+  /// \brief Write length-prefixed metadata and body to output stream
+  ///
+  /// \param[in] file output stream to write to
+  /// \param[in] options IPC writing options including alignment
+  /// \param[out] output_length the number of bytes written
+  /// \return Status
+  Status SerializeTo(io::OutputStream* file, const IpcWriteOptions& options,
+                     int64_t* output_length) const;
+
+  /// \brief Return true if the Message metadata passes Flatbuffer validation
+  bool Verify() const;
+
+  /// \brief Whether a given message type needs a body.
+  static bool HasBody(MessageType type) {
+    return type != MessageType::NONE && type != MessageType::SCHEMA;
+  }
+
+ private:
+  // Hide serialization details from user API
+  class MessageImpl;
+  std::unique_ptr<MessageImpl> impl_;
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Message);
+};
+
+ARROW_EXPORT std::string FormatMessageType(MessageType type);
+
+/// \class MessageDecoderListener
+/// \brief An abstract class to listen to events from MessageDecoder.
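+///
+/// For illustration only (not part of this header), a listener that collects
+/// every decoded message could be a small subclass; only OnMessageDecoded()
+/// must be overridden:
+///
+/// ~~~{.cpp}
+/// class CollectingListener : public arrow::ipc::MessageDecoderListener {
+///  public:
+///   arrow::Status OnMessageDecoded(
+///       std::unique_ptr<arrow::ipc::Message> message) override {
+///     messages_.push_back(std::move(message));
+///     return arrow::Status::OK();
+///   }
+///
+///   std::vector<std::unique_ptr<arrow::ipc::Message>> messages_;
+/// };
+/// ~~~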
+///
+/// This API is EXPERIMENTAL.
+///
+/// \since 0.17.0
+class ARROW_EXPORT MessageDecoderListener {
+ public:
+  virtual ~MessageDecoderListener() = default;
+
+  /// \brief Called when a message is decoded.
+  ///
+  /// MessageDecoder calls this method when it decodes a message. This
+  /// method is called multiple times when the target stream has
+  /// multiple messages.
+  ///
+  /// \param[in] message a decoded message
+  /// \return Status
+  virtual Status OnMessageDecoded(std::unique_ptr<Message> message) = 0;
+
+  /// \brief Called when the decoder state is changed to
+  /// MessageDecoder::State::INITIAL.
+  ///
+  /// The default implementation just returns arrow::Status::OK().
+  ///
+  /// \return Status
+  virtual Status OnInitial();
+
+  /// \brief Called when the decoder state is changed to
+  /// MessageDecoder::State::METADATA_LENGTH.
+  ///
+  /// The default implementation just returns arrow::Status::OK().
+  ///
+  /// \return Status
+  virtual Status OnMetadataLength();
+
+  /// \brief Called when the decoder state is changed to
+  /// MessageDecoder::State::METADATA.
+  ///
+  /// The default implementation just returns arrow::Status::OK().
+  ///
+  /// \return Status
+  virtual Status OnMetadata();
+
+  /// \brief Called when the decoder state is changed to
+  /// MessageDecoder::State::BODY.
+  ///
+  /// The default implementation just returns arrow::Status::OK().
+  ///
+  /// \return Status
+  virtual Status OnBody();
+
+  /// \brief Called when the decoder state is changed to
+  /// MessageDecoder::State::EOS.
+  ///
+  /// The default implementation just returns arrow::Status::OK().
+  ///
+  /// \return Status
+  virtual Status OnEOS();
+};
+
+/// \class AssignMessageDecoderListener
+/// \brief Assign a message decoded by MessageDecoder.
+///
+/// This API is EXPERIMENTAL.
+///
+/// \since 0.17.0
+class ARROW_EXPORT AssignMessageDecoderListener : public MessageDecoderListener {
+ public:
+  /// \brief Construct a listener that assigns a decoded message to the
+  /// specified location.
+  ///
+  /// \param[in] message a location to store the received message
+  explicit AssignMessageDecoderListener(std::unique_ptr<Message>* message)
+      : message_(message) {}
+
+  virtual ~AssignMessageDecoderListener() = default;
+
+  Status OnMessageDecoded(std::unique_ptr<Message> message) override {
+    *message_ = std::move(message);
+    return Status::OK();
+  }
+
+ private:
+  std::unique_ptr<Message>* message_;
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(AssignMessageDecoderListener);
+};
+
+/// \class MessageDecoder
+/// \brief Push style message decoder that receives data from the user.
+///
+/// This API is EXPERIMENTAL.
+///
+/// \since 0.17.0
+class ARROW_EXPORT MessageDecoder {
+ public:
+  /// \brief State for reading a message
+  enum State {
+    /// The initial state. It requires one of the following as the next data:
+    ///
+    /// * int32_t continuation token
+    /// * int32_t end-of-stream mark (== 0)
+    /// * int32_t metadata length (backward compatibility for
+    ///   reading old IPC messages produced prior to version 0.15.0)
+    INITIAL,
+
+    /// It requires an int32_t metadata length.
+    METADATA_LENGTH,
+
+    /// It requires metadata.
+    METADATA,
+
+    /// It requires a message body.
+    BODY,
+
+    /// The end-of-stream state. No more data is processed.
+    EOS,
+  };
+
+  /// \brief Construct a message decoder.
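+  ///
+  /// For example, decoding one message held in a single Buffer might look like
+  /// this sketch (`buffer` is assumed; error handling via ARROW_RETURN_NOT_OK):
+  ///
+  /// ~~~{.cpp}
+  /// std::unique_ptr<arrow::ipc::Message> message;
+  /// auto listener =
+  ///     std::make_shared<arrow::ipc::AssignMessageDecoderListener>(&message);
+  /// arrow::ipc::MessageDecoder decoder(listener);
+  /// ARROW_RETURN_NOT_OK(decoder.Consume(buffer));
+  /// // On success, `message` holds the decoded message (if one was complete).
+  /// ~~~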
+  ///
+  /// \param[in] listener a MessageDecoderListener that responds to events from
+  /// the decoder
+  /// \param[in] pool an optional MemoryPool to copy metadata on the
+  /// CPU, if required
+  /// \param[in] skip_body if true the body will be skipped even if the message has a body
+  explicit MessageDecoder(std::shared_ptr<MessageDecoderListener> listener,
+                          MemoryPool* pool = default_memory_pool(),
+                          bool skip_body = false);
+
+  /// \brief Construct a message decoder with the specified state.
+  ///
+  /// This is a constructor for advanced users who know how to decode
+  /// Messages.
+  ///
+  /// \param[in] listener a MessageDecoderListener that responds to events from
+  /// the decoder
+  /// \param[in] initial_state an initial state of the decoder
+  /// \param[in] initial_next_required_size the number of bytes needed
+  /// to run the next action
+  /// \param[in] pool an optional MemoryPool to copy metadata on the
+  /// CPU, if required
+  /// \param[in] skip_body if true the body will be skipped even if the message has a body
+  MessageDecoder(std::shared_ptr<MessageDecoderListener> listener, State initial_state,
+                 int64_t initial_next_required_size,
+                 MemoryPool* pool = default_memory_pool(), bool skip_body = false);
+
+  virtual ~MessageDecoder();
+
+  /// \brief Feed raw data to the decoder.
+  ///
+  /// If the decoder can decode one or more messages from the data, it
+  /// calls listener->OnMessageDecoded() once per decoded message.
+  ///
+  /// If the state of the decoder changes, the corresponding callbacks
+  /// on the listener are called:
+  ///
+  /// * MessageDecoder::State::INITIAL: listener->OnInitial()
+  /// * MessageDecoder::State::METADATA_LENGTH: listener->OnMetadataLength()
+  /// * MessageDecoder::State::METADATA: listener->OnMetadata()
+  /// * MessageDecoder::State::BODY: listener->OnBody()
+  /// * MessageDecoder::State::EOS: listener->OnEOS()
+  ///
+  /// \param[in] data the raw data to be processed. This data isn't
+  /// copied. The passed memory must be kept alive through message
+  /// processing.
+  /// \param[in] size raw data size.
+  /// \return Status
+  Status Consume(const uint8_t* data, int64_t size);
+
+  /// \brief Feed data to the decoder as a Buffer.
+  ///
+  /// If the decoder can decode one or more messages from the Buffer,
+  /// it calls listener->OnMessageDecoded() once per decoded message.
+  ///
+  /// \param[in] buffer a Buffer to be processed.
+  /// \return Status
+  Status Consume(std::shared_ptr<Buffer> buffer);
+
+  /// \brief Return the number of bytes needed to advance the state of
+  /// the decoder.
+  ///
+  /// This method is provided for users who want to optimize performance.
+  /// Normal users don't need to use this method.
+  ///
+  /// Here is an example usage for normal users:
+  ///
+  /// ~~~{.cpp}
+  /// decoder.Consume(buffer1);
+  /// decoder.Consume(buffer2);
+  /// decoder.Consume(buffer3);
+  /// ~~~
+  ///
+  /// The decoder has an internal buffer. If the consumed data isn't enough
+  /// to advance the state of the decoder, the consumed data is copied to
+  /// the internal buffer. This causes performance overhead.
+  ///
+  /// If you pass exactly next_required_size() bytes to each Consume()
+  /// call, the decoder doesn't use its internal buffer. This improves
+  /// performance.
+  ///
+  /// Here is an example usage that avoids the internal buffer:
+  ///
+  /// ~~~{.cpp}
+  /// buffer1 = get_data(decoder.next_required_size());
+  /// decoder.Consume(buffer1);
+  /// buffer2 = get_data(decoder.next_required_size());
+  /// decoder.Consume(buffer2);
+  /// ~~~
+  ///
+  /// Users can also use this method to avoid creating small
+  /// chunks. The message body must be contiguous data. If users pass
+  /// small chunks to the decoder, the decoder needs to concatenate the
+  /// small chunks internally. This causes performance overhead.
+  ///
+  /// Here is an example usage to reduce small chunks:
+  ///
+  /// ~~~{.cpp}
+  /// buffer = AllocateResizableBuffer();
+  /// while ((small_chunk = get_data(&small_chunk_size))) {
+  ///   auto current_buffer_size = buffer->size();
+  ///   buffer->Resize(current_buffer_size + small_chunk_size);
+  ///   memcpy(buffer->mutable_data() + current_buffer_size,
+  ///          small_chunk, small_chunk_size);
+  ///   if (buffer->size() < decoder.next_required_size()) {
+  ///     continue;
+  ///   }
+  ///   std::shared_ptr<arrow::Buffer> chunk(buffer.release());
+  ///   decoder.Consume(chunk);
+  ///   buffer = AllocateResizableBuffer();
+  /// }
+  /// if (buffer->size() > 0) {
+  ///   std::shared_ptr<arrow::Buffer> chunk(buffer.release());
+  ///   decoder.Consume(chunk);
+  /// }
+  /// ~~~
+  ///
+  /// \return the number of bytes needed to advance the state of the
+  /// decoder
+  int64_t next_required_size() const;
+
+  /// \brief Return the current state of the decoder.
+  ///
+  /// This method is provided for users who want to optimize performance.
+  /// Normal users don't need to use this method.
+  ///
+  /// The decoder doesn't need a Buffer to process data in the
+  /// MessageDecoder::State::INITIAL and
+  /// MessageDecoder::State::METADATA_LENGTH states. Creating a Buffer has
+  /// performance overhead. Advanced users can avoid creating a Buffer
+  /// by checking the current state of the decoder:
+  ///
+  /// ~~~{.cpp}
+  /// switch (decoder.state()) {
+  ///   case MessageDecoder::State::INITIAL:
+  ///   case MessageDecoder::State::METADATA_LENGTH:
+  ///     {
+  ///       uint8_t data[sizeof(int32_t)];
+  ///       auto data_size = input->Read(decoder.next_required_size(), data);
+  ///       decoder.Consume(data, data_size);
+  ///     }
+  ///     break;
+  ///   default:
+  ///     {
+  ///       auto buffer = input->Read(decoder.next_required_size());
+  ///       decoder.Consume(buffer);
+  ///     }
+  ///     break;
+  /// }
+  /// ~~~
+  ///
+  /// \return the current state
+  State state() const;
+
+ private:
+  class MessageDecoderImpl;
+  std::unique_ptr<MessageDecoderImpl> impl_;
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(MessageDecoder);
+};
+
+/// \brief Abstract interface for a sequence of messages
+/// \since 0.5.0
+class ARROW_EXPORT MessageReader {
+ public:
+  virtual ~MessageReader() = default;
+
+  /// \brief Create MessageReader that reads from InputStream
+  static std::unique_ptr<MessageReader> Open(io::InputStream* stream);
+
+  /// \brief Create MessageReader that reads from owned InputStream
+  static std::unique_ptr<MessageReader> Open(
+      const std::shared_ptr<io::InputStream>& owned_stream);
+
+  /// \brief Read next Message from the interface
+  ///
+  /// \return an arrow::ipc::Message instance
+  virtual Result<std::unique_ptr<Message>> ReadNextMessage() = 0;
+};
+
+// The first parameter of the function should be a pointer to the metadata
+// (i.e. org::apache::arrow::flatbuf::RecordBatch*)
+using FieldsLoaderFunction = std::function<Status(const void*, io::RandomAccessFile*)>;
+
+/// \brief Read encapsulated RPC message from position in file
+///
+/// Read a length-prefixed message flatbuffer starting at the indicated file
+/// offset.
If the message has a body with non-zero length, it will also be +/// read +/// +/// The metadata_length includes at least the length prefix and the flatbuffer +/// +/// \param[in] offset the position in the file where the message starts. The +/// first 4 bytes after the offset are the message length +/// \param[in] metadata_length the total number of bytes to read from file +/// \param[in] file the seekable file interface to read from +/// \param[in] fields_loader the function for loading subset of fields from the given file +/// \return the message read + +ARROW_EXPORT +Result> ReadMessage( + const int64_t offset, const int32_t metadata_length, io::RandomAccessFile* file, + const FieldsLoaderFunction& fields_loader = {}); + +/// \brief Read encapsulated RPC message from cached buffers +/// +/// The buffers should contain an entire message. Partial reads are not handled. +/// +/// This method can be used to read just the metadata by passing in a nullptr for the +/// body. The body will then be skipped and the body size will not be validated. +/// +/// If the body buffer is provided then it must be the complete body buffer +/// +/// This is similar to Message::Open but performs slightly more validation (e.g. checks +/// to see that the metadata length is correct and that the body is the size the metadata +/// expected) +/// +/// \param metadata The bytes for the metadata +/// \param body The bytes for the body +/// \return The message represented by the buffers +ARROW_EXPORT Result> ReadMessage( + std::shared_ptr metadata, std::shared_ptr body); + +ARROW_EXPORT +Future> ReadMessageAsync( + const int64_t offset, const int32_t metadata_length, const int64_t body_length, + io::RandomAccessFile* file, const io::IOContext& context = io::default_io_context()); + +/// \brief Advance stream to an 8-byte offset if its position is not a multiple +/// of 8 already +/// \param[in] stream an input stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::InputStream* stream, int32_t alignment = 8); + +/// \brief Advance stream to an 8-byte offset if its position is not a multiple +/// of 8 already +/// \param[in] stream an output stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::OutputStream* stream, int32_t alignment = 8); + +/// \brief Return error Status if file position is not a multiple of the +/// indicated alignment +ARROW_EXPORT +Status CheckAligned(io::FileInterface* stream, int32_t alignment = 8); + +/// \brief Read encapsulated IPC message (metadata and body) from InputStream +/// +/// Returns null if there are not enough bytes available or the +/// message length is 0 (e.g. EOS in a stream) +/// +/// \param[in] stream an input stream +/// \param[in] pool an optional MemoryPool to copy metadata on the CPU, if required +/// \return Message +ARROW_EXPORT +Result> ReadMessage(io::InputStream* stream, + MemoryPool* pool = default_memory_pool()); + +/// \brief Feed data from InputStream to MessageDecoder to decode an +/// encapsulated IPC message (metadata and body) +/// +/// This API is EXPERIMENTAL. 
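+///
+/// A typical pump built on the push-style decoder might look like this
+/// sketch (`input_stream` is an assumed open arrow::io::InputStream*):
+///
+/// ~~~{.cpp}
+/// std::unique_ptr<arrow::ipc::Message> message;
+/// auto listener =
+///     std::make_shared<arrow::ipc::AssignMessageDecoderListener>(&message);
+/// arrow::ipc::MessageDecoder decoder(listener);
+/// ARROW_RETURN_NOT_OK(arrow::ipc::DecodeMessage(&decoder, input_stream));
+/// ~~~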
+///
+/// \param[in] decoder a decoder
+/// \param[in] stream an input stream
+/// \return Status
+///
+/// \since 0.17.0
+ARROW_EXPORT
+Status DecodeMessage(MessageDecoder* decoder, io::InputStream* stream);
+
+/// Write encapsulated IPC message. Does not make assumptions about
+/// whether the stream is aligned already. Can write legacy (pre
+/// version 0.15.0) IPC message if option set
+///
+/// <continuation: 0xFFFFFFFF>
+/// <message size: int32>
+/// <message: const void*>
+/// <padding>
+///
+/// \param[in] message a buffer containing the metadata to write
+/// \param[in] options IPC writing options, including alignment and
+/// legacy message support
+/// \param[in,out] file the OutputStream to write to
+/// \param[out] message_length the total size of the payload written including
+/// padding
+/// \return Status
+Status WriteMessage(const Buffer& message, const IpcWriteOptions& options,
+                    io::OutputStream* file, int32_t* message_length);
+
+}  // namespace ipc
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h
new file mode 100644
index 0000000000000000000000000000000000000000..48b6758212bd5370aa2ff48f095080c92f60b086
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h
@@ -0,0 +1,178 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include
+#include
+#include
+
+#include "arrow/io/caching.h"
+#include "arrow/ipc/type_fwd.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/compression.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class MemoryPool;
+
+namespace ipc {
+
+// ARROW-109: We set this number arbitrarily to help catch user mistakes. For
+// deeply nested schemas, it is expected the user will indicate explicitly the
+// maximum allowed recursion depth
+constexpr int kMaxNestingDepth = 64;
+
+/// \brief Options for writing Arrow IPC messages
+struct ARROW_EXPORT IpcWriteOptions {
+  /// \brief If true, allow field lengths that don't fit in a signed 32-bit int.
+  ///
+  /// Some implementations may not be able to parse streams created with this option.
+  bool allow_64bit = false;
+
+  /// \brief The maximum permitted schema nesting depth.
+  int max_recursion_depth = kMaxNestingDepth;
+
+  /// \brief Write padding after memory buffers up to this multiple of bytes.
+  int32_t alignment = 8;
+
+  /// \brief Write the pre-0.15.0 IPC message format
+  ///
+  /// This legacy format consists of a 4-byte prefix instead of 8-byte.
+ bool write_legacy_ipc_format = false; + + /// \brief The memory pool to use for allocations made during IPC writing + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Compression codec to use for record batch body buffers + /// + /// May only be UNCOMPRESSED, LZ4_FRAME and ZSTD. + std::shared_ptr codec; + + /// \brief Minimum space savings percentage required for compression to be applied + /// + /// Space savings is calculated as (1.0 - compressed_size / uncompressed_size). + /// + /// For example, if min_space_savings = 0.1, a 100-byte body buffer won't undergo + /// compression if its expected compressed size exceeds 90 bytes. If this option is + /// unset, compression will be used indiscriminately. If no codec was supplied, this + /// option is ignored. + /// + /// Values outside of the range [0,1] are handled as errors. + /// + /// Note that enabling this option may result in unreadable data for Arrow C++ versions + /// prior to 12.0.0. + std::optional min_space_savings; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like compression + bool use_threads = true; + + /// \brief Whether to emit dictionary deltas + /// + /// If false, a changed dictionary for a given field will emit a full + /// dictionary replacement. + /// If true, a changed dictionary will be compared against the previous + /// version. If possible, a dictionary delta will be emitted, otherwise + /// a full dictionary replacement. + /// + /// Default is false to maximize stream compatibility. + /// + /// Also, note that if a changed dictionary is a nested dictionary, + /// then a delta is never emitted, for compatibility with the read path. + bool emit_dictionary_deltas = false; + + /// \brief Whether to unify dictionaries for the IPC file format + /// + /// The IPC file format doesn't support dictionary replacements. + /// Therefore, chunks of a column with a dictionary type must have the same + /// dictionary in each record batch (or an extended dictionary + delta). + /// + /// If this option is true, RecordBatchWriter::WriteTable will attempt + /// to unify dictionaries across each table column. If this option is + /// false, incompatible dictionaries across a table column will simply + /// raise an error. + /// + /// Note that enabling this option has a runtime cost. Also, not all types + /// currently support dictionary unification. + /// + /// This option is ignored for IPC streams, which support dictionary replacement + /// and deltas. + bool unify_dictionaries = false; + + /// \brief Format version to use for IPC messages and their metadata. + /// + /// Presently using V5 version (readable by 1.0.0 and later). + /// V4 is also available (readable by 0.8.0 and later). + MetadataVersion metadata_version = MetadataVersion::V5; + + static IpcWriteOptions Defaults(); +}; + +/// \brief Options for reading Arrow IPC messages +struct ARROW_EXPORT IpcReadOptions { + /// \brief The maximum permitted schema nesting depth. + int max_recursion_depth = kMaxNestingDepth; + + /// \brief The memory pool to use for allocations made during IPC reading + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Top-level schema fields to include when deserializing RecordBatch. 
+ /// + /// If empty (the default), return all deserialized fields. + /// If non-empty, the values are the indices of fields in the top-level schema. + std::vector included_fields; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like decompression + bool use_threads = true; + + /// \brief Whether to convert incoming data to platform-native endianness + /// + /// If the endianness of the received schema is not equal to platform-native + /// endianness, then all buffers with endian-sensitive data will be byte-swapped. + /// This includes the value buffers of numeric types, temporal types, decimal + /// types, as well as the offset buffers of variable-sized binary and list-like + /// types. + /// + /// Endianness conversion is achieved by the RecordBatchFileReader, + /// RecordBatchStreamReader and StreamDecoder classes. + bool ensure_native_endian = true; + + /// \brief Options to control caching behavior when pre-buffering is requested + /// + /// The lazy property will always be reset to true to deliver the expected behavior + io::CacheOptions pre_buffer_cache_options = io::CacheOptions::LazyDefaults(); + + static IpcReadOptions Defaults(); +}; + +namespace internal { + +Status CheckCompressionSupported(Compression::type codec); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..4e0ee3dfc8b440218e53b68a110f878cabeebc09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h @@ -0,0 +1,475 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Implement Arrow streaming binary format + +#pragma once + +#include +#include +#include + +#include "arrow/ipc/dictionary.h" // IWYU pragma: export +#include "arrow/ipc/message.h" +#include "arrow/ipc/options.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class Buffer; +class MemoryManager; +class MemoryPool; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; +class SparseTensor; + +namespace io { + +class OutputStream; + +} // namespace io + +namespace ipc { + +/// \brief Intermediate data structure with metadata header, and zero +/// or more buffers for the message body. 
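+///
+/// A payload is typically produced and consumed with the helpers declared
+/// later in this header; as a sketch (`batch` and `sink` are assumed):
+///
+/// ~~~{.cpp}
+/// arrow::ipc::IpcPayload payload;
+/// auto options = arrow::ipc::IpcWriteOptions::Defaults();
+/// ARROW_RETURN_NOT_OK(
+///     arrow::ipc::GetRecordBatchPayload(batch, options, &payload));
+/// int32_t metadata_length = 0;
+/// ARROW_RETURN_NOT_OK(
+///     arrow::ipc::WriteIpcPayload(payload, options, sink, &metadata_length));
+/// ~~~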
+struct IpcPayload { + MessageType type = MessageType::NONE; + std::shared_ptr metadata; + std::vector> body_buffers; + std::vector variadic_buffer_counts; + int64_t body_length = 0; // serialized body length (padded, maybe compressed) + int64_t raw_body_length = 0; // initial uncompressed body length +}; + +struct WriteStats { + /// Number of IPC messages written. + int64_t num_messages = 0; + /// Number of record batches written. + int64_t num_record_batches = 0; + /// Number of dictionary batches written. + /// + /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries + int64_t num_dictionary_batches = 0; + + /// Number of dictionary deltas written. + int64_t num_dictionary_deltas = 0; + /// Number of replaced dictionaries (i.e. where a dictionary batch replaces + /// an existing dictionary with an unrelated new dictionary). + int64_t num_replaced_dictionaries = 0; + + /// Total size in bytes of record batches emitted. + /// The "raw" size counts the original buffer sizes, while the "serialized" size + /// includes padding and (optionally) compression. + int64_t total_raw_body_size = 0; + int64_t total_serialized_body_size = 0; +}; + +/// \class RecordBatchWriter +/// \brief Abstract interface for writing a stream of record batches +class ARROW_EXPORT RecordBatchWriter { + public: + virtual ~RecordBatchWriter(); + + /// \brief Write a record batch to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \return Status + virtual Status WriteRecordBatch(const RecordBatch& batch) = 0; + + /// \brief Write a record batch with custom metadata to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \param[in] custom_metadata the record batch's custom metadata to write to the stream + /// \return Status + virtual Status WriteRecordBatch( + const RecordBatch& batch, + const std::shared_ptr& custom_metadata); + + /// \brief Write possibly-chunked table by creating sequence of record batches + /// \param[in] table table to write + /// \return Status + Status WriteTable(const Table& table); + + /// \brief Write Table with a particular chunksize + /// \param[in] table table to write + /// \param[in] max_chunksize maximum length of table chunks. To indicate + /// that no maximum should be enforced, pass -1. + /// \return Status + virtual Status WriteTable(const Table& table, int64_t max_chunksize); + + /// \brief Perform any logic necessary to finish the stream + /// + /// \return Status + virtual Status Close() = 0; + + /// \brief Return current write statistics + virtual WriteStats stats() const = 0; +}; + +/// \defgroup record-batch-writer-factories Functions for creating RecordBatchWriter +/// instances +/// +/// @{ + +/// Create a new IPC stream writer from stream sink and schema. User is +/// responsible for closing the actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeStreamWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC stream writer from stream sink and schema. User is +/// responsible for closing the actual OutputStream. 
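+///
+/// For example, writing a compressed stream might look like this sketch
+/// (`sink`, `schema` and `table` are assumed; the codec line requires an
+/// Arrow build with ZSTD support):
+///
+/// ~~~{.cpp}
+/// auto options = arrow::ipc::IpcWriteOptions::Defaults();
+/// ARROW_ASSIGN_OR_RAISE(auto codec,
+///                       arrow::util::Codec::Create(arrow::Compression::ZSTD));
+/// options.codec = std::move(codec);
+/// ARROW_ASSIGN_OR_RAISE(auto writer,
+///                       arrow::ipc::MakeStreamWriter(sink, schema, options));
+/// ARROW_RETURN_NOT_OK(writer->WriteTable(table));
+/// ARROW_RETURN_NOT_OK(writer->Close());
+/// ~~~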
+/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeStreamWriter( + std::shared_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Result> +ARROW_EXPORT +Result> MakeFileWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Result> +ARROW_EXPORT +Result> MakeFileWriter( + std::shared_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// @} + +/// \brief Low-level API for writing a record batch (without schema) +/// to an OutputStream as encapsulated IPC message. See Arrow format +/// documentation for more detail. +/// +/// \param[in] batch the record batch to write +/// \param[in] buffer_start_offset the start offset to use in the buffer metadata, +/// generally should be 0 +/// \param[in] dst an OutputStream +/// \param[out] metadata_length the size of the length-prefixed flatbuffer +/// including padding to a 64-byte boundary +/// \param[out] body_length the size of the contiguous buffer block plus +/// \param[in] options options for serialization +/// \return Status +ARROW_EXPORT +Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] options the IpcWriteOptions to use for serialization +/// \return the serialized message +ARROW_EXPORT +Result> SerializeRecordBatch(const RecordBatch& batch, + const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] mm a MemoryManager to allocate memory from +/// \return the serialized message +ARROW_EXPORT +Result> SerializeRecordBatch(const RecordBatch& batch, + std::shared_ptr mm); + +/// \brief Write record batch to OutputStream +/// +/// \param[in] batch the record batch to write +/// \param[in] options the IpcWriteOptions to use for serialization +/// \param[in] out the OutputStream to write the output to +/// \return Status +/// +/// If writing to pre-allocated memory, you can use +/// arrow::ipc::GetRecordBatchSize to compute how much space is required +ARROW_EXPORT +Status SerializeRecordBatch(const RecordBatch& batch, const IpcWriteOptions& options, + io::OutputStream* out); + +/// \brief Serialize schema as encapsulated IPC message +/// +/// \param[in] schema the schema to write 
+/// \param[in] pool a MemoryPool to allocate memory from +/// \return the serialized schema +ARROW_EXPORT +Result> SerializeSchema(const Schema& schema, + MemoryPool* pool = default_memory_pool()); + +/// \brief Write multiple record batches to OutputStream, including schema +/// \param[in] batches a vector of batches. Must all have same schema +/// \param[in] options options for serialization +/// \param[out] dst an OutputStream +/// \return Status +ARROW_EXPORT +Status WriteRecordBatchStream(const std::vector>& batches, + const IpcWriteOptions& options, io::OutputStream* dst); + +/// \brief Compute the number of bytes needed to write an IPC payload +/// including metadata +/// +/// \param[in] payload the IPC payload to write +/// \param[in] options write options +/// \return the size of the complete encapsulated message +ARROW_EXPORT +int64_t GetPayloadSize(const IpcPayload& payload, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, int64_t* size); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[in] options options for serialization +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, const IpcWriteOptions& options, + int64_t* size); + +/// \brief Compute the number of bytes needed to write a tensor including metadata +/// +/// \param[in] tensor the tensor to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetTensorSize(const Tensor& tensor, int64_t* size); + +/// \brief EXPERIMENTAL: Convert arrow::Tensor to a Message with minimal memory +/// allocation +/// +/// \param[in] tensor the Tensor to write +/// \param[in] pool MemoryPool to allocate space for metadata +/// \return the resulting Message +ARROW_EXPORT +Result> GetTensorMessage(const Tensor& tensor, MemoryPool* pool); + +/// \brief Write arrow::Tensor as a contiguous message. +/// +/// The metadata and body are written assuming 64-byte alignment. It is the +/// user's responsibility to ensure that the OutputStream has been aligned +/// to a 64-byte multiple before writing the message. 
+///
+/// The message is written out as follows:
+/// \code
+/// <metadata size> <metadata> <tensor data>
+/// \endcode
+///
+/// \param[in] tensor the Tensor to write
+/// \param[in] dst the OutputStream to write to
+/// \param[out] metadata_length the actual metadata length, including padding
+/// \param[out] body_length the actual message body length
+/// \return Status
+ARROW_EXPORT
+Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadata_length,
+                   int64_t* body_length);
+
+/// \brief EXPERIMENTAL: Convert arrow::SparseTensor to a Message with minimal memory
+/// allocation
+///
+/// The message is written out as follows:
+/// \code
+/// <metadata size> <metadata> <sparse index> <sparse tensor body>
+/// \endcode
+///
+/// \param[in] sparse_tensor the SparseTensor to write
+/// \param[in] pool MemoryPool to allocate space for metadata
+/// \return the resulting Message
+ARROW_EXPORT
+Result<std::unique_ptr<Message>> GetSparseTensorMessage(
+    const SparseTensor& sparse_tensor, MemoryPool* pool);
+
+/// \brief EXPERIMENTAL: Write arrow::SparseTensor as a contiguous message. The metadata,
+/// sparse index, and body are written assuming 64-byte alignment. It is the
+/// user's responsibility to ensure that the OutputStream has been aligned
+/// to a 64-byte multiple before writing the message.
+///
+/// \param[in] sparse_tensor the SparseTensor to write
+/// \param[in] dst the OutputStream to write to
+/// \param[out] metadata_length the actual metadata length, including padding
+/// \param[out] body_length the actual message body length
+/// \return Status
+ARROW_EXPORT
+Status WriteSparseTensor(const SparseTensor& sparse_tensor, io::OutputStream* dst,
+                         int32_t* metadata_length, int64_t* body_length);
+
+/// \brief Compute IpcPayload for the given schema
+/// \param[in] schema the Schema that is being serialized
+/// \param[in] options options for serialization
+/// \param[in] mapper object mapping dictionary fields to dictionary ids
+/// \param[out] out the returned IpcPayload
+/// \return Status
+ARROW_EXPORT
+Status GetSchemaPayload(const Schema& schema, const IpcWriteOptions& options,
+                        const DictionaryFieldMapper& mapper, IpcPayload* out);
+
+/// \brief Compute IpcPayload for a dictionary
+/// \param[in] id the dictionary id
+/// \param[in] dictionary the dictionary values
+/// \param[in] options options for serialization
+/// \param[out] payload the output IpcPayload
+/// \return Status
+ARROW_EXPORT
+Status GetDictionaryPayload(int64_t id, const std::shared_ptr<Array>& dictionary,
+                            const IpcWriteOptions& options, IpcPayload* payload);
+
+/// \brief Compute IpcPayload for a dictionary
+/// \param[in] id the dictionary id
+/// \param[in] is_delta whether the dictionary is a delta dictionary
+/// \param[in] dictionary the dictionary values
+/// \param[in] options options for serialization
+/// \param[out] payload the output IpcPayload
+/// \return Status
+ARROW_EXPORT
+Status GetDictionaryPayload(int64_t id, bool is_delta,
+                            const std::shared_ptr<Array>& dictionary,
+                            const IpcWriteOptions& options, IpcPayload* payload);
+
+/// \brief Compute IpcPayload for the given record batch
+/// \param[in] batch the RecordBatch that is being serialized
+/// \param[in] options options for serialization
+/// \param[out] out the returned IpcPayload
+/// \return Status
+ARROW_EXPORT
+Status GetRecordBatchPayload(const RecordBatch& batch, const IpcWriteOptions& options,
+                             IpcPayload* out);
+
+/// \brief Compute IpcPayload for the given record batch and custom metadata
+/// \param[in] batch the RecordBatch that is being serialized
+/// \param[in] custom_metadata the custom metadata to be serialized
with the record batch +/// \param[in] options options for serialization +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetRecordBatchPayload( + const RecordBatch& batch, + const std::shared_ptr& custom_metadata, + const IpcWriteOptions& options, IpcPayload* out); + +/// \brief Write an IPC payload to the given stream. +/// \param[in] payload the payload to write +/// \param[in] options options for serialization +/// \param[in] dst The stream to write the payload to. +/// \param[out] metadata_length the length of the serialized metadata +/// \return Status +ARROW_EXPORT +Status WriteIpcPayload(const IpcPayload& payload, const IpcWriteOptions& options, + io::OutputStream* dst, int32_t* metadata_length); + +/// \brief Compute IpcPayload for the given sparse tensor +/// \param[in] sparse_tensor the SparseTensor that is being serialized +/// \param[in,out] pool for any required temporary memory allocations +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetSparseTensorPayload(const SparseTensor& sparse_tensor, MemoryPool* pool, + IpcPayload* out); + +namespace internal { + +// These internal APIs may change without warning or deprecation + +class ARROW_EXPORT IpcPayloadWriter { + public: + virtual ~IpcPayloadWriter(); + + // Default implementation is a no-op + virtual Status Start(); + + virtual Status WritePayload(const IpcPayload& payload) = 0; + + virtual Status Close() = 0; +}; + +/// Create a new IPC payload stream writer from stream sink. User is +/// responsible for closing the actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakePayloadStreamWriter( + io::OutputStream* sink, const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC payload file writer from stream sink. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for File Footer, optional +/// \return Status +ARROW_EXPORT +Result> MakePayloadFileWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr& metadata = NULLPTR); + +/// Create a new RecordBatchWriter from IpcPayloadWriter and schema. +/// +/// The format is implicitly the IPC stream format (allowing dictionary +/// replacement and deltas). +/// +/// \param[in] sink the IpcPayloadWriter to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> OpenRecordBatchWriter( + std::unique_ptr sink, const std::shared_ptr& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h new file mode 100644 index 0000000000000000000000000000000000000000..47b56684b5af7f383e6e2acee014dde6ba40d11d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/json/options.h" +#include "arrow/json/reader.h" diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..93b327bf3ae2b63bc4439d77440b54d10e45810a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { + +class PromotionGraph; + +class ARROW_EXPORT ChunkedArrayBuilder { + public: + virtual ~ChunkedArrayBuilder() = default; + + /// Spawn a task that will try to convert and insert the given JSON block + virtual void Insert(int64_t block_index, + const std::shared_ptr& unconverted_field, + const std::shared_ptr& unconverted) = 0; + + /// Return the final chunked array. + /// Every chunk must be inserted before this is called! 
+ virtual Status Finish(std::shared_ptr* out) = 0; + + /// Finish current task group and substitute a new one + virtual Status ReplaceTaskGroup( + const std::shared_ptr& task_group) = 0; + + protected: + explicit ChunkedArrayBuilder( + const std::shared_ptr& task_group) + : task_group_(task_group) {} + + std::shared_ptr task_group_; +}; + +/// create a chunked builder +/// +/// if unexpected fields and promotion need to be handled, promotion_graph must be +/// non-null +ARROW_EXPORT Status MakeChunkedArrayBuilder( + const std::shared_ptr& task_group, MemoryPool* pool, + const PromotionGraph* promotion_graph, const std::shared_ptr& type, + std::shared_ptr* out); + +} // namespace json +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h new file mode 100644 index 0000000000000000000000000000000000000000..9ed85126da1412774bc216737b7f4abc3795815c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/delimiting.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { + +struct ParseOptions; + +ARROW_EXPORT +std::unique_ptr MakeChunker(const ParseOptions& options); + +} // namespace json +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..9a812dd3c3afaec0ccc36f3bb72fa2d1a459f4e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; +class Field; +class MemoryPool; + +namespace json { + +/// \brief interface for conversion of Arrays +/// +/// Converters are not required to be correct for arbitrary input- only +/// for unconverted arrays emitted by a corresponding parser. +class ARROW_EXPORT Converter { + public: + virtual ~Converter() = default; + + /// convert an array + /// on failure, this converter may be promoted to another converter which + /// *can* convert the given input. + virtual Status Convert(const std::shared_ptr& in, + std::shared_ptr* out) = 0; + + std::shared_ptr out_type() const { return out_type_; } + + MemoryPool* pool() { return pool_; } + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Converter); + + Converter(MemoryPool* pool, const std::shared_ptr& out_type) + : pool_(pool), out_type_(out_type) {} + + MemoryPool* pool_; + std::shared_ptr out_type_; +}; + +/// \brief produce a single converter to the specified out_type +ARROW_EXPORT Status MakeConverter(const std::shared_ptr& out_type, + MemoryPool* pool, std::shared_ptr* out); + +class ARROW_EXPORT PromotionGraph { + public: + virtual ~PromotionGraph() = default; + + /// \brief produce a valid field which will be inferred as null + virtual std::shared_ptr Null(const std::string& name) const = 0; + + /// \brief given an unexpected field encountered during parsing, return a type to which + /// it may be convertible (may return null if none is available) + virtual std::shared_ptr Infer( + const std::shared_ptr& unexpected_field) const = 0; + + /// \brief given a type to which conversion failed, return a promoted type to which + /// conversion may succeed (may return null if none is available) + virtual std::shared_ptr Promote( + const std::shared_ptr& failed, + const std::shared_ptr& unexpected_field) const = 0; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(PromotionGraph); + PromotionGraph() = default; +}; + +ARROW_EXPORT const PromotionGraph* GetPromotionGraph(); + +} // namespace json +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..b15b09dbdacfcb56cd4b37e0eb144d9ebe46c915 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
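The Converter interface above only has defined behavior on the "unconverted" arrays emitted by the JSON parser, but its mechanics are straightforward. A minimal sketch, where the function name and the int32 target type are illustrative:

~~~{.cpp}
#include <memory>

#include "arrow/array.h"
#include "arrow/json/converter.h"
#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/type.h"

// Convert one unconverted column (as produced by the JSON parser) to int32.
arrow::Status ConvertColumn(const std::shared_ptr<arrow::Array>& unconverted,
                            std::shared_ptr<arrow::Array>* out) {
  std::shared_ptr<arrow::json::Converter> converter;
  ARROW_RETURN_NOT_OK(arrow::json::MakeConverter(
      arrow::int32(), arrow::default_memory_pool(), &converter));
  return converter->Convert(unconverted, out);
}
~~~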
+ +#pragma once + +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace json { +namespace internal { + +/// This class is a helper to serialize a json object to a string. +/// It uses rapidjson in implementation. +class ARROW_EXPORT ObjectWriter { + public: + ObjectWriter(); + ~ObjectWriter(); + + void SetString(std::string_view key, std::string_view value); + void SetBool(std::string_view key, bool value); + + std::string Serialize(); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace json +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h new file mode 100644 index 0000000000000000000000000000000000000000..d7edab9ceddb4d4e2d5c79b8652d7d47d0557b55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/json/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +class Schema; + +namespace json { + +enum class UnexpectedFieldBehavior : char { + /// Unexpected JSON fields are ignored + Ignore, + /// Unexpected JSON fields error out + Error, + /// Unexpected JSON fields are type-inferred and included in the output + InferType +}; + +struct ARROW_EXPORT ParseOptions { + // Parsing options + + /// Optional explicit schema (disables type inference on those fields) + std::shared_ptr explicit_schema; + + /// Whether objects may be printed across multiple lines (for example pretty-printed) + /// + /// If true, parsing may be slower. 
+  bool newlines_in_values = false;
+
+  /// How JSON fields outside of explicit_schema (if given) are treated
+  UnexpectedFieldBehavior unexpected_field_behavior = UnexpectedFieldBehavior::InferType;
+
+  /// Create parsing options with default values
+  static ParseOptions Defaults();
+};
+
+struct ARROW_EXPORT ReadOptions {
+  // Reader options
+
+  /// Whether to use the global CPU thread pool
+  bool use_threads = true;
+  /// Block size we request from the IO layer; also determines the size of
+  /// chunks when use_threads is true
+  int32_t block_size = 1 << 20;  // 1 MB
+
+  /// Create read options with default values
+  static ReadOptions Defaults();
+};
+
+}  // namespace json
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h
new file mode 100644
index 0000000000000000000000000000000000000000..aca416dbb7b5b4915cb8d1f74d932989cde286dd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/json/options.h"
+#include "arrow/status.h"
+#include "arrow/util/key_value_metadata.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class Buffer;
+class MemoryPool;
+class KeyValueMetadata;
+class ResizableBuffer;
+
+namespace json {
+
+struct Kind {
+  enum type : uint8_t {
+    kNull,
+    kBoolean,
+    kNumber,
+    kString,
+    kArray,
+    kObject,
+    kNumberOrString
+  };
+
+  static const std::string& Name(Kind::type);
+
+  static const std::shared_ptr<const KeyValueMetadata>& Tag(Kind::type);
+
+  static Kind::type FromTag(const std::shared_ptr<const KeyValueMetadata>& tag);
+
+  static Status ForType(const DataType& type, Kind::type* kind);
+};
+
+/// \class BlockParser
+/// \brief A reusable block-based parser for JSON data
+///
+/// The parser takes a block of newline-delimited JSON data and extracts Arrays
+/// of unconverted strings which can be fed to a Converter to obtain a usable Array.
+///
+/// Note that in addition to parse errors (such as malformed JSON) some conversion
+/// errors are caught at parse time:
+/// - A null value in a non-nullable column
+/// - A change in the JSON kind of a column. For example, if an explicit schema is
+///   provided which stipulates that field "a" is integral, a row of
+///   {"a": "not a number"} will result in an error. This also applies to fields
+///   outside an explicit schema.
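+///
+/// A minimal usage sketch (illustrative, not from the Arrow docs), assuming
+/// `json_block` is a std::shared_ptr<Buffer> of newline-delimited JSON and
+/// `options` is a ParseOptions:
+///
+///   std::unique_ptr<BlockParser> parser;
+///   ARROW_RETURN_NOT_OK(BlockParser::Make(options, &parser));
+///   ARROW_RETURN_NOT_OK(parser->Parse(json_block));
+///   std::shared_ptr<Array> unconverted;
+///   ARROW_RETURN_NOT_OK(parser->Finish(&unconverted));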
+class ARROW_EXPORT BlockParser {
+ public:
+  virtual ~BlockParser() = default;
+
+  /// \brief Reserve storage for scalars parsed from a block of json
+  virtual Status ReserveScalarStorage(int64_t nbytes) = 0;
+
+  /// \brief Parse a block of data
+  virtual Status Parse(const std::shared_ptr<Buffer>& json) = 0;
+
+  /// \brief Extract parsed data
+  virtual Status Finish(std::shared_ptr<Array>* parsed) = 0;
+
+  /// \brief Return the number of parsed rows
+  int32_t num_rows() const { return num_rows_; }
+
+  /// \brief Construct a BlockParser
+  ///
+  /// \param[in] pool MemoryPool to use when constructing parsed array
+  /// \param[in] options ParseOptions to use when parsing JSON
+  /// \param[out] out constructed BlockParser
+  static Status Make(MemoryPool* pool, const ParseOptions& options,
+                     std::unique_ptr<BlockParser>* out);
+
+  static Status Make(const ParseOptions& options, std::unique_ptr<BlockParser>* out);
+
+ protected:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(BlockParser);
+
+  explicit BlockParser(MemoryPool* pool) : pool_(pool) {}
+
+  MemoryPool* pool_;
+  int32_t num_rows_ = 0;
+};
+
+}  // namespace json
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ed81d000c555c0118acc2b71a1439c5db0abf3a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/rapidjson_defs.h
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Include this file before including any RapidJSON headers.
+
+#pragma once
+
+#define RAPIDJSON_HAS_STDSTRING 1
+#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
+
+// rapidjson will be defined in namespace arrow::rapidjson
+#define RAPIDJSON_NAMESPACE arrow::rapidjson
+#define RAPIDJSON_NAMESPACE_BEGIN \
+  namespace arrow {               \
+  namespace rapidjson {
+#define RAPIDJSON_NAMESPACE_END \
+  }                             \
+  }
+
+// enable SIMD whitespace skipping, if available
+#if defined(ARROW_HAVE_SSE4_2)
+#define RAPIDJSON_SSE2 1
+#define RAPIDJSON_SSE42 1
+#endif
+
+#if defined(ARROW_HAVE_NEON)
+#define RAPIDJSON_NEON 1
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f819779bdb5940b081a2a41756d3a6510260476
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/test_common.h
@@ -0,0 +1,330 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/io/memory.h" +#include "arrow/json/converter.h" +#include "arrow/json/options.h" +#include "arrow/json/parser.h" +#include "arrow/json/rapidjson_defs.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/type.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +#include "rapidjson/document.h" +#include "rapidjson/prettywriter.h" +#include "rapidjson/reader.h" +#include "rapidjson/writer.h" + +namespace arrow { + +using internal::checked_cast; + +namespace json { + +namespace rj = arrow::rapidjson; + +using rj::StringBuffer; +using std::string_view; +using Writer = rj::Writer; + +struct GenerateOptions { + // Probability of a field being written + double field_probability = 1.0; + // Probability of a value being null + double null_probability = 0.2; + // Whether to randomize the order of written fields + bool randomize_field_order = false; + + static constexpr GenerateOptions Defaults() { return GenerateOptions{}; } +}; + +inline static Status OK(bool ok) { return ok ? 
Status::OK() : Status::Invalid(""); } + +template +inline static Status Generate( + const std::shared_ptr& type, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()); + +template +inline static Status Generate( + const std::vector>& fields, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()); + +template +inline static Status Generate( + const std::shared_ptr& schm, Engine& e, Writer* writer, + const GenerateOptions& options = GenerateOptions::Defaults()) { + return Generate(schm->fields(), e, writer, options); +} + +template +struct GenerateImpl { + Status Visit(const NullType&) { return OK(writer.Null()); } + + Status Visit(const BooleanType&) { + return OK(writer.Bool(std::uniform_int_distribution{}(e)&1)); + } + + template + enable_if_physical_unsigned_integer Visit(const T&) { + auto val = std::uniform_int_distribution<>{}(e); + return OK(writer.Uint64(static_cast(val))); + } + + template + enable_if_physical_signed_integer Visit(const T&) { + auto val = std::uniform_int_distribution<>{}(e); + return OK(writer.Int64(static_cast(val))); + } + + template + enable_if_physical_floating_point Visit(const T&) { + auto val = std::normal_distribution{0, 1 << 10}(e); + return OK(writer.Double(val)); + } + + Status GenerateAscii(const DataType&) { + auto size = std::poisson_distribution<>{4}(e); + std::uniform_int_distribution gen_char(32, 126); // FIXME generate UTF8 + std::string s(size, '\0'); + for (char& ch : s) ch = static_cast(gen_char(e)); + return OK(writer.String(s.c_str())); + } + + template + enable_if_base_binary Visit(const T& t) { + return GenerateAscii(t); + } + + Status Visit(const BinaryViewType& t) { return GenerateAscii(t); } + + template + enable_if_list_like Visit(const T& t) { + auto size = std::poisson_distribution<>{4}(e); + writer.StartArray(); + for (int i = 0; i < size; ++i) { + RETURN_NOT_OK(Generate(t.value_type(), e, &writer, options)); + } + return OK(writer.EndArray(size)); + } + + Status Visit(const ListViewType& t) { return NotImplemented(t); } + + Status Visit(const LargeListViewType& t) { return NotImplemented(t); } + + Status Visit(const StructType& t) { return Generate(t.fields(), e, &writer, options); } + + Status Visit(const DayTimeIntervalType& t) { return NotImplemented(t); } + + Status Visit(const MonthDayNanoIntervalType& t) { return NotImplemented(t); } + + Status Visit(const DictionaryType& t) { return NotImplemented(t); } + + Status Visit(const ExtensionType& t) { return NotImplemented(t); } + + Status Visit(const Decimal128Type& t) { return NotImplemented(t); } + + Status Visit(const FixedSizeBinaryType& t) { return NotImplemented(t); } + + Status Visit(const UnionType& t) { return NotImplemented(t); } + + Status Visit(const RunEndEncodedType& t) { return NotImplemented(t); } + + Status NotImplemented(const DataType& t) { + return Status::NotImplemented("random generation of arrays of type ", t); + } + + Engine& e; + rj::Writer& writer; + const GenerateOptions& options; +}; + +template +inline static Status Generate(const std::shared_ptr& type, Engine& e, + Writer* writer, const GenerateOptions& options) { + if (std::bernoulli_distribution(options.null_probability)(e)) { + writer->Null(); + return Status::OK(); + } + GenerateImpl visitor = {e, *writer, options}; + return VisitTypeInline(*type, &visitor); +} + +template +inline static Status Generate(const std::vector>& fields, + Engine& e, Writer* writer, const GenerateOptions& options) { + 
RETURN_NOT_OK(OK(writer->StartObject())); + + int num_fields = 0; + auto write_field = [&](const Field& f) { + ++num_fields; + writer->Key(f.name().c_str()); + return Generate(f.type(), e, writer, options); + }; + + std::bernoulli_distribution bool_dist(options.field_probability); + if (options.randomize_field_order) { + std::vector indices; + indices.reserve(static_cast(fields.size() * options.field_probability)); + for (size_t i = 0; i < fields.size(); ++i) { + if (bool_dist(e)) { + indices.push_back(i); + } + } + std::shuffle(indices.begin(), indices.end(), e); + for (auto i : indices) { + RETURN_NOT_OK(write_field(*fields[i])); + } + } else { + for (const auto& f : fields) { + if (bool_dist(e)) { + RETURN_NOT_OK(write_field(*f)); + } + } + } + + return OK(writer->EndObject(num_fields)); +} + +inline static Status MakeStream(string_view src_str, + std::shared_ptr* out) { + auto src = std::make_shared(src_str); + *out = std::make_shared(src); + return Status::OK(); +} + +// scalar values (numbers and strings) are parsed into a +// dictionary. This can be decoded for ease of comparison +inline static Status DecodeStringDictionary(const DictionaryArray& dict_array, + std::shared_ptr* decoded) { + const StringArray& dict = checked_cast(*dict_array.dictionary()); + const Int32Array& indices = checked_cast(*dict_array.indices()); + StringBuilder builder; + RETURN_NOT_OK(builder.Resize(indices.length())); + for (int64_t i = 0; i < indices.length(); ++i) { + if (indices.IsNull(i)) { + builder.UnsafeAppendNull(); + continue; + } + auto value = dict.GetView(indices.GetView(i)); + RETURN_NOT_OK(builder.ReserveData(value.size())); + builder.UnsafeAppend(value); + } + return builder.Finish(decoded); +} + +inline static Status ParseFromString(ParseOptions options, string_view src_str, + std::shared_ptr* parsed) { + auto src = std::make_shared(src_str); + std::unique_ptr parser; + RETURN_NOT_OK(BlockParser::Make(options, &parser)); + RETURN_NOT_OK(parser->Parse(src)); + return parser->Finish(parsed); +} + +inline static Status ParseFromString(ParseOptions options, string_view src_str, + std::shared_ptr* parsed) { + std::shared_ptr parsed_non_struct; + RETURN_NOT_OK(ParseFromString(options, src_str, &parsed_non_struct)); + *parsed = internal::checked_pointer_cast(parsed_non_struct); + return Status::OK(); +} + +static inline std::string PrettyPrint(string_view one_line) { + rj::Document document; + + // Must pass size to avoid ASAN issues. 
+ document.Parse(one_line.data(), one_line.size()); + rj::StringBuffer sb; + rj::PrettyWriter writer(sb); + document.Accept(writer); + return sb.GetString(); +} + +template +std::string RowsOfOneColumn(std::string_view name, std::initializer_list values, + decltype(std::to_string(*values.begin()))* = nullptr) { + std::stringstream ss; + for (auto value : values) { + ss << R"({")" << name << R"(":)" << std::to_string(value) << "}\n"; + } + return ss.str(); +} + +inline std::string RowsOfOneColumn(std::string_view name, + std::initializer_list values) { + std::stringstream ss; + for (auto value : values) { + ss << R"({")" << name << R"(":)" << value << "}\n"; + } + return ss.str(); +} + +inline static std::string scalars_only_src() { + return R"( + { "hello": 3.5, "world": false, "yo": "thing" } + { "hello": 3.25, "world": null } + { "hello": 3.125, "world": null, "yo": "\u5fcd" } + { "hello": 0.0, "world": true, "yo": null } + )"; +} + +inline static std::string nested_src() { + return R"( + { "hello": 3.5, "world": false, "yo": "thing", "arr": [1, 2, 3], "nuf": {} } + { "hello": 3.25, "world": null, "arr": [2], "nuf": null } + { "hello": 3.125, "world": null, "yo": "\u5fcd", "arr": [], "nuf": { "ps": 78 } } + { "hello": 0.0, "world": true, "yo": null, "arr": null, "nuf": { "ps": 90 } } + )"; +} + +inline static std::string null_src() { + return R"( + { "plain": null, "list1": [], "list2": [], "struct": { "plain": null } } + { "plain": null, "list1": [], "list2": [null], "struct": {} } + )"; +} + +inline static std::string unquoted_decimal_src() { + return R"( + { "price": 30.04, "cost":30.001 } + { "price": 1.23, "cost":1.229 } + )"; +} + +inline static std::string mixed_decimal_src() { + return R"( + { "price": 30.04, "cost": 30.001 } + { "price": "1.23", "cost": "1.229" } + )"; +} + +} // namespace json +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..7066bbb63d2a5775454d5cffc82df7faf0056db8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
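Taken together, the helpers in test_common.h above let a parser test go from an NDJSON literal to assertions in a few lines. A sketch of a gtest case built on those pieces (the test name is illustrative):

#include "arrow/json/test_common.h"

namespace arrow {
namespace json {

TEST(BlockParserExample, ParsesScalars) {
  auto options = ParseOptions::Defaults();
  std::shared_ptr<StructArray> parsed;
  ASSERT_OK(ParseFromString(options, scalars_only_src(), &parsed));
  // One struct slot per input row; scalar columns stay dictionary-encoded
  // (decode with DecodeStringDictionary) until a Converter runs.
  ASSERT_EQ(parsed->length(), 4);
}

}  // namespace json
}  // namespace arrow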
+ +#pragma once + +#include +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/future.h" + +namespace arrow { +namespace util { + +template +AsyncGenerator AsyncVectorIt(std::vector v) { + return MakeVectorGenerator(std::move(v)); +} + +template +AsyncGenerator FailAt(AsyncGenerator src, int failing_index) { + auto index = std::make_shared>(0); + return [src, index, failing_index]() { + auto idx = index->fetch_add(1); + if (idx >= failing_index) { + return Future::MakeFinished(Status::Invalid("XYZ")); + } + return src(); + }; +} + +template +AsyncGenerator SlowdownABit(AsyncGenerator source) { + return MakeMappedGenerator(std::move(source), [](const T& res) { + return SleepABitAsync().Then([res]() { return res; }); + }); +} + +template +class TrackingGenerator { + public: + explicit TrackingGenerator(AsyncGenerator source) + : state_(std::make_shared(std::move(source))) {} + + Future operator()() { + state_->num_read++; + return state_->source(); + } + + int num_read() { return state_->num_read.load(); } + + private: + struct State { + explicit State(AsyncGenerator source) : source(std::move(source)), num_read(0) {} + + AsyncGenerator source; + std::atomic num_read; + }; + + std::shared_ptr state_; +}; + +} // namespace util +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09e8f49dea9eb1023ce7db9813b240eed1cc0563 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h @@ -0,0 +1,231 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
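The combinators in async_test_util.h above compose: wrapping a vector generator in FailAt() and then TrackingGenerator lets a test assert exactly how many items were pulled before an injected error. A sketch under that assumption (the test name is illustrative):

#include "arrow/testing/async_test_util.h"

namespace arrow {
namespace util {

TEST(TrackingGeneratorExample, CountsReads) {
  // Fails on the third pull; earlier pulls pass through.
  TrackingGenerator<int> tracked(FailAt(AsyncVectorIt<int>({1, 2, 3}), 2));
  ASSERT_OK_AND_ASSIGN(int first, tracked().result());
  ASSERT_EQ(first, 1);
  ASSERT_EQ(tracked.num_read(), 1);
}

}  // namespace util
}  // namespace arrow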
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/array/builder_time.h" +#include "arrow/buffer.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { + +// ArrayFromVector: construct an Array from vectors of C values + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + if (is_valid[i]) { + ASSERT_OK(builder.Append(values[i])); + } else { + ASSERT_OK(builder.AppendNull()); + } + } + ASSERT_OK(builder.Finish(out)); +} + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& values, std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + ASSERT_OK(builder.Append(values[i])); + } + ASSERT_OK(builder.Finish(out)); +} + +// Overloads without a DataType argument, for parameterless types + +template +void ArrayFromVector(const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, is_valid, values, out); +} + +template +void ArrayFromVector(const std::vector& values, std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, values, out); +} + +// ChunkedArrayFromVector: construct a ChunkedArray from vectors of C values + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + ASSERT_EQ(is_valid.size(), values.size()); + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, is_valid[i], values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +// Overloads without a DataType argument, for parameterless types + +template +void ChunkedArrayFromVector(const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ChunkedArrayFromVector(type, is_valid, values, out); +} + +template +void ChunkedArrayFromVector(const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + 
ChunkedArrayFromVector(type, values, out); +} + +template +void FinishAndCheckPadding(BuilderType* builder, std::shared_ptr* out) { + ASSERT_OK_AND_ASSIGN(*out, builder->Finish()); + AssertZeroPadded(**out); + TestInitialized(**out); +} + +template +Status MakeArray(const std::vector& valid_bytes, const std::vector& values, + int64_t size, Builder* builder, std::shared_ptr* out) { + // Append the first 1000 + for (int64_t i = 0; i < size; ++i) { + if (valid_bytes[i] > 0) { + RETURN_NOT_OK(builder->Append(values[i])); + } else { + RETURN_NOT_OK(builder->AppendNull()); + } + } + return builder->Finish(out); +} + +template +struct VisitBuilder { + template ::BuilderType, + // need to let SFINAE drop this Visit when it would result in + // [](NullBuilder*){}(double_builder) + typename = decltype(std::declval()(std::declval()))> + Status Visit(const T&, ArrayBuilder* builder, Fn&& fn) { + fn(internal::checked_cast(builder)); + return Status::OK(); + } + + Status Visit(const DataType& t, ArrayBuilder* builder, Fn&& fn) { + return Status::NotImplemented("visiting builders of type ", t); + } +}; + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t initial_capacity, + int64_t visitor_repetitions, Fn&& fn) { + std::unique_ptr builder; + RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder)); + + if (initial_capacity != 0) { + RETURN_NOT_OK(builder->Resize(initial_capacity)); + } + + VisitBuilder visitor; + for (int64_t i = 0; i < visitor_repetitions; ++i) { + RETURN_NOT_OK( + VisitTypeInline(*builder->type(), &visitor, builder.get(), std::forward(fn))); + } + + std::shared_ptr out; + RETURN_NOT_OK(builder->Finish(&out)); + return std::move(out); +} + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t length, Fn&& fn) { + return ArrayFromBuilderVisitor(type, length, length, std::forward(fn)); +} + +template +static inline Status GetBitmapFromVector(const std::vector& is_valid, + std::shared_ptr* result) { + size_t length = is_valid.size(); + + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateEmptyBitmap(length)); + + uint8_t* bitmap = buffer->mutable_data(); + for (size_t i = 0; i < static_cast(length); ++i) { + if (is_valid[i]) { + bit_util::SetBit(bitmap, i); + } + } + + *result = buffer; + return Status::OK(); +} + +template +inline void BitmapFromVector(const std::vector& is_valid, + std::shared_ptr* out) { + ASSERT_OK(GetBitmapFromVector(is_valid, out)); +} + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h new file mode 100644 index 0000000000000000000000000000000000000000..846e3c7a1657850fe9f8ca91dfca31360dbf067d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h @@ -0,0 +1,211 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/extension_type.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class ARROW_TESTING_EXPORT UuidArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT UuidType : public ExtensionType { + public: + UuidType() : ExtensionType(fixed_size_binary(16)) {} + + std::string extension_name() const override { return "uuid"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "uuid-serialized"; } +}; + +class ARROW_TESTING_EXPORT SmallintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT TinyintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT ListExtensionArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT SmallintType : public ExtensionType { + public: + SmallintType() : ExtensionType(int16()) {} + + std::string extension_name() const override { return "smallint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "smallint"; } +}; + +class ARROW_TESTING_EXPORT TinyintType : public ExtensionType { + public: + TinyintType() : ExtensionType(int8()) {} + + std::string extension_name() const override { return "tinyint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "tinyint"; } +}; + +class ARROW_TESTING_EXPORT ListExtensionType : public ExtensionType { + public: + ListExtensionType() : ExtensionType(list(int32())) {} + + std::string extension_name() const override { return "list-ext"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "list-ext"; } +}; + +class ARROW_TESTING_EXPORT DictExtensionType : public ExtensionType { + public: + DictExtensionType() : ExtensionType(dictionary(int8(), utf8())) {} + + std::string extension_name() const override { return "dict-extension"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr 
storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "dict-extension-serialized"; } +}; + +class ARROW_TESTING_EXPORT Complex128Array : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT Complex128Type : public ExtensionType { + public: + Complex128Type() + : ExtensionType(struct_({::arrow::field("real", float64(), /*nullable=*/false), + ::arrow::field("imag", float64(), /*nullable=*/false)})) {} + + std::string extension_name() const override { return "complex128"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "complex128-serialized"; } +}; + +ARROW_TESTING_EXPORT +std::shared_ptr uuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr smallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr tinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr list_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr dict_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr complex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleUuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleSmallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleTinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleDictExtension(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleComplex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr MakeComplex128(const std::shared_ptr& real, + const std::shared_ptr& imag); + +// A RAII class that registers an extension type on construction +// and unregisters it on destruction. +class ARROW_TESTING_EXPORT ExtensionTypeGuard { + public: + explicit ExtensionTypeGuard(const std::shared_ptr& type); + explicit ExtensionTypeGuard(const DataTypeVector& types); + ~ExtensionTypeGuard(); + ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionTypeGuard); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(ExtensionTypeGuard); + + std::vector extension_names_; +}; + +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..c934dd279389068d5ebf60695284d403b2e2dcd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
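The extension-type registry is process-global, so the extension_type.h test helpers above pair each example type with ExtensionTypeGuard to register it only for the duration of a scope. A usage sketch (the test name is illustrative):

#include <gtest/gtest.h>

#include "arrow/testing/extension_type.h"

namespace arrow {

TEST(ExtensionTypeGuardExample, RegistersUuidForScope) {
  // "uuid" is registered here and unregistered when `guard` is destroyed,
  // so e.g. IPC round-trips inside this test can reconstruct the type.
  ExtensionTypeGuard guard(uuid());
  auto array = ExampleUuid();
  ASSERT_EQ(array->type()->id(), Type::EXTENSION);
}

}  // namespace arrow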
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+// GTest < 1.11
+#ifndef GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST
+#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(A)
+#endif
+// GTest < 1.10
+#ifndef TYPED_TEST_SUITE
+#define TYPED_TEST_SUITE TYPED_TEST_CASE
+#define TYPED_TEST_SUITE_P TYPED_TEST_CASE_P
+#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P
+#define REGISTER_TYPED_TEST_SUITE_P REGISTER_TYPED_TEST_CASE_P
+#define INSTANTIATE_TYPED_TEST_SUITE_P INSTANTIATE_TYPED_TEST_CASE_P
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d97a3ada724aec082180aea39a37386adca273c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h
@@ -0,0 +1,698 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "arrow/testing/uniform_real.h"
+#include "arrow/testing/visibility.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+class Array;
+
+namespace random {
+
+using SeedType = int32_t;
+constexpr SeedType kSeedMax = std::numeric_limits<SeedType>::max();
+
+class ARROW_TESTING_EXPORT RandomArrayGenerator {
+ public:
+  explicit RandomArrayGenerator(SeedType seed)
+      : seed_distribution_(static_cast<SeedType>(1), kSeedMax), seed_rng_(seed) {}
+
+  /// \brief Generate a null bitmap
+  ///
+  /// \param[in] size the size of the bitmap to generate
+  /// \param[in] null_probability the probability of a bit being zero
+  /// \param[in] alignment alignment for memory allocations (in bytes)
+  /// \param[in] memory_pool memory pool to allocate memory from
+  ///
+  /// \return a generated Buffer
+  std::shared_ptr<Buffer> NullBitmap(int64_t size, double null_probability = 0,
+                                     int64_t alignment = kDefaultBufferAlignment,
+                                     MemoryPool* memory_pool = default_memory_pool());
+
+  /// \brief Generate a random BooleanArray
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] true_probability the probability of a value being 1 / bit-set
+  /// \param[in] null_probability the probability of a value being null
+  /// \param[in] alignment alignment for memory allocations (in bytes)
+  /// \param[in] memory_pool memory pool to allocate memory from
+  ///
+  /// \return a generated Array
+  std::shared_ptr<Array> Boolean(int64_t size, double true_probability,
+                                 double null_probability = 0,
+                                 int64_t alignment = kDefaultBufferAlignment,
+                                 MemoryPool* memory_pool = default_memory_pool());
+  /// \brief Generate a random UInt8Array
+  ///
+  /// \param[in] size the size of the array to generate
+  /// \param[in] min the lower bound of the uniform distribution
+  ///
\param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt8(int64_t size, uint8_t min, uint8_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int8(int64_t size, int8_t min, int8_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt16(int64_t size, uint16_t min, uint16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt32(int64_t size, uint32_t min, uint32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment 
for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int32(int64_t size, int32_t min, int32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt64(int64_t size, uint64_t min, uint64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random HalfFloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the distribution + /// \param[in] max the upper bound of the distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float32(int64_t size, float min, float max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DoubleArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory 
allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float64(int64_t size, double min, double max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Date64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Date64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + template + std::shared_ptr Numeric(int64_t size, CType min, CType max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()) { + switch (ArrowType::type_id) { + case Type::UINT8: + return UInt8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT8: + return Int8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT16: + return UInt16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT16: + return Int16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT32: + return UInt32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT32: + return Int32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT64: + return UInt64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT64: + return Int64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::HALF_FLOAT: + return Float16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::FLOAT: + return Float32(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DOUBLE: + return Float64(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DATE64: + return Date64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + default: + return nullptr; + } + } + + /// \brief Generate a random Decimal128Array + /// + /// \param[in] type the type of the array to generate + /// (must be an instance of Decimal128Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Decimal128(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Decimal256Array + /// + /// \param[in] 
type the type of the array to generate + /// (must be an instance of Decimal256Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Decimal256(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate an array of offsets (for use in e.g. ListArray::FromArrays) + /// + /// \param[in] size the size of the array to generate + /// \param[in] first_offset the first offset value (usually 0) + /// \param[in] last_offset the last offset value (usually the size of the child array) + /// \param[in] null_probability the probability of an offset being null + /// \param[in] force_empty_nulls if true, null offsets must have 0 "length" + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Offsets(int64_t size, int32_t first_offset, int32_t last_offset, + double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + std::shared_ptr LargeOffsets(int64_t size, int64_t first_offset, + int64_t last_offset, double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr String(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringViewArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] max_data_buffer_length the data buffer size at which + /// a new chunk will be generated + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringView(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + std::optional max_data_buffer_length = {}, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeStringArray + /// + /// \param[in] size the size of the array to generate + /// 
\param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr LargeString(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringArray with repeated values + /// + /// \param[in] size the size of the array to generate + /// \param[in] unique the number of unique string values used + /// to populate the array + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Like StringWithRepeats but return BinaryArray + std::shared_ptr BinaryWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FixedSizeBinaryArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] byte_width the byte width of fixed-size binary items + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr FixedSizeBinary(int64_t size, int32_t byte_width, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr List(const Array& values, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListViewArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 
length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr ListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeListViewArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr LargeListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, + double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random MapArray + /// + /// \param[in] keys The underlying keys array + /// \param[in] items The underlying items array + /// \param[in] size The size of the generated map array + /// \param[in] null_probability the probability of a map value being null + /// \param[in] force_empty_nulls if true, null map entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Map(const std::shared_ptr& keys, + const std::shared_ptr& items, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random RunEndEncodedArray + /// + /// \param[in] value_type The DataType of the encoded values + /// \param[in] logical_size The logical length of the generated array + /// \param[in] null_probability the probability of a value being null + /// + /// \return a generated Array + std::shared_ptr RunEndEncoded(std::shared_ptr value_type, + int64_t logical_size, + double null_probability = 0.0); + + /// \brief Generate a random SparseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. + /// + /// \param[in] fields Vector of Arrays containing the data for each union field + /// \param[in] size The size of the generated sparse union array + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + std::shared_ptr SparseUnion(const ArrayVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DenseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. The offsets are incremented along + /// each child field. 
+
+  /// \brief Generate a random Array of the specified type, size, and null_probability.
+  ///
+  /// Generation parameters other than size and null_probability are determined based on
+  /// the type of Array to be generated.
+  /// If boolean the probabilities of true, false values are 0.25, 0.75 respectively.
+  /// If numeric min, max will be the least and greatest representable values.
+  /// If string min_length, max_length will be 0, sqrt(size) respectively.
+  ///
+  /// \param[in] type the type of Array to generate
+  /// \param[in] size the size of the Array to generate
+  /// \param[in] null_probability the probability of a slot being null
+  /// \param[in] alignment alignment for memory allocations (in bytes)
+  /// \param[in] memory_pool memory pool to allocate memory from
+  /// \return a generated Array
+  std::shared_ptr<Array> ArrayOf(std::shared_ptr<DataType> type, int64_t size,
+                                 double null_probability = 0,
+                                 int64_t alignment = kDefaultBufferAlignment,
+                                 MemoryPool* memory_pool = default_memory_pool());
+
+  /// \brief Generate an array with random data based on the given field. See BatchOf
+  /// for usage info.
+  std::shared_ptr<Array> ArrayOf(const Field& field, int64_t size,
+                                 int64_t alignment = kDefaultBufferAlignment,
+                                 MemoryPool* memory_pool = default_memory_pool());
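+
+  // Illustrative sketch (editorial): driving ArrayOf from a Field whose
+  // key-value metadata carries generation options (documented under BatchOf
+  // below). The helper arrow::key_value_metadata and its map overload are
+  // assumed available; the option keys shown follow that documentation.
+  //
+  //   auto f = arrow::field("x", arrow::int16(), /*nullable=*/true,
+  //                         arrow::key_value_metadata(
+  //                             {{"min", "0"}, {"max", "127"},
+  //                              {"null_probability", "0.2"}}));
+  //   arrow::random::RandomArrayGenerator rng(/*seed=*/42);
+  //   std::shared_ptr<arrow::Array> a = rng.ArrayOf(*f, /*size=*/1024);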
+
+  /// \brief Generate a record batch with random data of the specified length.
+  ///
+  /// Generation options are read from key-value metadata for each field, and may be
+  /// specified at any nesting level. For example, generation options for the child
+  /// values of a list array can be specified by constructing the list type with
+  /// list(field("item", int8(), options_metadata))
+  ///
+  /// The following options are supported:
+  ///
+  /// For all types except NullType:
+  /// - null_probability (double): range [0.0, 1.0] the probability of a null value.
+  ///   Default is 0.0 if the field is marked non-nullable, else it is 0.01
+  ///
+  /// For all numeric types T:
+  /// - min (T::c_type): the minimum value to generate (inclusive), default
+  ///   std::numeric_limits<T::c_type>::min()
+  /// - max (T::c_type): the maximum value to generate (inclusive), default
+  ///   std::numeric_limits<T::c_type>::max()
+  /// Note this means that, for example, min/max are int16_t values for HalfFloatType.
+  ///
+  /// For floating point types T for which is_physical_floating_type<T>:
+  /// - nan_probability (double): range [0.0, 1.0] the probability of a NaN value.
+  ///
+  /// For BooleanType:
+  /// - true_probability (double): range [0.0, 1.0] the probability of a true value.
+  ///
+  /// For DictionaryType:
+  /// - values (int32_t): the size of the dictionary.
+  ///   Other properties are passed to the generator for the dictionary indices. However,
+  ///   min and max cannot be specified. Note it is not possible to otherwise customize
+  ///   the generation of dictionary values.
+  ///
+  /// For list, string, and binary types T, including their large variants:
+  /// - min_length (T::offset_type): the minimum length of the child to generate,
+  ///   default 0
+  /// - max_length (T::offset_type): the maximum length of the child to generate,
+  ///   default 1024
+  ///
+  /// For string and binary types T (not including their large or view variants):
+  /// - unique (int32_t): if positive, this many distinct values will be generated
+  ///   and all array values will be one of these values, default -1
+  ///
+  /// For string and binary view types T:
+  /// - max_data_buffer_length (int64_t): the data buffer size at which a new chunk
+  ///   will be generated, default 32KB
+  ///
+  /// For MapType:
+  /// - values (int32_t): the number of key-value pairs to generate, which will be
+  ///   partitioned among the array values.
+  ///
+  /// For extension types:
+  /// - extension_allow_random_storage (bool): in general an extension array may have
+  ///   invariants on its storage beyond those already imposed by the arrow format,
+  ///   which may result in an invalid array if we just wrap randomly generated
+  ///   storage. Set this flag to explicitly allow wrapping of randomly generated
+  ///   storage.
+  std::shared_ptr<RecordBatch> BatchOf(
+      const FieldVector& fields, int64_t size,
+      int64_t alignment = kDefaultBufferAlignment,
+      MemoryPool* memory_pool = default_memory_pool());
+
+  SeedType seed() { return seed_distribution_(seed_rng_); }
+
+ private:
+  std::uniform_int_distribution<SeedType> seed_distribution_;
+  std::default_random_engine seed_rng_;
+};
+
+/// Generate a batch with random data. See RandomArrayGenerator::BatchOf.
+ARROW_TESTING_EXPORT
+std::shared_ptr<RecordBatch> GenerateBatch(
+    const FieldVector& fields, int64_t size, SeedType seed,
+    int64_t alignment = kDefaultBufferAlignment,
+    MemoryPool* memory_pool = default_memory_pool());
+
+/// Generate an array with random data. See RandomArrayGenerator::BatchOf.
+ARROW_TESTING_EXPORT
+std::shared_ptr<Array> GenerateArray(
+    const Field& field, int64_t size, SeedType seed,
+    int64_t alignment = kDefaultBufferAlignment,
+    MemoryPool* memory_pool = default_memory_pool());
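+
+// Illustrative sketch (editorial): metadata-driven batch generation via the
+// free function above. Field names, option values, and the availability of
+// arrow::key_value_metadata are assumptions for the example.
+//
+//   auto md = arrow::key_value_metadata({{"null_probability", "0.1"}});
+//   arrow::FieldVector fields = {
+//       arrow::field("id", arrow::int64()),
+//       arrow::field("score", arrow::float32(), /*nullable=*/true, md)};
+//   std::shared_ptr<arrow::RecordBatch> batch =
+//       arrow::random::GenerateBatch(fields, /*size=*/256, /*seed=*/42);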
+
+}  // namespace random
+
+//
+// Assorted functions
+//
+
+ARROW_TESTING_EXPORT
+void rand_day_millis(int64_t N, std::vector<DayTimeIntervalType::c_type>* out);
+ARROW_TESTING_EXPORT
+void rand_month_day_nanos(int64_t N,
+                          std::vector<MonthDayNanoIntervalType::c_type>* out);
+
+template <typename T, typename U>
+void randint(int64_t N, T lower, T upper, std::vector<U>* out) {
+  const int random_seed = 0;
+  std::default_random_engine gen(random_seed);
+  std::uniform_int_distribution<T> d(lower, upper);
+  out->resize(N, static_cast<U>(0));
+  std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast<U>(d(gen)); });
+}
+
+template <typename T>
+void random_real(int64_t n, uint32_t seed, T min_value, T max_value,
+                 std::vector<T>* out) {
+  std::default_random_engine gen(seed);
+  ::arrow::random::uniform_real_distribution<T> d(min_value, max_value);
+  out->resize(n, static_cast<T>(0));
+  std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast<T>(d(gen)); });
+}
+
+template <typename T, typename U>
+void rand_uniform_int(int64_t n, uint32_t seed, T min_value, T max_value, U* out) {
+  assert(out || (n == 0));
+  std::default_random_engine gen(seed);
+  std::uniform_int_distribution<T> d(min_value, max_value);
+  std::generate(out, out + n, [&d, &gen] { return static_cast<U>(d(gen)); });
+}
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd2569c6de0f642f65b56e777b7e10e68fca3337
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/date.h
@@ -0,0 +1,8237 @@
+#ifndef DATE_H
+#define DATE_H
+
+// The MIT License (MIT)
+//
+// Copyright (c) 2015, 2016, 2017 Howard Hinnant
+// Copyright (c) 2016 Adrian Colomitchi
+// Copyright (c) 2017 Florian Dang
+// Copyright (c) 2017 Paul Thompson
+// Copyright (c) 2018, 2019 Tomasz Kamiński
+// Copyright (c) 2019 Jiangang Zhuang
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// Our apologies. When the previous paragraph was written, lowercase had not yet
+// been invented (that would involve another several millennia of evolution).
+// We did not mean to shout.
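+
+// Illustrative sketch (editorial, not part of the vendored header): typical
+// calendar arithmetic with this library, using the composition operators and
+// constants declared below. The arrow_vendored::date namespace follows the
+// vendored layout in this file.
+//
+//   using namespace arrow_vendored::date;
+//   constexpr auto ymd = year{2016}/may/29;        // year_month_day
+//   constexpr sys_days dp = ymd;                   // days since 1970-01-01
+//   constexpr auto wd = weekday{dp};               // day of the week
+//   constexpr auto eom = year{2016}/feb/last;      // year_month_day_last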
+ +#ifndef HAS_STRING_VIEW +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_STRING_VIEW 1 +# else +# define HAS_STRING_VIEW 0 +# endif +#endif // HAS_STRING_VIEW + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if HAS_STRING_VIEW +# include +#endif +#include +#include + +#ifdef __GNUC__ +# pragma GCC diagnostic push +# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 7) +# pragma GCC diagnostic ignored "-Wpedantic" +# endif +# if __GNUC__ < 5 + // GCC 4.9 Bug 61489 Wrong warning with -Wmissing-field-initializers +# pragma GCC diagnostic ignored "-Wmissing-field-initializers" +# endif +#endif + +#ifdef _MSC_VER +# pragma warning(push) +// warning C4127: conditional expression is constant +# pragma warning(disable : 4127) +#endif + +namespace arrow_vendored +{ +namespace date +{ + +//---------------+ +// Configuration | +//---------------+ + +#ifndef ONLY_C_LOCALE +# define ONLY_C_LOCALE 0 +#endif + +#if defined(_MSC_VER) && (!defined(__clang__) || (_MSC_VER < 1910)) +// MSVC +# ifndef _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING +# define _SILENCE_CXX17_UNCAUGHT_EXCEPTION_DEPRECATION_WARNING +# endif +# if _MSC_VER < 1910 +// before VS2017 +# define CONSTDATA const +# define CONSTCD11 +# define CONSTCD14 +# define NOEXCEPT _NOEXCEPT +# else +// VS2017 and later +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 constexpr +# define NOEXCEPT noexcept +# endif + +#elif defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x5150 +// Oracle Developer Studio 12.6 and earlier +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 +# define NOEXCEPT noexcept + +#elif __cplusplus >= 201402 +// C++14 +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 constexpr +# define NOEXCEPT noexcept +#else +// C++11 +# define CONSTDATA constexpr const +# define CONSTCD11 constexpr +# define CONSTCD14 +# define NOEXCEPT noexcept +#endif + +#ifndef HAS_UNCAUGHT_EXCEPTIONS +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_UNCAUGHT_EXCEPTIONS 1 +# else +# define HAS_UNCAUGHT_EXCEPTIONS 0 +# endif +#endif // HAS_UNCAUGHT_EXCEPTIONS + +#ifndef HAS_VOID_T +# if __cplusplus >= 201703 || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define HAS_VOID_T 1 +# else +# define HAS_VOID_T 0 +# endif +#endif // HAS_VOID_T + +// Protect from Oracle sun macro +#ifdef sun +# undef sun +#endif + +// Work around for a NVCC compiler bug which causes it to fail +// to compile std::ratio_{multiply,divide} when used directly +// in the std::chrono::duration template instantiations below +namespace detail { +template +using ratio_multiply = decltype(std::ratio_multiply{}); + +template +using ratio_divide = decltype(std::ratio_divide{}); +} // namespace detail + +//-----------+ +// Interface | +//-----------+ + +// durations + +using days = std::chrono::duration + , std::chrono::hours::period>>; + +using weeks = std::chrono::duration + , days::period>>; + +using years = std::chrono::duration + , days::period>>; + +using months = std::chrono::duration + >>; + +// time_point + +template + using sys_time = std::chrono::time_point; + +using sys_days = sys_time; +using sys_seconds = sys_time; + +struct local_t {}; + +template + using local_time = std::chrono::time_point; + +using local_seconds = 
local_time; +using local_days = local_time; + +// types + +struct last_spec +{ + explicit last_spec() = default; +}; + +class day; +class month; +class year; + +class weekday; +class weekday_indexed; +class weekday_last; + +class month_day; +class month_day_last; +class month_weekday; +class month_weekday_last; + +class year_month; + +class year_month_day; +class year_month_day_last; +class year_month_weekday; +class year_month_weekday_last; + +// date composition operators + +CONSTCD11 year_month operator/(const year& y, const month& m) NOEXCEPT; +CONSTCD11 year_month operator/(const year& y, int m) NOEXCEPT; + +CONSTCD11 month_day operator/(const day& d, const month& m) NOEXCEPT; +CONSTCD11 month_day operator/(const day& d, int m) NOEXCEPT; +CONSTCD11 month_day operator/(const month& m, const day& d) NOEXCEPT; +CONSTCD11 month_day operator/(const month& m, int d) NOEXCEPT; +CONSTCD11 month_day operator/(int m, const day& d) NOEXCEPT; + +CONSTCD11 month_day_last operator/(const month& m, last_spec) NOEXCEPT; +CONSTCD11 month_day_last operator/(int m, last_spec) NOEXCEPT; +CONSTCD11 month_day_last operator/(last_spec, const month& m) NOEXCEPT; +CONSTCD11 month_day_last operator/(last_spec, int m) NOEXCEPT; + +CONSTCD11 month_weekday operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT; +CONSTCD11 month_weekday operator/(int m, const weekday_indexed& wdi) NOEXCEPT; +CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT; +CONSTCD11 month_weekday operator/(const weekday_indexed& wdi, int m) NOEXCEPT; + +CONSTCD11 month_weekday_last operator/(const month& m, const weekday_last& wdl) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(int m, const weekday_last& wdl) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, const month& m) NOEXCEPT; +CONSTCD11 month_weekday_last operator/(const weekday_last& wdl, int m) NOEXCEPT; + +CONSTCD11 year_month_day operator/(const year_month& ym, const day& d) NOEXCEPT; +CONSTCD11 year_month_day operator/(const year_month& ym, int d) NOEXCEPT; +CONSTCD11 year_month_day operator/(const year& y, const month_day& md) NOEXCEPT; +CONSTCD11 year_month_day operator/(int y, const month_day& md) NOEXCEPT; +CONSTCD11 year_month_day operator/(const month_day& md, const year& y) NOEXCEPT; +CONSTCD11 year_month_day operator/(const month_day& md, int y) NOEXCEPT; + +CONSTCD11 + year_month_day_last operator/(const year_month& ym, last_spec) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const year& y, const month_day_last& mdl) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(int y, const month_day_last& mdl) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const month_day_last& mdl, const year& y) NOEXCEPT; +CONSTCD11 + year_month_day_last operator/(const month_day_last& mdl, int y) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const year& y, const month_weekday& mwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(int y, const month_weekday& mwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const month_weekday& mwd, const year& y) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator/(const month_weekday& mwd, int y) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last 
+operator/(int y, const month_weekday_last& mwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator/(const month_weekday_last& mwdl, int y) NOEXCEPT; + +// Detailed interface + +// day + +class day +{ + unsigned char d_; + +public: + day() = default; + explicit CONSTCD11 day(unsigned d) NOEXCEPT; + + CONSTCD14 day& operator++() NOEXCEPT; + CONSTCD14 day operator++(int) NOEXCEPT; + CONSTCD14 day& operator--() NOEXCEPT; + CONSTCD14 day operator--(int) NOEXCEPT; + + CONSTCD14 day& operator+=(const days& d) NOEXCEPT; + CONSTCD14 day& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator< (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator> (const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const day& x, const day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const day& x, const day& y) NOEXCEPT; + +CONSTCD11 day operator+(const day& x, const days& y) NOEXCEPT; +CONSTCD11 day operator+(const days& x, const day& y) NOEXCEPT; +CONSTCD11 day operator-(const day& x, const days& y) NOEXCEPT; +CONSTCD11 days operator-(const day& x, const day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d); + +// month + +class month +{ + unsigned char m_; + +public: + month() = default; + explicit CONSTCD11 month(unsigned m) NOEXCEPT; + + CONSTCD14 month& operator++() NOEXCEPT; + CONSTCD14 month operator++(int) NOEXCEPT; + CONSTCD14 month& operator--() NOEXCEPT; + CONSTCD14 month operator--(int) NOEXCEPT; + + CONSTCD14 month& operator+=(const months& m) NOEXCEPT; + CONSTCD14 month& operator-=(const months& m) NOEXCEPT; + + CONSTCD11 explicit operator unsigned() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator< (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator> (const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month& x, const month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month& x, const month& y) NOEXCEPT; + +CONSTCD14 month operator+(const month& x, const months& y) NOEXCEPT; +CONSTCD14 month operator+(const months& x, const month& y) NOEXCEPT; +CONSTCD14 month operator-(const month& x, const months& y) NOEXCEPT; +CONSTCD14 months operator-(const month& x, const month& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m); + +// year + +class year +{ + short y_; + +public: + year() = default; + explicit CONSTCD11 year(int y) NOEXCEPT; + + CONSTCD14 year& operator++() NOEXCEPT; + CONSTCD14 year operator++(int) NOEXCEPT; + CONSTCD14 year& operator--() NOEXCEPT; + CONSTCD14 year operator--(int) NOEXCEPT; + + CONSTCD14 year& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 year operator-() const NOEXCEPT; + CONSTCD11 year operator+() const NOEXCEPT; + + CONSTCD11 bool is_leap() const NOEXCEPT; + + CONSTCD11 explicit operator int() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + + static CONSTCD11 year min() NOEXCEPT { return year{-32767}; } + static CONSTCD11 year max() 
NOEXCEPT { return year{32767}; } +}; + +CONSTCD11 bool operator==(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator< (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator> (const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year& x, const year& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year& x, const year& y) NOEXCEPT; + +CONSTCD11 year operator+(const year& x, const years& y) NOEXCEPT; +CONSTCD11 year operator+(const years& x, const year& y) NOEXCEPT; +CONSTCD11 year operator-(const year& x, const years& y) NOEXCEPT; +CONSTCD11 years operator-(const year& x, const year& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y); + +// weekday + +class weekday +{ + unsigned char wd_; +public: + weekday() = default; + explicit CONSTCD11 weekday(unsigned wd) NOEXCEPT; + CONSTCD14 weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit weekday(const local_days& dp) NOEXCEPT; + + CONSTCD14 weekday& operator++() NOEXCEPT; + CONSTCD14 weekday operator++(int) NOEXCEPT; + CONSTCD14 weekday& operator--() NOEXCEPT; + CONSTCD14 weekday operator--(int) NOEXCEPT; + + CONSTCD14 weekday& operator+=(const days& d) NOEXCEPT; + CONSTCD14 weekday& operator-=(const days& d) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; + + CONSTCD11 unsigned c_encoding() const NOEXCEPT; + CONSTCD11 unsigned iso_encoding() const NOEXCEPT; + + CONSTCD11 weekday_indexed operator[](unsigned index) const NOEXCEPT; + CONSTCD11 weekday_last operator[](last_spec) const NOEXCEPT; + +private: + static CONSTCD14 unsigned char weekday_from_days(int z) NOEXCEPT; + + friend CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + friend CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; + template + friend std::basic_ostream& + operator<<(std::basic_ostream& os, const weekday& wd); + friend class weekday_indexed; +}; + +CONSTCD11 bool operator==(const weekday& x, const weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday& x, const weekday& y) NOEXCEPT; + +CONSTCD14 weekday operator+(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 weekday operator+(const days& x, const weekday& y) NOEXCEPT; +CONSTCD14 weekday operator-(const weekday& x, const days& y) NOEXCEPT; +CONSTCD14 days operator-(const weekday& x, const weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd); + +// weekday_indexed + +class weekday_indexed +{ + unsigned char wd_ : 4; + unsigned char index_ : 4; + +public: + weekday_indexed() = default; + CONSTCD11 weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_indexed& wdi); + +// weekday_last + +class weekday_last +{ + date::weekday wd_; + +public: + explicit CONSTCD11 weekday_last(const date::weekday& wd) NOEXCEPT; + + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const weekday_last& x, const 
weekday_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl); + +namespace detail +{ + +struct unspecified_month_disambiguator {}; + +} // namespace detail + +// year_month + +class year_month +{ + date::year y_; + date::month m_; + +public: + year_month() = default; + CONSTCD11 year_month(const date::year& y, const date::month& m) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + + template + CONSTCD14 year_month& operator+=(const months& dm) NOEXCEPT; + template + CONSTCD14 year_month& operator-=(const months& dm) NOEXCEPT; + CONSTCD14 year_month& operator+=(const years& dy) NOEXCEPT; + CONSTCD14 year_month& operator-=(const years& dy) NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month& x, const year_month& y) NOEXCEPT; + +template +CONSTCD14 year_month operator+(const year_month& ym, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month operator+(const months& dm, const year_month& ym) NOEXCEPT; +template +CONSTCD14 year_month operator-(const year_month& ym, const months& dm) NOEXCEPT; + +CONSTCD11 months operator-(const year_month& x, const year_month& y) NOEXCEPT; +CONSTCD11 year_month operator+(const year_month& ym, const years& dy) NOEXCEPT; +CONSTCD11 year_month operator+(const years& dy, const year_month& ym) NOEXCEPT; +CONSTCD11 year_month operator-(const year_month& ym, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym); + +// month_day + +class month_day +{ + date::month m_; + date::day d_; + +public: + month_day() = default; + CONSTCD11 month_day(const date::month& m, const date::day& d) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const month_day& x, const month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day& x, const month_day& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md); + +// month_day_last + +class month_day_last +{ + date::month m_; + +public: + CONSTCD11 explicit month_day_last(const date::month& m) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator< (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator> (const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool 
operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT; +CONSTCD11 bool operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl); + +// month_weekday + +class month_weekday +{ + date::month m_; + date::weekday_indexed wdi_; +public: + CONSTCD11 month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT; +CONSTCD11 bool operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd); + +// month_weekday_last + +class month_weekday_last +{ + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 month_weekday_last(const date::month& m, + const date::weekday_last& wd) NOEXCEPT; + + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl); + +// class year_month_day + +class year_month_day +{ + date::year y_; + date::month m_; + date::day d_; + +public: + year_month_day() = default; + CONSTCD11 year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT; + CONSTCD14 year_month_day(const year_month_day_last& ymdl) NOEXCEPT; + + CONSTCD14 year_month_day(sys_days dp) NOEXCEPT; + CONSTCD14 explicit year_month_day(local_days dp) NOEXCEPT; + + template + CONSTCD14 year_month_day& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_day from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 bool operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator< (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator> (const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator<=(const year_month_day& x, const year_month_day& y) NOEXCEPT; +CONSTCD11 bool operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT; + +template +CONSTCD14 year_month_day operator+(const year_month_day& ymd, const months& dm) NOEXCEPT; +template +CONSTCD14 year_month_day operator+(const months& dm, const year_month_day& ymd) NOEXCEPT; +template +CONSTCD14 year_month_day operator-(const year_month_day& ymd, const months& dm) NOEXCEPT; +CONSTCD11 year_month_day operator+(const 
year_month_day& ymd, const years& dy) NOEXCEPT; +CONSTCD11 year_month_day operator+(const years& dy, const year_month_day& ymd) NOEXCEPT; +CONSTCD11 year_month_day operator-(const year_month_day& ymd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd); + +// year_month_day_last + +class year_month_day_last +{ + date::year y_; + date::month_day_last mdl_; + +public: + CONSTCD11 year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT; + + template + CONSTCD14 year_month_day_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_day_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_day_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_day_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::month_day_last month_day_last() const NOEXCEPT; + CONSTCD14 date::day day() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator< (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator> (const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; +CONSTCD11 + bool operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT; + +template +CONSTCD14 +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl); + +// year_month_weekday + +class year_month_weekday +{ + date::year y_; + date::month m_; + date::weekday_indexed wdi_; + +public: + year_month_weekday() = default; + CONSTCD11 year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT; + CONSTCD14 year_month_weekday(const sys_days& dp) NOEXCEPT; + CONSTCD14 explicit year_month_weekday(const local_days& dp) NOEXCEPT; + + template + CONSTCD14 year_month_weekday& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_weekday& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 unsigned index() const NOEXCEPT; + CONSTCD11 date::weekday_indexed weekday_indexed() const NOEXCEPT; + + 
CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD14 bool ok() const NOEXCEPT; + +private: + static CONSTCD14 year_month_weekday from_days(days dp) NOEXCEPT; + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 + bool operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; +CONSTCD11 + bool operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi); + +// year_month_weekday_last + +class year_month_weekday_last +{ + date::year y_; + date::month m_; + date::weekday_last wdl_; + +public: + CONSTCD11 year_month_weekday_last(const date::year& y, const date::month& m, + const date::weekday_last& wdl) NOEXCEPT; + + template + CONSTCD14 year_month_weekday_last& operator+=(const months& m) NOEXCEPT; + template + CONSTCD14 year_month_weekday_last& operator-=(const months& m) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator+=(const years& y) NOEXCEPT; + CONSTCD14 year_month_weekday_last& operator-=(const years& y) NOEXCEPT; + + CONSTCD11 date::year year() const NOEXCEPT; + CONSTCD11 date::month month() const NOEXCEPT; + CONSTCD11 date::weekday weekday() const NOEXCEPT; + CONSTCD11 date::weekday_last weekday_last() const NOEXCEPT; + + CONSTCD14 operator sys_days() const NOEXCEPT; + CONSTCD14 explicit operator local_days() const NOEXCEPT; + CONSTCD11 bool ok() const NOEXCEPT; + +private: + CONSTCD14 days to_days() const NOEXCEPT; +}; + +CONSTCD11 +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +CONSTCD11 +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT; + +template +CONSTCD14 +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT; + +CONSTCD11 +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday_last& ymwdl); + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +inline namespace literals +{ + +CONSTCD11 date::day operator "" _d(unsigned long long d) NOEXCEPT; +CONSTCD11 date::year operator "" _y(unsigned long long y) NOEXCEPT; + +} // inline namespace literals +#endif // !defined(_MSC_VER) || (_MSC_VER 
>= 1900) + +// CONSTDATA date::month January{1}; +// CONSTDATA date::month February{2}; +// CONSTDATA date::month March{3}; +// CONSTDATA date::month April{4}; +// CONSTDATA date::month May{5}; +// CONSTDATA date::month June{6}; +// CONSTDATA date::month July{7}; +// CONSTDATA date::month August{8}; +// CONSTDATA date::month September{9}; +// CONSTDATA date::month October{10}; +// CONSTDATA date::month November{11}; +// CONSTDATA date::month December{12}; +// +// CONSTDATA date::weekday Sunday{0u}; +// CONSTDATA date::weekday Monday{1u}; +// CONSTDATA date::weekday Tuesday{2u}; +// CONSTDATA date::weekday Wednesday{3u}; +// CONSTDATA date::weekday Thursday{4u}; +// CONSTDATA date::weekday Friday{5u}; +// CONSTDATA date::weekday Saturday{6u}; + +#if HAS_VOID_T + +template > +struct is_clock + : std::false_type +{}; + +template +struct is_clock> + : std::true_type +{}; + +template inline constexpr bool is_clock_v = is_clock::value; + +#endif // HAS_VOID_T + +//----------------+ +// Implementation | +//----------------+ + +// utilities +namespace detail { + +template> +class save_istream +{ +protected: + std::basic_ios& is_; + CharT fill_; + std::ios::fmtflags flags_; + std::streamsize precision_; + std::streamsize width_; + std::basic_ostream* tie_; + std::locale loc_; + +public: + ~save_istream() + { + is_.fill(fill_); + is_.flags(flags_); + is_.precision(precision_); + is_.width(width_); + is_.imbue(loc_); + is_.tie(tie_); + } + + save_istream(const save_istream&) = delete; + save_istream& operator=(const save_istream&) = delete; + + explicit save_istream(std::basic_ios& is) + : is_(is) + , fill_(is.fill()) + , flags_(is.flags()) + , precision_(is.precision()) + , width_(is.width(0)) + , tie_(is.tie(nullptr)) + , loc_(is.getloc()) + { + if (tie_ != nullptr) + tie_->flush(); + } +}; + +template> +class save_ostream + : private save_istream +{ +public: + ~save_ostream() + { + if ((this->flags_ & std::ios::unitbuf) && +#if HAS_UNCAUGHT_EXCEPTIONS + std::uncaught_exceptions() == 0 && +#else + !std::uncaught_exception() && +#endif + this->is_.good()) + this->is_.rdbuf()->pubsync(); + } + + save_ostream(const save_ostream&) = delete; + save_ostream& operator=(const save_ostream&) = delete; + + explicit save_ostream(std::basic_ios& os) + : save_istream(os) + { + } +}; + +template +struct choose_trunc_type +{ + static const int digits = std::numeric_limits::digits; + using type = typename std::conditional + < + digits < 32, + std::int32_t, + typename std::conditional + < + digits < 64, + std::int64_t, +#ifdef __SIZEOF_INT128__ + __int128 +#else + std::int64_t +#endif + >::type + >::type; +}; + +template +CONSTCD11 +inline +typename std::enable_if +< + !std::chrono::treat_as_floating_point::value, + T +>::type +trunc(T t) NOEXCEPT +{ + return t; +} + +template +CONSTCD14 +inline +typename std::enable_if +< + std::chrono::treat_as_floating_point::value, + T +>::type +trunc(T t) NOEXCEPT +{ + using std::numeric_limits; + using I = typename choose_trunc_type::type; + CONSTDATA auto digits = numeric_limits::digits; + static_assert(digits < numeric_limits::digits, ""); + CONSTDATA auto max = I{1} << (digits-1); + CONSTDATA auto min = -max; + const auto negative = t < T{0}; + if (min <= t && t <= max && t != 0 && t == t) + { + t = static_cast(static_cast(t)); + if (t == 0 && negative) + t = -t; + } + return t; +} + +template +struct static_gcd +{ + static const std::intmax_t value = static_gcd::value; +}; + +template +struct static_gcd +{ + static const std::intmax_t value = Xp; +}; + +template <> 
+struct static_gcd<0, 0> +{ + static const std::intmax_t value = 1; +}; + +template +struct no_overflow +{ +private: + static const std::intmax_t gcd_n1_n2 = static_gcd::value; + static const std::intmax_t gcd_d1_d2 = static_gcd::value; + static const std::intmax_t n1 = R1::num / gcd_n1_n2; + static const std::intmax_t d1 = R1::den / gcd_d1_d2; + static const std::intmax_t n2 = R2::num / gcd_n1_n2; + static const std::intmax_t d2 = R2::den / gcd_d1_d2; +#ifdef __cpp_constexpr + static const std::intmax_t max = std::numeric_limits::max(); +#else + static const std::intmax_t max = LLONG_MAX; +#endif + + template + struct mul // overflow == false + { + static const std::intmax_t value = Xp * Yp; + }; + + template + struct mul + { + static const std::intmax_t value = 1; + }; + +public: + static const bool value = (n1 <= max / d2) && (n2 <= max / d1); + typedef std::ratio::value, + mul::value> type; +}; + +} // detail + +// trunc towards zero +template +CONSTCD11 +inline +typename std::enable_if +< + detail::no_overflow::value, + To +>::type +trunc(const std::chrono::duration& d) +{ + return To{detail::trunc(std::chrono::duration_cast(d).count())}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + !detail::no_overflow::value, + To +>::type +trunc(const std::chrono::duration& d) +{ + using std::chrono::duration_cast; + using std::chrono::duration; + using rep = typename std::common_type::type; + return To{detail::trunc(duration_cast(duration_cast>(d)).count())}; +} + +#ifndef HAS_CHRONO_ROUNDING +# if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023918 || (_MSC_FULL_VER >= 190000000 && defined (__clang__))) +# define HAS_CHRONO_ROUNDING 1 +# elif defined(__cpp_lib_chrono) && __cplusplus > 201402 && __cpp_lib_chrono >= 201510 +# define HAS_CHRONO_ROUNDING 1 +# elif defined(_LIBCPP_VERSION) && __cplusplus > 201402 && _LIBCPP_VERSION >= 3800 +# define HAS_CHRONO_ROUNDING 1 +# else +# define HAS_CHRONO_ROUNDING 0 +# endif +#endif // HAS_CHRONO_ROUNDING + +#if HAS_CHRONO_ROUNDING == 0 + +// round down +template +CONSTCD14 +inline +typename std::enable_if +< + detail::no_overflow::value, + To +>::type +floor(const std::chrono::duration& d) +{ + auto t = trunc(d); + if (t > d) + return t - To{1}; + return t; +} + +template +CONSTCD14 +inline +typename std::enable_if +< + !detail::no_overflow::value, + To +>::type +floor(const std::chrono::duration& d) +{ + using rep = typename std::common_type::type; + return floor(floor>(d)); +} + +// round to nearest, to even on tie +template +CONSTCD14 +inline +To +round(const std::chrono::duration& d) +{ + auto t0 = floor(d); + auto t1 = t0 + To{1}; + if (t1 == To{0} && t0 < To{0}) + t1 = -t1; + auto diff0 = d - t0; + auto diff1 = t1 - d; + if (diff0 == diff1) + { + if (t0 - trunc(t0/2)*2 == To{0}) + return t0; + return t1; + } + if (diff0 < diff1) + return t0; + return t1; +} + +// round up +template +CONSTCD14 +inline +To +ceil(const std::chrono::duration& d) +{ + auto t = trunc(d); + if (t < d) + return t + To{1}; + return t; +} + +template ::is_signed + >::type> +CONSTCD11 +std::chrono::duration +abs(std::chrono::duration d) +{ + return d >= d.zero() ? 
d : static_cast(-d); +} + +// round down +template +CONSTCD11 +inline +std::chrono::time_point +floor(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{date::floor(tp.time_since_epoch())}; +} + +// round to nearest, to even on tie +template +CONSTCD11 +inline +std::chrono::time_point +round(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round(tp.time_since_epoch())}; +} + +// round up +template +CONSTCD11 +inline +std::chrono::time_point +ceil(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{ceil(tp.time_since_epoch())}; +} + +#else // HAS_CHRONO_ROUNDING == 1 + +using std::chrono::floor; +using std::chrono::ceil; +using std::chrono::round; +using std::chrono::abs; + +#endif // HAS_CHRONO_ROUNDING + +namespace detail +{ + +template +CONSTCD14 +inline +typename std::enable_if +< + !std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return round(d); +} + +template +CONSTCD14 +inline +typename std::enable_if +< + std::chrono::treat_as_floating_point::value, + To +>::type +round_i(const std::chrono::duration& d) +{ + return d; +} + +template +CONSTCD11 +inline +std::chrono::time_point +round_i(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{round_i(tp.time_since_epoch())}; +} + +} // detail + +// trunc towards zero +template +CONSTCD11 +inline +std::chrono::time_point +trunc(const std::chrono::time_point& tp) +{ + using std::chrono::time_point; + return time_point{trunc(tp.time_since_epoch())}; +} + +// day + +CONSTCD11 inline day::day(unsigned d) NOEXCEPT : d_(static_cast(d)) {} +CONSTCD14 inline day& day::operator++() NOEXCEPT {++d_; return *this;} +CONSTCD14 inline day day::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline day& day::operator--() NOEXCEPT {--d_; return *this;} +CONSTCD14 inline day day::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline day& day::operator+=(const days& d) NOEXCEPT {*this = *this + d; return *this;} +CONSTCD14 inline day& day::operator-=(const days& d) NOEXCEPT {*this = *this - d; return *this;} +CONSTCD11 inline day::operator unsigned() const NOEXCEPT {return d_;} +CONSTCD11 inline bool day::ok() const NOEXCEPT {return 1 <= d_ && d_ <= 31;} + +CONSTCD11 +inline +bool +operator==(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const day& x, const day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const day& x, const day& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const day& x, const day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const day& x, const day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const day& x, const day& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +days +operator-(const day& x, const day& y) NOEXCEPT +{ + return days{static_cast(static_cast(x) + - static_cast(y))}; +} + +CONSTCD11 +inline +day +operator+(const day& x, const days& y) NOEXCEPT +{ + return day{static_cast(x) + static_cast(y.count())}; +} + +CONSTCD11 +inline +day +operator+(const days& x, const day& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +day +operator-(const day& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + 
+template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const day& d) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(d); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const day& d) +{ + detail::low_level_fmt(os, d); + if (!d.ok()) + os << " is not a valid day"; + return os; +} + +// month + +CONSTCD11 inline month::month(unsigned m) NOEXCEPT : m_(static_cast(m)) {} +CONSTCD14 inline month& month::operator++() NOEXCEPT {*this += months{1}; return *this;} +CONSTCD14 inline month month::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline month& month::operator--() NOEXCEPT {*this -= months{1}; return *this;} +CONSTCD14 inline month month::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +month& +month::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +CONSTCD14 +inline +month& +month::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD11 inline month::operator unsigned() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month::ok() const NOEXCEPT {return 1 <= m_ && m_ <= 12;} + +CONSTCD11 +inline +bool +operator==(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const month& x, const month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month& x, const month& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const month& x, const month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month& x, const month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month& x, const month& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD14 +inline +months +operator-(const month& x, const month& y) NOEXCEPT +{ + auto const d = static_cast(x) - static_cast(y); + return months(d <= 11 ? d : d + 12); +} + +CONSTCD14 +inline +month +operator+(const month& x, const months& y) NOEXCEPT +{ + auto const mu = static_cast(static_cast(x)) + y.count() - 1; + auto const yr = (mu >= 0 ? 
mu : mu-11) / 12; + return month{static_cast(mu - yr * 12 + 1)}; +} + +CONSTCD14 +inline +month +operator+(const months& x, const month& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +month +operator-(const month& x, const months& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month& m) +{ + if (m.ok()) + { + CharT fmt[] = {'%', 'b', 0}; + os << format(os.getloc(), fmt, m); + } + else + os << static_cast(m); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month& m) +{ + detail::low_level_fmt(os, m); + if (!m.ok()) + os << " is not a valid month"; + return os; +} + +// year + +CONSTCD11 inline year::year(int y) NOEXCEPT : y_(static_cast(y)) {} +CONSTCD14 inline year& year::operator++() NOEXCEPT {++y_; return *this;} +CONSTCD14 inline year year::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline year& year::operator--() NOEXCEPT {--y_; return *this;} +CONSTCD14 inline year year::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} +CONSTCD14 inline year& year::operator+=(const years& y) NOEXCEPT {*this = *this + y; return *this;} +CONSTCD14 inline year& year::operator-=(const years& y) NOEXCEPT {*this = *this - y; return *this;} +CONSTCD11 inline year year::operator-() const NOEXCEPT {return year{-y_};} +CONSTCD11 inline year year::operator+() const NOEXCEPT {return *this;} + +CONSTCD11 +inline +bool +year::is_leap() const NOEXCEPT +{ + return y_ % 4 == 0 && (y_ % 100 != 0 || y_ % 400 == 0); +} + +CONSTCD11 inline year::operator int() const NOEXCEPT {return y_;} + +CONSTCD11 +inline +bool +year::ok() const NOEXCEPT +{ + return y_ != std::numeric_limits::min(); +} + +CONSTCD11 +inline +bool +operator==(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) == static_cast(y); +} + +CONSTCD11 +inline +bool +operator!=(const year& x, const year& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year& x, const year& y) NOEXCEPT +{ + return static_cast(x) < static_cast(y); +} + +CONSTCD11 +inline +bool +operator>(const year& x, const year& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year& x, const year& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year& x, const year& y) NOEXCEPT +{ + return !(x < y); +} + +CONSTCD11 +inline +years +operator-(const year& x, const year& y) NOEXCEPT +{ + return years{static_cast(x) - static_cast(y)}; +} + +CONSTCD11 +inline +year +operator+(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) + y.count()}; +} + +CONSTCD11 +inline +year +operator+(const years& x, const year& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD11 +inline +year +operator-(const year& x, const years& y) NOEXCEPT +{ + return year{static_cast(x) - y.count()}; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year& y) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::internal); + os.width(4 + (y < year{0})); + os.imbue(std::locale::classic()); + os << static_cast(y); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year& y) +{ + detail::low_level_fmt(os, y); + if (!y.ok()) + os << " is not a valid year"; + return os; +} + +// weekday + +CONSTCD14 +inline +unsigned char 
+weekday::weekday_from_days(int z) NOEXCEPT +{ + auto u = static_cast(z); + return static_cast(z >= -4 ? (u+4) % 7 : u % 7); +} + +CONSTCD11 +inline +weekday::weekday(unsigned wd) NOEXCEPT + : wd_(static_cast(wd != 7 ? wd : 0)) + {} + +CONSTCD14 +inline +weekday::weekday(const sys_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 +inline +weekday::weekday(const local_days& dp) NOEXCEPT + : wd_(weekday_from_days(dp.time_since_epoch().count())) + {} + +CONSTCD14 inline weekday& weekday::operator++() NOEXCEPT {*this += days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator++(int) NOEXCEPT {auto tmp(*this); ++(*this); return tmp;} +CONSTCD14 inline weekday& weekday::operator--() NOEXCEPT {*this -= days{1}; return *this;} +CONSTCD14 inline weekday weekday::operator--(int) NOEXCEPT {auto tmp(*this); --(*this); return tmp;} + +CONSTCD14 +inline +weekday& +weekday::operator+=(const days& d) NOEXCEPT +{ + *this = *this + d; + return *this; +} + +CONSTCD14 +inline +weekday& +weekday::operator-=(const days& d) NOEXCEPT +{ + *this = *this - d; + return *this; +} + +CONSTCD11 inline bool weekday::ok() const NOEXCEPT {return wd_ <= 6;} + +CONSTCD11 +inline +unsigned weekday::c_encoding() const NOEXCEPT +{ + return unsigned{wd_}; +} + +CONSTCD11 +inline +unsigned weekday::iso_encoding() const NOEXCEPT +{ + return unsigned{((wd_ == 0u) ? 7u : wd_)}; +} + +CONSTCD11 +inline +bool +operator==(const weekday& x, const weekday& y) NOEXCEPT +{ + return x.wd_ == y.wd_; +} + +CONSTCD11 +inline +bool +operator!=(const weekday& x, const weekday& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD14 +inline +days +operator-(const weekday& x, const weekday& y) NOEXCEPT +{ + auto const wdu = x.wd_ - y.wd_; + auto const wk = (wdu >= 0 ? wdu : wdu-6) / 7; + return days{wdu - wk * 7}; +} + +CONSTCD14 +inline +weekday +operator+(const weekday& x, const days& y) NOEXCEPT +{ + auto const wdu = static_cast(static_cast(x.wd_)) + y.count(); + auto const wk = (wdu >= 0 ? 
wdu : wdu-6) / 7; + return weekday{static_cast(wdu - wk * 7)}; +} + +CONSTCD14 +inline +weekday +operator+(const days& x, const weekday& y) NOEXCEPT +{ + return y + x; +} + +CONSTCD14 +inline +weekday +operator-(const weekday& x, const days& y) NOEXCEPT +{ + return x + -y; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday& wd) +{ + if (wd.ok()) + { + CharT fmt[] = {'%', 'a', 0}; + os << format(fmt, wd); + } + else + os << wd.c_encoding(); + return os; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday& wd) +{ + detail::low_level_fmt(os, wd); + if (!wd.ok()) + os << " is not a valid weekday"; + return os; +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +inline namespace literals +{ + +CONSTCD11 +inline +date::day +operator "" _d(unsigned long long d) NOEXCEPT +{ + return date::day{static_cast(d)}; +} + +CONSTCD11 +inline +date::year +operator "" _y(unsigned long long y) NOEXCEPT +{ + return date::year(static_cast(y)); +} +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +CONSTDATA date::last_spec last{}; + +CONSTDATA date::month jan{1}; +CONSTDATA date::month feb{2}; +CONSTDATA date::month mar{3}; +CONSTDATA date::month apr{4}; +CONSTDATA date::month may{5}; +CONSTDATA date::month jun{6}; +CONSTDATA date::month jul{7}; +CONSTDATA date::month aug{8}; +CONSTDATA date::month sep{9}; +CONSTDATA date::month oct{10}; +CONSTDATA date::month nov{11}; +CONSTDATA date::month dec{12}; + +CONSTDATA date::weekday sun{0u}; +CONSTDATA date::weekday mon{1u}; +CONSTDATA date::weekday tue{2u}; +CONSTDATA date::weekday wed{3u}; +CONSTDATA date::weekday thu{4u}; +CONSTDATA date::weekday fri{5u}; +CONSTDATA date::weekday sat{6u}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +} // inline namespace literals +#endif + +CONSTDATA date::month January{1}; +CONSTDATA date::month February{2}; +CONSTDATA date::month March{3}; +CONSTDATA date::month April{4}; +CONSTDATA date::month May{5}; +CONSTDATA date::month June{6}; +CONSTDATA date::month July{7}; +CONSTDATA date::month August{8}; +CONSTDATA date::month September{9}; +CONSTDATA date::month October{10}; +CONSTDATA date::month November{11}; +CONSTDATA date::month December{12}; + +CONSTDATA date::weekday Monday{1}; +CONSTDATA date::weekday Tuesday{2}; +CONSTDATA date::weekday Wednesday{3}; +CONSTDATA date::weekday Thursday{4}; +CONSTDATA date::weekday Friday{5}; +CONSTDATA date::weekday Saturday{6}; +CONSTDATA date::weekday Sunday{7}; + +// weekday_indexed + +CONSTCD11 +inline +weekday +weekday_indexed::weekday() const NOEXCEPT +{ + return date::weekday{static_cast(wd_)}; +} + +CONSTCD11 inline unsigned weekday_indexed::index() const NOEXCEPT {return index_;} + +CONSTCD11 +inline +bool +weekday_indexed::ok() const NOEXCEPT +{ + return weekday().ok() && 1 <= index_ && index_ <= 5; +} + +#ifdef __GNUC__ +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wconversion" +#endif // __GNUC__ + +CONSTCD11 +inline +weekday_indexed::weekday_indexed(const date::weekday& wd, unsigned index) NOEXCEPT + : wd_(static_cast(static_cast(wd.wd_))) + , index_(static_cast(index)) + {} + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif // __GNUC__ + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_indexed& wdi) +{ + return low_level_fmt(os, wdi.weekday()) << '[' << wdi.index() << ']'; +} + +} // namespace detail + +template +inline +std::basic_ostream& 
+operator<<(std::basic_ostream& os, const weekday_indexed& wdi) +{ + detail::low_level_fmt(os, wdi); + if (!wdi.ok()) + os << " is not a valid weekday_indexed"; + return os; +} + +CONSTCD11 +inline +weekday_indexed +weekday::operator[](unsigned index) const NOEXCEPT +{ + return {*this, index}; +} + +CONSTCD11 +inline +bool +operator==(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return x.weekday() == y.weekday() && x.index() == y.index(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_indexed& x, const weekday_indexed& y) NOEXCEPT +{ + return !(x == y); +} + +// weekday_last + +CONSTCD11 inline date::weekday weekday_last::weekday() const NOEXCEPT {return wd_;} +CONSTCD11 inline bool weekday_last::ok() const NOEXCEPT {return wd_.ok();} +CONSTCD11 inline weekday_last::weekday_last(const date::weekday& wd) NOEXCEPT : wd_(wd) {} + +CONSTCD11 +inline +bool +operator==(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return x.weekday() == y.weekday(); +} + +CONSTCD11 +inline +bool +operator!=(const weekday_last& x, const weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const weekday_last& wdl) +{ + return low_level_fmt(os, wdl.weekday()) << "[last]"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const weekday_last& wdl) +{ + detail::low_level_fmt(os, wdl); + if (!wdl.ok()) + os << " is not a valid weekday_last"; + return os; +} + +CONSTCD11 +inline +weekday_last +weekday::operator[](last_spec) const NOEXCEPT +{ + return weekday_last{*this}; +} + +// year_month + +CONSTCD11 +inline +year_month::year_month(const date::year& y, const date::month& m) NOEXCEPT + : y_(y) + , m_(m) + {} + +CONSTCD11 inline year year_month::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool year_month::ok() const NOEXCEPT {return y_.ok() && m_.ok();} + +template +CONSTCD14 +inline +year_month& +year_month::operator+=(const months& dm) NOEXCEPT +{ + *this = *this + dm; + return *this; +} + +template +CONSTCD14 +inline +year_month& +year_month::operator-=(const months& dm) NOEXCEPT +{ + *this = *this - dm; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator+=(const years& dy) NOEXCEPT +{ + *this = *this + dy; + return *this; +} + +CONSTCD14 +inline +year_month& +year_month::operator-=(const years& dy) NOEXCEPT +{ + *this = *this - dy; + return *this; +} + +CONSTCD11 +inline +bool +operator==(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month& x, const year_month& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month() < y.month())); +} + +CONSTCD11 +inline +bool +operator>(const year_month& x, const year_month& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month& x, const year_month& y) NOEXCEPT +{ + return !(x < y); +} + +template +CONSTCD14 +inline +year_month +operator+(const year_month& ym, const months& dm) NOEXCEPT +{ + auto dmi = static_cast(static_cast(ym.month())) - 1 + dm.count(); + auto dy = (dmi >= 0 ? dmi : dmi-11) / 12; + dmi = dmi - dy * 12 + 1; + return (ym.year() + years(dy)) / month(static_cast(dmi)); +} + +template +CONSTCD14 +inline +year_month +operator+(const months& dm, const year_month& ym) NOEXCEPT +{ + return ym + dm; +} + +template +CONSTCD14 +inline +year_month +operator-(const year_month& ym, const months& dm) NOEXCEPT +{ + return ym + -dm; +} + +CONSTCD11 +inline +months +operator-(const year_month& x, const year_month& y) NOEXCEPT +{ + return (x.year() - y.year()) + + months(static_cast(x.month()) - static_cast(y.month())); +} + +CONSTCD11 +inline +year_month +operator+(const year_month& ym, const years& dy) NOEXCEPT +{ + return (ym.year() + dy) / ym.month(); +} + +CONSTCD11 +inline +year_month +operator+(const years& dy, const year_month& ym) NOEXCEPT +{ + return ym + dy; +} + +CONSTCD11 +inline +year_month +operator-(const year_month& ym, const years& dy) NOEXCEPT +{ + return ym + -dy; +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month& ym) +{ + low_level_fmt(os, ym.year()) << '/'; + return low_level_fmt(os, ym.month()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month& ym) +{ + detail::low_level_fmt(os, ym); + if (!ym.ok()) + os << " is not a valid year_month"; + return os; +} + +// month_day + +CONSTCD11 +inline +month_day::month_day(const date::month& m, const date::day& d) NOEXCEPT + : m_(m) + , d_(d) + {} + +CONSTCD11 inline date::month month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline date::day month_day::day() const NOEXCEPT {return d_;} + +CONSTCD14 +inline +bool +month_day::ok() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(29), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return m_.ok() && date::day{1} <= d_ && d_ <= d[static_cast(m_)-1]; +} + +CONSTCD11 +inline +bool +operator==(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day& x, const month_day& y) NOEXCEPT +{ + return x.month() < y.month() ? true + : (x.month() > y.month() ? 
false + : (x.day() < y.day())); +} + +CONSTCD11 +inline +bool +operator>(const month_day& x, const month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day& x, const month_day& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day& md) +{ + low_level_fmt(os, md.month()) << '/'; + return low_level_fmt(os, md.day()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day& md) +{ + detail::low_level_fmt(os, md); + if (!md.ok()) + os << " is not a valid month_day"; + return os; +} + +// month_day_last + +CONSTCD11 inline month month_day_last::month() const NOEXCEPT {return m_;} +CONSTCD11 inline bool month_day_last::ok() const NOEXCEPT {return m_.ok();} +CONSTCD11 inline month_day_last::month_day_last(const date::month& m) NOEXCEPT : m_(m) {} + +CONSTCD11 +inline +bool +operator==(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() == y.month(); +} + +CONSTCD11 +inline +bool +operator!=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return x.month() < y.month(); +} + +CONSTCD11 +inline +bool +operator>(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const month_day_last& x, const month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_day_last& mdl) +{ + return low_level_fmt(os, mdl.month()) << "/last"; +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_day_last& mdl) +{ + detail::low_level_fmt(os, mdl); + if (!mdl.ok()) + os << " is not a valid month_day_last"; + return os; +} + +// month_weekday + +CONSTCD11 +inline +month_weekday::month_weekday(const date::month& m, + const date::weekday_indexed& wdi) NOEXCEPT + : m_(m) + , wdi_(wdi) + {} + +CONSTCD11 inline month month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_indexed +month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD11 +inline +bool +month_weekday::ok() const NOEXCEPT +{ + return m_.ok() && wdi_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday& x, const month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday& mwd) +{ + low_level_fmt(os, mwd.month()) << '/'; + return low_level_fmt(os, mwd.weekday_indexed()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday& mwd) +{ + detail::low_level_fmt(os, mwd); + if (!mwd.ok()) + os << " is not a valid month_weekday"; + return os; +} + +// month_weekday_last + +CONSTCD11 +inline +month_weekday_last::month_weekday_last(const 
date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : m_(m) + , wdl_(wdl) + {} + +CONSTCD11 inline month month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday_last +month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD11 +inline +bool +month_weekday_last::ok() const NOEXCEPT +{ + return m_.ok() && wdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return x.month() == y.month() && x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const month_weekday_last& x, const month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + low_level_fmt(os, mwdl.month()) << '/'; + return low_level_fmt(os, mwdl.weekday_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const month_weekday_last& mwdl) +{ + detail::low_level_fmt(os, mwdl); + if (!mwdl.ok()) + os << " is not a valid month_weekday_last"; + return os; +} + +// year_month_day_last + +CONSTCD11 +inline +year_month_day_last::year_month_day_last(const date::year& y, + const date::month_day_last& mdl) NOEXCEPT + : y_(y) + , mdl_(mdl) + {} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day_last& +year_month_day_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_day_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day_last::month() const NOEXCEPT {return mdl_.month();} + +CONSTCD11 +inline +month_day_last +year_month_day_last::month_day_last() const NOEXCEPT +{ + return mdl_; +} + +CONSTCD14 +inline +day +year_month_day_last::day() const NOEXCEPT +{ + CONSTDATA date::day d[] = + { + date::day(31), date::day(28), date::day(31), + date::day(30), date::day(31), date::day(30), + date::day(31), date::day(31), date::day(30), + date::day(31), date::day(30), date::day(31) + }; + return (month() != February || !y_.is_leap()) && mdl_.ok() ? + d[static_cast(month()) - 1] : date::day{29}; +} + +CONSTCD14 +inline +year_month_day_last::operator sys_days() const NOEXCEPT +{ + return sys_days(year()/month()/day()); +} + +CONSTCD14 +inline +year_month_day_last::operator local_days() const NOEXCEPT +{ + return local_days(year()/month()/day()); +} + +CONSTCD11 +inline +bool +year_month_day_last::ok() const NOEXCEPT +{ + return y_.ok() && mdl_.ok(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month_day_last() == y.month_day_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? 
false + : (x.month_day_last() < y.month_day_last())); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day_last& x, const year_month_day_last& y) NOEXCEPT +{ + return !(x < y); +} + +namespace detail +{ + +template +std::basic_ostream& +low_level_fmt(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + low_level_fmt(os, ymdl.year()) << '/'; + return low_level_fmt(os, ymdl.month_day_last()); +} + +} // namespace detail + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day_last& ymdl) +{ + detail::low_level_fmt(os, ymdl); + if (!ymdl.ok()) + os << " is not a valid year_month_day_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return (ymdl.year() / ymdl.month() + dm) / last; +} + +template +CONSTCD14 +inline +year_month_day_last +operator+(const months& dm, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dm; +} + +template +CONSTCD14 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const months& dm) NOEXCEPT +{ + return ymdl + (-dm); +} + +CONSTCD11 +inline +year_month_day_last +operator+(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return {ymdl.year()+dy, ymdl.month_day_last()}; +} + +CONSTCD11 +inline +year_month_day_last +operator+(const years& dy, const year_month_day_last& ymdl) NOEXCEPT +{ + return ymdl + dy; +} + +CONSTCD11 +inline +year_month_day_last +operator-(const year_month_day_last& ymdl, const years& dy) NOEXCEPT +{ + return ymdl + (-dy); +} + +// year_month_day + +CONSTCD11 +inline +year_month_day::year_month_day(const date::year& y, const date::month& m, + const date::day& d) NOEXCEPT + : y_(y) + , m_(m) + , d_(d) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(const year_month_day_last& ymdl) NOEXCEPT + : y_(ymdl.year()) + , m_(ymdl.month()) + , d_(ymdl.day()) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(sys_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_day::year_month_day(local_days dp) NOEXCEPT + : year_month_day(from_days(dp.time_since_epoch())) + {} + +CONSTCD11 inline year year_month_day::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_day::month() const NOEXCEPT {return m_;} +CONSTCD11 inline day year_month_day::day() const NOEXCEPT {return d_;} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_day& +year_month_day::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD14 +inline +days +year_month_day::to_days() const NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed 
integer"); + auto const y = static_cast(y_) - (m_ <= February); + auto const m = static_cast(m_); + auto const d = static_cast(d_); + auto const era = (y >= 0 ? y : y-399) / 400; + auto const yoe = static_cast(y - era * 400); // [0, 399] + auto const doy = (153*(m > 2 ? m-3 : m+9) + 2)/5 + d-1; // [0, 365] + auto const doe = yoe * 365 + yoe/4 - yoe/100 + doy; // [0, 146096] + return days{era * 146097 + static_cast(doe) - 719468}; +} + +CONSTCD14 +inline +year_month_day::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_day::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_day::ok() const NOEXCEPT +{ + if (!(y_.ok() && m_.ok())) + return false; + return date::day{1} <= d_ && d_ <= (y_ / m_ / last).day(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && x.day() == y.day(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x == y); +} + +CONSTCD11 +inline +bool +operator<(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return x.year() < y.year() ? true + : (x.year() > y.year() ? false + : (x.month() < y.month() ? true + : (x.month() > y.month() ? false + : (x.day() < y.day())))); +} + +CONSTCD11 +inline +bool +operator>(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return y < x; +} + +CONSTCD11 +inline +bool +operator<=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(y < x); +} + +CONSTCD11 +inline +bool +operator>=(const year_month_day& x, const year_month_day& y) NOEXCEPT +{ + return !(x < y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_day& ymd) +{ + detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.imbue(std::locale::classic()); + os << static_cast(ymd.year()) << '-'; + os.width(2); + os << static_cast(ymd.month()) << '-'; + os.width(2); + os << static_cast(ymd.day()); + if (!ymd.ok()) + os << " is not a valid year_month_day"; + return os; +} + +CONSTCD14 +inline +year_month_day +year_month_day::from_days(days dp) NOEXCEPT +{ + static_assert(std::numeric_limits::digits >= 18, + "This algorithm has not been ported to a 16 bit unsigned integer"); + static_assert(std::numeric_limits::digits >= 20, + "This algorithm has not been ported to a 16 bit signed integer"); + auto const z = dp.count() + 719468; + auto const era = (z >= 0 ? z : z - 146096) / 146097; + auto const doe = static_cast(z - era * 146097); // [0, 146096] + auto const yoe = (doe - doe/1460 + doe/36524 - doe/146096) / 365; // [0, 399] + auto const y = static_cast(yoe) + era * 400; + auto const doy = doe - (365*yoe + yoe/4 - yoe/100); // [0, 365] + auto const mp = (5*doy + 2)/153; // [0, 11] + auto const d = doy - (153*mp+2)/5 + 1; // [1, 31] + auto const m = mp < 10 ? 
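+                     // mp counts months from March (0 = Mar ... 11 = Feb);
+                     // map back to the civil numbering [1, 12]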
mp+3 : mp-9; // [1, 12] + return year_month_day{date::year{y + (m <= 2)}, date::month(m), date::day(d)}; +} + +template +CONSTCD14 +inline +year_month_day +operator+(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return (ymd.year() / ymd.month() + dm) / ymd.day(); +} + +template +CONSTCD14 +inline +year_month_day +operator+(const months& dm, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dm; +} + +template +CONSTCD14 +inline +year_month_day +operator-(const year_month_day& ymd, const months& dm) NOEXCEPT +{ + return ymd + (-dm); +} + +CONSTCD11 +inline +year_month_day +operator+(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return (ymd.year() + dy) / ymd.month() / ymd.day(); +} + +CONSTCD11 +inline +year_month_day +operator+(const years& dy, const year_month_day& ymd) NOEXCEPT +{ + return ymd + dy; +} + +CONSTCD11 +inline +year_month_day +operator-(const year_month_day& ymd, const years& dy) NOEXCEPT +{ + return ymd + (-dy); +} + +// year_month_weekday + +CONSTCD11 +inline +year_month_weekday::year_month_weekday(const date::year& y, const date::month& m, + const date::weekday_indexed& wdi) + NOEXCEPT + : y_(y) + , m_(m) + , wdi_(wdi) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const sys_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +CONSTCD14 +inline +year_month_weekday::year_month_weekday(const local_days& dp) NOEXCEPT + : year_month_weekday(from_days(dp.time_since_epoch())) + {} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday& +year_month_weekday::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday::weekday() const NOEXCEPT +{ + return wdi_.weekday(); +} + +CONSTCD11 +inline +unsigned +year_month_weekday::index() const NOEXCEPT +{ + return wdi_.index(); +} + +CONSTCD11 +inline +weekday_indexed +year_month_weekday::weekday_indexed() const NOEXCEPT +{ + return wdi_; +} + +CONSTCD14 +inline +year_month_weekday::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD14 +inline +bool +year_month_weekday::ok() const NOEXCEPT +{ + if (!y_.ok() || !m_.ok() || !wdi_.weekday().ok() || wdi_.index() < 1) + return false; + if (wdi_.index() <= 4) + return true; + auto d2 = wdi_.weekday() - date::weekday(static_cast(y_/m_/1)) + + days((wdi_.index()-1)*7 + 1); + return static_cast(d2.count()) <= static_cast((y_/m_/last).day()); +} + +CONSTCD14 +inline +year_month_weekday +year_month_weekday::from_days(days d) NOEXCEPT +{ + sys_days dp{d}; + auto const wd = date::weekday(dp); + auto const ymd = year_month_day(dp); + return {ymd.year(), ymd.month(), wd[(static_cast(ymd.day())-1)/7+1]}; +} + +CONSTCD14 +inline +days +year_month_weekday::to_days() const NOEXCEPT +{ + auto d = sys_days(y_/m_/1); + return (d + 
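+               // days from the 1st to the first matching weekday,
+               // plus (index-1) whole weeks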
(wdi_.weekday() - date::weekday(d) + days{(wdi_.index()-1)*7}) + ).time_since_epoch(); +} + +CONSTCD11 +inline +bool +operator==(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_indexed() == y.weekday_indexed(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday& x, const year_month_weekday& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday& ymwdi) +{ + detail::low_level_fmt(os, ymwdi.year()) << '/'; + detail::low_level_fmt(os, ymwdi.month()) << '/'; + detail::low_level_fmt(os, ymwdi.weekday_indexed()); + if (!ymwdi.ok()) + os << " is not a valid year_month_weekday"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return (ymwd.year() / ymwd.month() + dm) / ymwd.weekday_indexed(); +} + +template +CONSTCD14 +inline +year_month_weekday +operator+(const months& dm, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dm; +} + +template +CONSTCD14 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const months& dm) NOEXCEPT +{ + return ymwd + (-dm); +} + +CONSTCD11 +inline +year_month_weekday +operator+(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return {ymwd.year()+dy, ymwd.month(), ymwd.weekday_indexed()}; +} + +CONSTCD11 +inline +year_month_weekday +operator+(const years& dy, const year_month_weekday& ymwd) NOEXCEPT +{ + return ymwd + dy; +} + +CONSTCD11 +inline +year_month_weekday +operator-(const year_month_weekday& ymwd, const years& dy) NOEXCEPT +{ + return ymwd + (-dy); +} + +// year_month_weekday_last + +CONSTCD11 +inline +year_month_weekday_last::year_month_weekday_last(const date::year& y, + const date::month& m, + const date::weekday_last& wdl) NOEXCEPT + : y_(y) + , m_(m) + , wdl_(wdl) + {} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const months& m) NOEXCEPT +{ + *this = *this + m; + return *this; +} + +template +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const months& m) NOEXCEPT +{ + *this = *this - m; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator+=(const years& y) NOEXCEPT +{ + *this = *this + y; + return *this; +} + +CONSTCD14 +inline +year_month_weekday_last& +year_month_weekday_last::operator-=(const years& y) NOEXCEPT +{ + *this = *this - y; + return *this; +} + +CONSTCD11 inline year year_month_weekday_last::year() const NOEXCEPT {return y_;} +CONSTCD11 inline month year_month_weekday_last::month() const NOEXCEPT {return m_;} + +CONSTCD11 +inline +weekday +year_month_weekday_last::weekday() const NOEXCEPT +{ + return wdl_.weekday(); +} + +CONSTCD11 +inline +weekday_last +year_month_weekday_last::weekday_last() const NOEXCEPT +{ + return wdl_; +} + +CONSTCD14 +inline +year_month_weekday_last::operator sys_days() const NOEXCEPT +{ + return sys_days{to_days()}; +} + +CONSTCD14 +inline +year_month_weekday_last::operator local_days() const NOEXCEPT +{ + return local_days{to_days()}; +} + +CONSTCD11 +inline +bool +year_month_weekday_last::ok() const NOEXCEPT +{ + return y_.ok() && m_.ok() && wdl_.ok(); +} + +CONSTCD14 +inline +days +year_month_weekday_last::to_days() const NOEXCEPT +{ + auto const d = sys_days(y_/m_/last); + return (d - (date::weekday{d} - wdl_.weekday())).time_since_epoch(); +} + 
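+// Usage sketch (illustrative only; not part of the upstream header): the
+// to_days() above starts at the last day of the month and backs up to the
+// requested weekday, so a "last weekday" always lands inside its month:
+//
+//     using namespace date;
+//     constexpr auto ymwdl = 2015_y/March/Friday[last];  // year_month_weekday_last
+//     constexpr sys_days sd = ymwdl;  // 2015-03-27, the last Friday of March
+//
+// Both lines are valid constant expressions when CONSTCD14 expands to
+// constexpr (i.e. with C++14 or later).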
+CONSTCD11 +inline +bool +operator==(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return x.year() == y.year() && x.month() == y.month() && + x.weekday_last() == y.weekday_last(); +} + +CONSTCD11 +inline +bool +operator!=(const year_month_weekday_last& x, const year_month_weekday_last& y) NOEXCEPT +{ + return !(x == y); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const year_month_weekday_last& ymwdl) +{ + detail::low_level_fmt(os, ymwdl.year()) << '/'; + detail::low_level_fmt(os, ymwdl.month()) << '/'; + detail::low_level_fmt(os, ymwdl.weekday_last()); + if (!ymwdl.ok()) + os << " is not a valid year_month_weekday_last"; + return os; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return (ymwdl.year() / ymwdl.month() + dm) / ymwdl.weekday_last(); +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator+(const months& dm, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dm; +} + +template +CONSTCD14 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const months& dm) NOEXCEPT +{ + return ymwdl + (-dm); +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return {ymwdl.year()+dy, ymwdl.month(), ymwdl.weekday_last()}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator+(const years& dy, const year_month_weekday_last& ymwdl) NOEXCEPT +{ + return ymwdl + dy; +} + +CONSTCD11 +inline +year_month_weekday_last +operator-(const year_month_weekday_last& ymwdl, const years& dy) NOEXCEPT +{ + return ymwdl + (-dy); +} + +// year_month from operator/() + +CONSTCD11 +inline +year_month +operator/(const year& y, const month& m) NOEXCEPT +{ + return {y, m}; +} + +CONSTCD11 +inline +year_month +operator/(const year& y, int m) NOEXCEPT +{ + return y / month(static_cast(m)); +} + +// month_day from operator/() + +CONSTCD11 +inline +month_day +operator/(const month& m, const day& d) NOEXCEPT +{ + return {m, d}; +} + +CONSTCD11 +inline +month_day +operator/(const day& d, const month& m) NOEXCEPT +{ + return m / d; +} + +CONSTCD11 +inline +month_day +operator/(const month& m, int d) NOEXCEPT +{ + return m / day(static_cast(d)); +} + +CONSTCD11 +inline +month_day +operator/(int m, const day& d) NOEXCEPT +{ + return month(static_cast(m)) / d; +} + +CONSTCD11 inline month_day operator/(const day& d, int m) NOEXCEPT {return m / d;} + +// month_day_last from operator/() + +CONSTCD11 +inline +month_day_last +operator/(const month& m, last_spec) NOEXCEPT +{ + return month_day_last{m}; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, const month& m) NOEXCEPT +{ + return m/last; +} + +CONSTCD11 +inline +month_day_last +operator/(int m, last_spec) NOEXCEPT +{ + return month(static_cast(m))/last; +} + +CONSTCD11 +inline +month_day_last +operator/(last_spec, int m) NOEXCEPT +{ + return m/last; +} + +// month_weekday from operator/() + +CONSTCD11 +inline +month_weekday +operator/(const month& m, const weekday_indexed& wdi) NOEXCEPT +{ + return {m, wdi}; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, const month& m) NOEXCEPT +{ + return m / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(int m, const weekday_indexed& wdi) NOEXCEPT +{ + return month(static_cast(m)) / wdi; +} + +CONSTCD11 +inline +month_weekday +operator/(const weekday_indexed& wdi, int m) NOEXCEPT +{ + return 
m / wdi; +} + +// month_weekday_last from operator/() + +CONSTCD11 +inline +month_weekday_last +operator/(const month& m, const weekday_last& wdl) NOEXCEPT +{ + return {m, wdl}; +} + +CONSTCD11 +inline +month_weekday_last +operator/(const weekday_last& wdl, const month& m) NOEXCEPT +{ + return m / wdl; +} + +CONSTCD11 +inline +month_weekday_last +operator/(int m, const weekday_last& wdl) NOEXCEPT +{ + return month(static_cast(m)) / wdl; +} + +CONSTCD11 +inline +month_weekday_last +operator/(const weekday_last& wdl, int m) NOEXCEPT +{ + return m / wdl; +} + +// year_month_day from operator/() + +CONSTCD11 +inline +year_month_day +operator/(const year_month& ym, const day& d) NOEXCEPT +{ + return {ym.year(), ym.month(), d}; +} + +CONSTCD11 +inline +year_month_day +operator/(const year_month& ym, int d) NOEXCEPT +{ + return ym / day(static_cast(d)); +} + +CONSTCD11 +inline +year_month_day +operator/(const year& y, const month_day& md) NOEXCEPT +{ + return y / md.month() / md.day(); +} + +CONSTCD11 +inline +year_month_day +operator/(int y, const month_day& md) NOEXCEPT +{ + return year(y) / md; +} + +CONSTCD11 +inline +year_month_day +operator/(const month_day& md, const year& y) NOEXCEPT +{ + return y / md; +} + +CONSTCD11 +inline +year_month_day +operator/(const month_day& md, int y) NOEXCEPT +{ + return year(y) / md; +} + +// year_month_day_last from operator/() + +CONSTCD11 +inline +year_month_day_last +operator/(const year_month& ym, last_spec) NOEXCEPT +{ + return {ym.year(), month_day_last{ym.month()}}; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const year& y, const month_day_last& mdl) NOEXCEPT +{ + return {y, mdl}; +} + +CONSTCD11 +inline +year_month_day_last +operator/(int y, const month_day_last& mdl) NOEXCEPT +{ + return year(y) / mdl; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const month_day_last& mdl, const year& y) NOEXCEPT +{ + return y / mdl; +} + +CONSTCD11 +inline +year_month_day_last +operator/(const month_day_last& mdl, int y) NOEXCEPT +{ + return year(y) / mdl; +} + +// year_month_weekday from operator/() + +CONSTCD11 +inline +year_month_weekday +operator/(const year_month& ym, const weekday_indexed& wdi) NOEXCEPT +{ + return {ym.year(), ym.month(), wdi}; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const year& y, const month_weekday& mwd) NOEXCEPT +{ + return {y, mwd.month(), mwd.weekday_indexed()}; +} + +CONSTCD11 +inline +year_month_weekday +operator/(int y, const month_weekday& mwd) NOEXCEPT +{ + return year(y) / mwd; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const month_weekday& mwd, const year& y) NOEXCEPT +{ + return y / mwd; +} + +CONSTCD11 +inline +year_month_weekday +operator/(const month_weekday& mwd, int y) NOEXCEPT +{ + return year(y) / mwd; +} + +// year_month_weekday_last from operator/() + +CONSTCD11 +inline +year_month_weekday_last +operator/(const year_month& ym, const weekday_last& wdl) NOEXCEPT +{ + return {ym.year(), ym.month(), wdl}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const year& y, const month_weekday_last& mwdl) NOEXCEPT +{ + return {y, mwdl.month(), mwdl.weekday_last()}; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(int y, const month_weekday_last& mwdl) NOEXCEPT +{ + return year(y) / mwdl; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const month_weekday_last& mwdl, const year& y) NOEXCEPT +{ + return y / mwdl; +} + +CONSTCD11 +inline +year_month_weekday_last +operator/(const month_weekday_last& mwdl, int y) NOEXCEPT +{ + return year(y) / mwdl; 
+} + +template +struct fields; + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr); + +template +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr); + +// hh_mm_ss + +namespace detail +{ + +struct undocumented {explicit undocumented() = default;}; + +// width::value is the number of fractional decimal digits in 1/n +// width<0>::value and width<1>::value are defined to be 0 +// If 1/n takes more than 18 fractional decimal digits, +// the result is truncated to 19. +// Example: width<2>::value == 1 +// Example: width<3>::value == 19 +// Example: width<4>::value == 2 +// Example: width<10>::value == 1 +// Example: width<1000>::value == 3 +template +struct width +{ + static_assert(d > 0, "width called with zero denominator"); + static CONSTDATA unsigned value = 1 + width::value; +}; + +template +struct width +{ + static CONSTDATA unsigned value = 0; +}; + +template +struct static_pow10 +{ +private: + static CONSTDATA std::uint64_t h = static_pow10::value; +public: + static CONSTDATA std::uint64_t value = h * h * (exp % 2 ? 10 : 1); +}; + +template <> +struct static_pow10<0> +{ + static CONSTDATA std::uint64_t value = 1; +}; + +template +class decimal_format_seconds +{ + using CT = typename std::common_type::type; + using rep = typename CT::rep; + static unsigned CONSTDATA trial_width = + detail::width::value; +public: + static unsigned CONSTDATA width = trial_width < 19 ? trial_width : 6u; + using precision = std::chrono::duration::value>>; + +private: + std::chrono::seconds s_; + precision sub_s_; + +public: + CONSTCD11 decimal_format_seconds() + : s_() + , sub_s_() + {} + + CONSTCD11 explicit decimal_format_seconds(const Duration& d) NOEXCEPT + : s_(std::chrono::duration_cast(d)) + , sub_s_(std::chrono::duration_cast(d - s_)) + {} + + CONSTCD14 std::chrono::seconds& seconds() NOEXCEPT {return s_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_;} + CONSTCD11 precision subseconds() const NOEXCEPT {return sub_s_;} + + CONSTCD14 precision to_duration() const NOEXCEPT + { + return s_ + sub_s_; + } + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return sub_s_ < std::chrono::seconds{1} && s_ < std::chrono::minutes{1}; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const decimal_format_seconds& x) + { + return x.print(os, std::chrono::treat_as_floating_point{}); + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::true_type) const + { + date::detail::save_ostream _(os); + std::chrono::duration d = s_ + sub_s_; + if (d < std::chrono::seconds{10}) + os << '0'; + os.precision(width+6); + os << std::fixed << d.count(); + return os; + } + + template + std::basic_ostream& + print(std::basic_ostream& os, std::false_type) const + { + date::detail::save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << s_.count(); + if (width > 0) + { +#if !ONLY_C_LOCALE + os << std::use_facet>(os.getloc()).decimal_point(); +#else + os << '.'; +#endif + date::detail::save_ostream _s(os); + os.imbue(std::locale::classic()); + os.width(width); + os << sub_s_.count(); + } + return os; + } +}; + +template +inline +CONSTCD11 +typename std::enable_if + < + std::numeric_limits::is_signed, + std::chrono::duration + >::type 
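+// (enabled only for signed Rep; the unsigned overload below is the identity)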
+abs(std::chrono::duration d) +{ + return d >= d.zero() ? +d : -d; +} + +template +inline +CONSTCD11 +typename std::enable_if + < + !std::numeric_limits::is_signed, + std::chrono::duration + >::type +abs(std::chrono::duration d) +{ + return d; +} + +} // namespace detail + +template +class hh_mm_ss +{ + using dfs = detail::decimal_format_seconds::type>; + + std::chrono::hours h_; + std::chrono::minutes m_; + dfs s_; + bool neg_; + +public: + static unsigned CONSTDATA fractional_width = dfs::width; + using precision = typename dfs::precision; + + CONSTCD11 hh_mm_ss() NOEXCEPT + : hh_mm_ss(Duration::zero()) + {} + + CONSTCD11 explicit hh_mm_ss(Duration d) NOEXCEPT + : h_(std::chrono::duration_cast(detail::abs(d))) + , m_(std::chrono::duration_cast(detail::abs(d)) - h_) + , s_(detail::abs(d) - h_ - m_) + , neg_(d < Duration::zero()) + {} + + CONSTCD11 std::chrono::hours hours() const NOEXCEPT {return h_;} + CONSTCD11 std::chrono::minutes minutes() const NOEXCEPT {return m_;} + CONSTCD11 std::chrono::seconds seconds() const NOEXCEPT {return s_.seconds();} + CONSTCD14 std::chrono::seconds& + seconds(detail::undocumented) NOEXCEPT {return s_.seconds();} + CONSTCD11 precision subseconds() const NOEXCEPT {return s_.subseconds();} + CONSTCD11 bool is_negative() const NOEXCEPT {return neg_;} + + CONSTCD11 explicit operator precision() const NOEXCEPT {return to_duration();} + CONSTCD11 precision to_duration() const NOEXCEPT + {return (s_.to_duration() + m_ + h_) * (1-2*neg_);} + + CONSTCD11 bool in_conventional_range() const NOEXCEPT + { + return !neg_ && h_ < days{1} && m_ < std::chrono::hours{1} && + s_.in_conventional_range(); + } + +private: + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, hh_mm_ss const& tod) + { + if (tod.is_negative()) + os << '-'; + if (tod.h_ < std::chrono::hours{10}) + os << '0'; + os << tod.h_.count() << ':'; + if (tod.m_ < std::chrono::minutes{10}) + os << '0'; + os << tod.m_.count() << ':' << tod.s_; + return os; + } + + template + friend + std::basic_ostream& + date::to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec); + + template + friend + std::basic_istream& + date::from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, + std::basic_string* abbrev, std::chrono::minutes* offset); +}; + +inline +CONSTCD14 +bool +is_am(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{0} <= h && h < hours{12}; +} + +inline +CONSTCD14 +bool +is_pm(std::chrono::hours const& h) NOEXCEPT +{ + using std::chrono::hours; + return hours{12} <= h && h < hours{24}; +} + +inline +CONSTCD14 +std::chrono::hours +make12(std::chrono::hours h) NOEXCEPT +{ + using std::chrono::hours; + if (h < hours{12}) + { + if (h == hours{0}) + h = hours{12}; + } + else + { + if (h != hours{12}) + h = h - hours{12}; + } + return h; +} + +inline +CONSTCD14 +std::chrono::hours +make24(std::chrono::hours h, bool is_pm) NOEXCEPT +{ + using std::chrono::hours; + if (is_pm) + { + if (h != hours{12}) + h = h + hours{12}; + } + else if (h == hours{12}) + h = hours{0}; + return h; +} + +template +using time_of_day = hh_mm_ss; + +template +CONSTCD11 +inline +hh_mm_ss> +make_time(const std::chrono::duration& d) +{ + return hh_mm_ss>(d); +} + +template +inline +typename std::enable_if +< + !std::is_convertible::value, + std::basic_ostream& +>::type +operator<<(std::basic_ostream& os, const sys_time& tp) +{ + auto const dp = date::floor(tp); + return os << 
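+              // calendar date first, then the time elapsed since midnight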
year_month_day(dp) << ' ' << make_time(tp-dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_days& dp) +{ + return os << year_month_day(dp); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_time& ut) +{ + return (os << sys_time{ut.time_since_epoch()}); +} + +namespace detail +{ + +template +class string_literal; + +template +inline +CONSTCD14 +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT; + +template +class string_literal +{ + CharT p_[N]; + + CONSTCD11 string_literal() NOEXCEPT + : p_{} + {} + +public: + using const_iterator = const CharT*; + + string_literal(string_literal const&) = default; + string_literal& operator=(string_literal const&) = delete; + + template ::type> + CONSTCD11 string_literal(CharT c) NOEXCEPT + : p_{c} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2) NOEXCEPT + : p_{c1, c2} + { + } + + template ::type> + CONSTCD11 string_literal(CharT c1, CharT c2, CharT c3) NOEXCEPT + : p_{c1, c2, c3} + { + } + + CONSTCD14 string_literal(const CharT(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::type> + CONSTCD14 string_literal(const char(&a)[N]) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + template ::value>::type> + CONSTCD14 string_literal(string_literal const& a) NOEXCEPT + : p_{} + { + for (std::size_t i = 0; i < N; ++i) + p_[i] = a[i]; + } + + CONSTCD11 const CharT* data() const NOEXCEPT {return p_;} + CONSTCD11 std::size_t size() const NOEXCEPT {return N-1;} + + CONSTCD11 const_iterator begin() const NOEXCEPT {return p_;} + CONSTCD11 const_iterator end() const NOEXCEPT {return p_ + N-1;} + + CONSTCD11 CharT const& operator[](std::size_t n) const NOEXCEPT + { + return p_[n]; + } + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, const string_literal& s) + { + return os << s.p_; + } + + template + friend + CONSTCD14 + string_literal::type, + N1 + N2 - 1> + operator+(const string_literal& x, const string_literal& y) NOEXCEPT; +}; + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], y[0]); +} + +template +CONSTCD11 +inline +string_literal +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + return string_literal(x[0], x[1], y[0]); +} + +template +CONSTCD14 +inline +string_literal::type, + N1 + N2 - 1> +operator+(const string_literal& x, const string_literal& y) NOEXCEPT +{ + using CT = typename std::conditional::type; + + string_literal r; + std::size_t i = 0; + for (; i < N1-1; ++i) + r.p_[i] = CT(x.p_[i]); + for (std::size_t j = 0; j < N2; ++j, ++i) + r.p_[i] = CT(y.p_[j]); + + return r; +} + + +template +inline +std::basic_string +operator+(std::basic_string x, const string_literal& y) +{ + x.append(y.data(), y.size()); + return x; +} + +#if __cplusplus >= 201402 && (!defined(__EDG_VERSION__) || __EDG_VERSION__ > 411) \ + && (!defined(__SUNPRO_CC) || __SUNPRO_CC > 0x5150) + +template ::value || + std::is_same::value || + std::is_same::value || + std::is_same::value>> +CONSTCD14 +inline +string_literal +msl(CharT c) NOEXCEPT +{ + return string_literal{c}; +} + +CONSTCD14 +inline +std::size_t +to_string_len(std::intmax_t i) +{ + std::size_t r = 0; + do + { + i /= 10; + ++r; + } while (i > 0); + return r; +} + +template +CONSTCD14 +inline +std::enable_if_t +< + N < 
10, + string_literal +> +msl() NOEXCEPT +{ + return msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + 10 <= N, + string_literal +> +msl() NOEXCEPT +{ + return msl() + msl(char(N % 10 + '0')); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den != 1, + string_literal::type::num) + + to_string_len(std::ratio::type::den) + 4> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{'/'}) + + msl() + msl(CharT{']'}); +} + +template +CONSTCD14 +inline +std::enable_if_t +< + std::ratio::type::den == 1, + string_literal::type::num) + 3> +> +msl(std::ratio) NOEXCEPT +{ + using R = typename std::ratio::type; + return msl(CharT{'['}) + msl() + msl(CharT{']'}); +} + + +#else // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +inline +std::string +to_string(std::uint64_t x) +{ + return std::to_string(x); +} + +template +inline +std::basic_string +to_string(std::uint64_t x) +{ + auto y = std::to_string(x); + return std::basic_string(y.begin(), y.end()); +} + +template +inline +typename std::enable_if +< + std::ratio::type::den != 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{'/'} + + to_string(R::den) + CharT{']'}; +} + +template +inline +typename std::enable_if +< + std::ratio::type::den == 1, + std::basic_string +>::type +msl(std::ratio) +{ + using R = typename std::ratio::type; + return std::basic_string(1, '[') + to_string(R::num) + CharT{']'}; +} + +#endif // __cplusplus < 201402 || (defined(__EDG_VERSION__) && __EDG_VERSION__ <= 411) + +template +CONSTCD11 +inline +string_literal +msl(std::atto) NOEXCEPT +{ + return string_literal{'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::femto) NOEXCEPT +{ + return string_literal{'f'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::pico) NOEXCEPT +{ + return string_literal{'p'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::nano) NOEXCEPT +{ + return string_literal{'n'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{'\xC2', '\xB5'}; +} + +template +CONSTCD11 +inline +typename std::enable_if +< + !std::is_same::value, + string_literal +>::type +msl(std::micro) NOEXCEPT +{ + return string_literal{CharT{static_cast('\xB5')}}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::milli) NOEXCEPT +{ + return string_literal{'m'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::centi) NOEXCEPT +{ + return string_literal{'c'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deca) NOEXCEPT +{ + return string_literal{'d', 'a'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::deci) NOEXCEPT +{ + return string_literal{'d'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::hecto) NOEXCEPT +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::kilo) NOEXCEPT +{ + return string_literal{'k'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::mega) NOEXCEPT +{ + return string_literal{'M'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::giga) NOEXCEPT +{ + return string_literal{'G'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::tera) NOEXCEPT +{ + return string_literal{'T'}; +} + +template +CONSTCD11 +inline +string_literal 
+msl(std::peta) NOEXCEPT +{ + return string_literal{'P'}; +} + +template +CONSTCD11 +inline +string_literal +msl(std::exa) NOEXCEPT +{ + return string_literal{'E'}; +} + +template +CONSTCD11 +inline +auto +get_units(Period p) + -> decltype(msl(p) + string_literal{'s'}) +{ + return msl(p) + string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<1>) +{ + return string_literal{'s'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<3600>) +{ + return string_literal{'h'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<60>) +{ + return string_literal{'m', 'i', 'n'}; +} + +template +CONSTCD11 +inline +string_literal +get_units(std::ratio<86400>) +{ + return string_literal{'d'}; +} + +template > +struct make_string; + +template <> +struct make_string +{ + template + static + std::string + from(Rep n) + { + return std::to_string(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_string(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +template <> +struct make_string +{ + template + static + std::wstring + from(Rep n) + { + return std::to_wstring(n); + } +}; + +template +struct make_string +{ + template + static + std::basic_string + from(Rep n) + { + auto s = std::to_wstring(n); + return std::basic_string(s.begin(), s.end()); + } +}; + +} // namespace detail + +// to_stream + +CONSTDATA year nanyear{-32768}; + +template +struct fields +{ + year_month_day ymd{nanyear/0/0}; + weekday wd{8u}; + hh_mm_ss tod{}; + bool has_tod = false; + +#if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ <= 409) + fields() : ymd{nanyear/0/0}, wd{8u}, tod{}, has_tod{false} {} +#else + fields() = default; +#endif + + fields(year_month_day ymd_) : ymd(ymd_) {} + fields(weekday wd_) : wd(wd_) {} + fields(hh_mm_ss tod_) : tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_) : ymd(ymd_), wd(wd_) {} + fields(year_month_day ymd_, hh_mm_ss tod_) : ymd(ymd_), tod(tod_), + has_tod(true) {} + + fields(weekday wd_, hh_mm_ss tod_) : wd(wd_), tod(tod_), has_tod(true) {} + + fields(year_month_day ymd_, weekday wd_, hh_mm_ss tod_) + : ymd(ymd_) + , wd(wd_) + , tod(tod_) + , has_tod(true) + {} +}; + +namespace detail +{ + +template +unsigned +extract_weekday(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.ok() && !fds.wd.ok()) + { + // fds does not contain a valid weekday + os.setstate(std::ios::failbit); + return 8; + } + weekday wd; + if (fds.ymd.ok()) + { + wd = weekday{sys_days(fds.ymd)}; + if (fds.wd.ok() && wd != fds.wd) + { + // fds.ymd and fds.wd are inconsistent + os.setstate(std::ios::failbit); + return 8; + } + } + else + wd = fds.wd; + return static_cast((wd - Sunday).count()); +} + +template +unsigned +extract_month(std::basic_ostream& os, const fields& fds) +{ + if (!fds.ymd.month().ok()) + { + // fds does not contain a valid month + os.setstate(std::ios::failbit); + return 0; + } + return static_cast(fds.ymd.month()); +} + +} // namespace detail + +#if ONLY_C_LOCALE + +namespace detail +{ + +inline +std::pair +weekday_names() +{ + static const std::string nm[] = + { + "Sunday", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sun", + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +month_names() +{ + static const std::string nm[] = + { + "January", + "February", + 
"March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +inline +std::pair +ampm_names() +{ + static const std::string nm[] = + { + "AM", + "PM" + }; + return std::make_pair(nm, nm+sizeof(nm)/sizeof(nm[0])); +} + +template +FwdIter +scan_keyword(std::basic_istream& is, FwdIter kb, FwdIter ke) +{ + size_t nkw = static_cast(std::distance(kb, ke)); + const unsigned char doesnt_match = '\0'; + const unsigned char might_match = '\1'; + const unsigned char does_match = '\2'; + unsigned char statbuf[100]; + unsigned char* status = statbuf; + std::unique_ptr stat_hold(0, free); + if (nkw > sizeof(statbuf)) + { + status = (unsigned char*)std::malloc(nkw); + if (status == nullptr) + throw std::bad_alloc(); + stat_hold.reset(status); + } + size_t n_might_match = nkw; // At this point, any keyword might match + size_t n_does_match = 0; // but none of them definitely do + // Initialize all statuses to might_match, except for "" keywords are does_match + unsigned char* st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (!ky->empty()) + *st = might_match; + else + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + // While there might be a match, test keywords against the next CharT + for (size_t indx = 0; is && n_might_match > 0; ++indx) + { + // Peek at the next CharT but don't consume it + auto ic = is.peek(); + if (ic == EOF) + { + is.setstate(std::ios::eofbit); + break; + } + auto c = static_cast(toupper(static_cast(ic))); + bool consume = false; + // For each keyword which might match, see if the indx character is c + // If a match if found, consume c + // If a match is found, and that is the last character in the keyword, + // then that keyword matches. + // If the keyword doesn't match this character, then change the keyword + // to doesn't match + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == might_match) + { + if (c == static_cast(toupper(static_cast((*ky)[indx])))) + { + consume = true; + if (ky->size() == indx+1) + { + *st = does_match; + --n_might_match; + ++n_does_match; + } + } + else + { + *st = doesnt_match; + --n_might_match; + } + } + } + // consume if we matched a character + if (consume) + { + (void)is.get(); + // If we consumed a character and there might be a matched keyword that + // was marked matched on a previous iteration, then such keywords + // are now marked as not matching. + if (n_might_match + n_does_match > 1) + { + st = status; + for (auto ky = kb; ky != ke; ++ky, ++st) + { + if (*st == does_match && ky->size() != indx+1) + { + *st = doesnt_match; + --n_does_match; + } + } + } + } + } + // We've exited the loop because we hit eof and/or we have no more "might matches". 
+ // Return the first matching result + for (st = status; kb != ke; ++kb, ++st) + if (*st == does_match) + break; + if (kb == ke) + is.setstate(std::ios::failbit); + return kb; +} + +} // namespace detail + +#endif // ONLY_C_LOCALE + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const fields& fds, const std::string* abbrev, + const std::chrono::seconds* offset_sec) +{ +#if ONLY_C_LOCALE + using detail::weekday_names; + using detail::month_names; + using detail::ampm_names; +#endif + using detail::save_ostream; + using detail::get_units; + using detail::extract_weekday; + using detail::extract_month; + using std::ios; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + date::detail::save_ostream ss(os); + os.fill(' '); + os.flags(std::ios::skipws | std::ios::dec); + os.width(0); + tm tm{}; + bool insert_negative = fds.has_tod && fds.tod.to_duration() < Duration::zero(); +#if !ONLY_C_LOCALE + auto& facet = std::use_facet>(os.getloc()); +#endif + const CharT* command = nullptr; + CharT modified = CharT{}; + for (; *fmt; ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + if (command) + { + if (modified == CharT{}) + { + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << weekday_names().first[tm.tm_wday+7*(*fmt == 'a')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + tm.tm_mon = static_cast(extract_month(os, fds)) - 1; +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else // ONLY_C_LOCALE + os << month_names().first[tm.tm_mon+12*(*fmt != 'B')]; +#endif // ONLY_C_LOCALE + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'c': + case 'x': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + if (*fmt == 'c' && !fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + auto const& ymd = fds.ymd; + auto ld = local_days(ymd); + if (*fmt == 'c') + { + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + } + tm.tm_mday = static_cast(static_cast(ymd.day())); + tm.tm_mon = static_cast(extract_month(os, fds) - 1); + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else // ONLY_C_LOCALE + if (*fmt == 'c') + { + auto wd = static_cast(extract_weekday(os, fds)); + os << weekday_names().first[static_cast(wd)+7] + << ' '; + os << month_names().first[extract_month(os, fds)-1+12] << ' '; + auto d = static_cast(static_cast(fds.ymd.day())); + if (d < 10) + os << ' '; + os << d << ' ' + << 
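+                  // whole seconds only, giving asctime()-style "%c" output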
make_time(duration_cast(fds.tod.to_duration())) + << ' ' << fds.ymd.year(); + + } + else // *fmt == 'x' + { + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } +#endif // ONLY_C_LOCALE + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'C': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + if (y >= 0) + { + os.width(2); + os << y/100; + } + else + { + os << CharT{'-'}; + os.width(2); + os << -(y-99)/100; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + tm.tm_year = y - 1900; + CharT f[3] = {'%', 'E', 'C'}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'd': + case 'e': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.day().ok()) + os.setstate(std::ios::failbit); + auto d = static_cast(static_cast(fds.ymd.day())); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + if (*fmt == CharT{'d'}) + os.fill('0'); + else + os.fill(' '); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << d; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + tm.tm_mday = d; + CharT f[3] = {'%', 'O', *fmt}; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'D': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(2); + os << static_cast(ymd.month()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.day()) << CharT{'/'}; + os.width(2); + os << static_cast(ymd.year()) % 100; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'F': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto const& ymd = fds.ymd; + save_ostream _(os); + os.imbue(std::locale::classic()); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(4); + os << static_cast(ymd.year()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.month()) << CharT{'-'}; + os.width(2); + os << static_cast(ymd.day()); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'g': + case 'G': + if (command) + { + if (modified == CharT{}) + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); + auto y = year_month_day{ld + days{3}}.year(); + auto start = local_days((y-years{1})/December/Thursday[last]) + + (Monday-Thursday); + if (ld < start) + --y; + if (*fmt == CharT{'G'}) + os << y; + else + { + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | 
std::ios::right); + os.width(2); + os << std::abs(static_cast(y)) % 100; + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'H': + case 'I': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } + auto hms = fds.tod; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto h = *fmt == CharT{'I'} ? date::make12(hms.hours()) : hms.hours(); + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_hour = static_cast(hms.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'j': + if (command) + { + if (modified == CharT{}) + { + if (fds.ymd.ok() || fds.has_tod) + { + days doy; + if (fds.ymd.ok()) + { + auto ld = local_days(fds.ymd); + auto y = fds.ymd.year(); + doy = ld - local_days(y/January/1) + days{1}; + } + else + { + doy = duration_cast(fds.tod.to_duration()); + } + save_ostream _(os); + os.fill('0'); + os.flags(std::ios::dec | std::ios::right); + os.width(3); + os << doy.count(); + } + else + { + os.setstate(std::ios::failbit); + } + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'm': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.month().ok()) + os.setstate(std::ios::failbit); + auto m = static_cast(fds.ymd.month()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (m < 10) + os << CharT{'0'}; + os << m; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_mon = static_cast(m-1); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'M': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_min = static_cast(fds.tod.minutes().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'n': + if (command) + { + if (modified == CharT{}) + os << CharT{'\n'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + if (date::is_am(fds.tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + 
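+ // An E or O modifier has no defined meaning for %p, so the branch below
+ // echoes the unrecognized "%<modifier>p" sequence into the output verbatim
+ // rather than failing the stream -- the same fallback the other conversion
+ // specifiers in this function use. Illustrative use (hypothetical values):
+ // date::format("%I:%M %p", std::chrono::hours{8} + std::chrono::minutes{30})
+ // yields "08:30 AM" in the classic "C" locale.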
else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Q': + case 'q': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + auto d = fds.tod.to_duration(); + if (*fmt == 'q') + os << get_units(typename decltype(d)::period::type{}); + else + os << d.count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + const CharT f[] = {'%', *fmt}; + tm.tm_hour = static_cast(fds.tod.hours().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_sec = static_cast(fds.tod.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); +#else + hh_mm_ss tod(duration_cast(fds.tod.to_duration())); + save_ostream _(os); + os.fill('0'); + os.width(2); + os << date::make12(tod.hours()).count() << CharT{':'}; + os.width(2); + os << tod.minutes().count() << CharT{':'}; + os.width(2); + os << tod.seconds().count() << CharT{' '}; + if (date::is_am(tod.hours())) + os << ampm_names().first[0]; + else + os << ampm_names().first[1]; +#endif + } + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (fds.tod.hours() < hours{10}) + os << CharT{'0'}; + os << fds.tod.hours().count() << CharT{':'}; + if (fds.tod.minutes() < minutes{10}) + os << CharT{'0'}; + os << fds.tod.minutes().count(); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'S': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + if (insert_negative) + { + os << '-'; + insert_negative = false; + } +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << fds.tod.s_; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_sec = static_cast(fds.tod.s_.seconds().count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 't': + if (command) + { + if (modified == CharT{}) + os << CharT{'\t'}; + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); + os << fds.tod; + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'u': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto wd = extract_weekday(os, fds); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + os << (wd != 0 ? 
wd : 7u); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'U': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Sunday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } + #if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'V': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(fds.ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto y = year_month_day{ld + days{3}}.year(); + auto st = local_days((y-years{1})/12/Thursday[last]) + + (Monday-Thursday); + if (ld < st) + { + --y; + st = local_days((y - years{1})/12/Thursday[last]) + + (Monday-Thursday); + } + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + auto const& ymd = fds.ymd; + tm.tm_year = static_cast(ymd.year()) - 1900; + tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'w': + if (command) + { + auto wd = extract_weekday(os, fds); + if (os.fail()) + return os; +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + os << wd; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_wday = static_cast(wd); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + else + { + os << CharT{'%'} << modified << *fmt; + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'W': + if (command) + { + if (modified == CharT{'E'}) + os << CharT{'%'} << modified << *fmt; + else + { + auto const& ymd = fds.ymd; + if (!ymd.ok()) + os.setstate(std::ios::failbit); + auto ld = local_days(ymd); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + auto st = local_days(Monday[1]/January/ymd.year()); + if (ld < st) + os << CharT{'0'} << CharT{'0'}; + else + { + auto wn = duration_cast(ld - st).count() + 1; + if (wn < 10) + os << CharT{'0'}; + os << wn; + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(ymd.year()) - 1900; + 
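+ // %OW is delegated to the locale's time_put facet, which derives the
+ // week number from a std::tm; tm_year, tm_wday and tm_yday are the
+ // fields it consults, so exactly those three are populated here.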
tm.tm_wday = static_cast(extract_weekday(os, fds)); + if (os.fail()) + return os; + tm.tm_yday = static_cast((ld - local_days(ymd.year()/1/1)).count()); + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'X': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.has_tod) + os.setstate(std::ios::failbit); +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_sec = static_cast(fds.tod.seconds().count()); + tm.tm_min = static_cast(fds.tod.minutes().count()); + tm.tm_hour = static_cast(fds.tod.hours().count()); + CharT f[3] = {'%'}; + auto fe = std::begin(f) + 1; + if (modified == CharT{'E'}) + *fe++ = modified; + *fe++ = *fmt; + facet.put(os, os, os.fill(), &tm, std::begin(f), fe); +#else + os << fds.tod; +#endif + } + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'y': + if (command) + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = static_cast(fds.ymd.year()); +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + y = std::abs(y) % 100; + if (y < 10) + os << CharT{'0'}; + os << y; +#if !ONLY_C_LOCALE + } + else + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = y - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'Y': + if (command) + { + if (modified == CharT{'O'}) + os << CharT{'%'} << modified << *fmt; + else + { + if (!fds.ymd.year().ok()) + os.setstate(std::ios::failbit); + auto y = fds.ymd.year(); +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + save_ostream _(os); + os.imbue(std::locale::classic()); + os << y; + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + const CharT f[] = {'%', modified, *fmt}; + tm.tm_year = static_cast(y) - 1900; + facet.put(os, os, os.fill(), &tm, std::begin(f), std::end(f)); + } +#endif + } + modified = CharT{}; + command = nullptr; + } + else + os << *fmt; + break; + case 'z': + if (command) + { + if (offset_sec == nullptr) + { + // Can not format %z with unknown offset + os.setstate(ios::failbit); + return os; + } + auto m = duration_cast(*offset_sec); + auto neg = m < minutes{0}; + m = date::abs(m); + auto h = duration_cast(m); + m -= h; + if (neg) + os << CharT{'-'}; + else + os << CharT{'+'}; + if (h < hours{10}) + os << CharT{'0'}; + os << h.count(); + if (modified != CharT{}) + os << CharT{':'}; + if (m < minutes{10}) + os << CharT{'0'}; + os << m.count(); + command = nullptr; + modified = CharT{}; + } + else + os << *fmt; + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + if (abbrev == nullptr) + { + // Can not format %Z with unknown time_zone + os.setstate(ios::failbit); + return os; + } + for (auto c : *abbrev) + os << CharT(c); + } + else + { + os << CharT{'%'} << modified << *fmt; + modified = CharT{}; + } + command = nullptr; + } + else + os << *fmt; + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + os << CharT{'%'} << modified << *fmt; + command = nullptr; + modified = CharT{}; + } + } + else + os << *fmt; + break; + case '%': + if (command) + { + if (modified == CharT{}) + { + os << CharT{'%'}; + command = nullptr; + } + else + { + os << CharT{'%'} << modified << CharT{'%'}; + command = nullptr; + modified = CharT{}; + } + } + else + command = fmt; + break; + default: + if 
(command) + { + os << CharT{'%'}; + command = nullptr; + } + if (modified != CharT{}) + { + os << modified; + modified = CharT{}; + } + os << *fmt; + break; + } + } + if (command) + os << CharT{'%'}; + if (modified != CharT{}) + os << modified; + return os; +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year& y) +{ + using CT = std::chrono::seconds; + fields fds{y/0/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month& m) +{ + using CT = std::chrono::seconds; + fields fds{m/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const day& d) +{ + using CT = std::chrono::seconds; + fields fds{d/0/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const weekday& wd) +{ + using CT = std::chrono::seconds; + fields fds{wd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const year_month& ym) +{ + using CT = std::chrono::seconds; + fields fds{ym/0}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, const month_day& md) +{ + using CT = std::chrono::seconds; + fields fds{md/nanyear}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const year_month_day& ymd) +{ + using CT = std::chrono::seconds; + fields fds{ymd}; + return to_stream(os, fmt, fds); +} + +template +inline +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const std::chrono::duration& d) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + fields fds{hh_mm_ss{d}}; + return to_stream(os, fmt, fds); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const local_time& tp, const std::string* abbrev = nullptr, + const std::chrono::seconds* offset_sec = nullptr) +{ + using CT = typename std::common_type::type; + auto ld = std::chrono::time_point_cast(tp); + fields fds; + if (ld <= tp) + fds = fields{year_month_day{ld}, hh_mm_ss{tp-local_seconds{ld}}}; + else + fds = fields{year_month_day{ld - days{1}}, + hh_mm_ss{days{1} - (local_seconds{ld} - tp)}}; + return to_stream(os, fmt, fds, abbrev, offset_sec); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const sys_time& tp) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto sd = std::chrono::time_point_cast(tp); + fields fds; + if (sd <= tp) + fds = fields{year_month_day{sd}, hh_mm_ss{tp-sys_seconds{sd}}}; + else + fds = fields{year_month_day{sd - days{1}}, + hh_mm_ss{days{1} - (sys_seconds{sd} - tp)}}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +// format + +template +auto +format(const std::locale& loc, const CharT* fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const CharT* fmt, const Streamable& tp) + -> 
decltype(to_stream(std::declval&>(), fmt, tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt, tp); + return os.str(); +} + +template +auto +format(const std::locale& loc, const std::basic_string& fmt, + const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + os.imbue(loc); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +template +auto +format(const std::basic_string& fmt, const Streamable& tp) + -> decltype(to_stream(std::declval&>(), fmt.c_str(), tp), + std::basic_string{}) +{ + std::basic_ostringstream os; + os.exceptions(std::ios::failbit | std::ios::badbit); + to_stream(os, fmt.c_str(), tp); + return os.str(); +} + +// parse + +namespace detail +{ + +template +bool +read_char(std::basic_istream& is, CharT fmt, std::ios::iostate& err) +{ + auto ic = is.get(); + if (Traits::eq_int_type(ic, Traits::eof()) || + !Traits::eq(Traits::to_char_type(ic), fmt)) + { + err |= std::ios::failbit; + is.setstate(std::ios::failbit); + return false; + } + return true; +} + +template +unsigned +read_unsigned(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned x = 0; + unsigned count = 0; + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + (void)is.get(); + ++count; + x = 10*x + static_cast(c - '0'); + if (count == M) + break; + } + if (count < m) + is.setstate(std::ios::failbit); + return x; +} + +template +int +read_signed(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (('0' <= c && c <= '9') || c == '-' || c == '+') + { + if (c == '-' || c == '+') + (void)is.get(); + auto x = static_cast(read_unsigned(is, std::max(m, 1u), M)); + if (!is.fail()) + { + if (c == '-') + x = -x; + return x; + } + } + } + if (m > 0) + is.setstate(std::ios::failbit); + return 0; +} + +template +long double +read_long_double(std::basic_istream& is, unsigned m = 1, unsigned M = 10) +{ + unsigned count = 0; + unsigned fcount = 0; + unsigned long long i = 0; + unsigned long long f = 0; + bool parsing_fraction = false; +#if ONLY_C_LOCALE + typename Traits::int_type decimal_point = '.'; +#else + auto decimal_point = Traits::to_int_type( + std::use_facet>(is.getloc()).decimal_point()); +#endif + while (true) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + break; + if (Traits::eq_int_type(ic, decimal_point)) + { + decimal_point = Traits::eof(); + parsing_fraction = true; + } + else + { + auto c = static_cast(Traits::to_char_type(ic)); + if (!('0' <= c && c <= '9')) + break; + if (!parsing_fraction) + { + i = 10*i + static_cast(c - '0'); + } + else + { + f = 10*f + static_cast(c - '0'); + ++fcount; + } + } + (void)is.get(); + if (++count == M) + break; + } + if (count < m) + { + is.setstate(std::ios::failbit); + return 0; + } + return static_cast(i) + static_cast(f)/std::pow(10.L, fcount); +} + +struct rs +{ + int& i; + unsigned m; + unsigned M; +}; + +struct ru +{ + int& i; + unsigned m; + unsigned M; +}; + +struct rld +{ + long double& i; + unsigned m; + unsigned M; +}; + +template +void +read(std::basic_istream&) +{ +} + +template +void +read(std::basic_istream& is, CharT a0, 
Args&& ...args); + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args); + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args); + +template +void +read(std::basic_istream& is, int a0, Args&& ...args); + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args); + +template +void +read(std::basic_istream& is, CharT a0, Args&& ...args) +{ + // No-op if a0 == CharT{} + if (a0 != CharT{}) + { + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + is.setstate(std::ios::failbit | std::ios::eofbit); + return; + } + if (!Traits::eq(Traits::to_char_type(ic), a0)) + { + is.setstate(std::ios::failbit); + return; + } + (void)is.get(); + } + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rs a0, Args&& ...args) +{ + auto x = read_signed(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, ru a0, Args&& ...args) +{ + auto x = read_unsigned(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = static_cast(x); + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, int a0, Args&& ...args) +{ + if (a0 != -1) + { + auto u = static_cast(a0); + CharT buf[std::numeric_limits::digits10+2u] = {}; + auto e = buf; + do + { + *e++ = static_cast(CharT(u % 10) + CharT{'0'}); + u /= 10; + } while (u > 0); + std::reverse(buf, e); + for (auto p = buf; p != e && is.rdstate() == std::ios::goodbit; ++p) + read(is, *p); + } + if (is.rdstate() == std::ios::goodbit) + read(is, std::forward(args)...); +} + +template +void +read(std::basic_istream& is, rld a0, Args&& ...args) +{ + auto x = read_long_double(is, a0.m, a0.M); + if (is.fail()) + return; + a0.i = x; + read(is, std::forward(args)...); +} + +template +inline +void +checked_set(T& value, T from, T not_a_value, std::basic_ios& is) +{ + if (!is.fail()) + { + if (value == not_a_value) + value = std::move(from); + else if (value != from) + is.setstate(std::ios::failbit); + } +} + +} // namespace detail; + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + fields& fds, std::basic_string* abbrev, + std::chrono::minutes* offset) +{ + using std::numeric_limits; + using std::ios; + using std::chrono::duration; + using std::chrono::duration_cast; + using std::chrono::seconds; + using std::chrono::minutes; + using std::chrono::hours; + using detail::round_i; + typename std::basic_istream::sentry ok{is, true}; + if (ok) + { + date::detail::save_istream ss(is); + is.fill(' '); + is.flags(std::ios::skipws | std::ios::dec); + is.width(0); +#if !ONLY_C_LOCALE + auto& f = std::use_facet>(is.getloc()); + std::tm tm{}; +#endif + const CharT* command = nullptr; + auto modified = CharT{}; + auto width = -1; + + CONSTDATA int not_a_year = numeric_limits::min(); + CONSTDATA int not_a_2digit_year = 100; + CONSTDATA int not_a_century = not_a_year / 100; + CONSTDATA int not_a_month = 0; + CONSTDATA int not_a_day = 0; + CONSTDATA int not_a_hour = numeric_limits::min(); + CONSTDATA int not_a_hour_12_value = 0; + CONSTDATA int not_a_minute = not_a_hour; + CONSTDATA Duration not_a_second = Duration::min(); + CONSTDATA int not_a_doy = -1; + CONSTDATA int not_a_weekday = 8; + CONSTDATA int not_a_week_num = 100; + CONSTDATA int not_a_ampm = -1; + CONSTDATA minutes not_a_offset = minutes::min(); + + int Y = not_a_year; // c, F, Y * + int y = not_a_2digit_year; // D, x, y * + int g = not_a_2digit_year; // g * + int G = not_a_year; // G * + int C = 
not_a_century; // C * + int m = not_a_month; // b, B, h, m, c, D, F, x * + int d = not_a_day; // c, d, D, e, F, x * + int j = not_a_doy; // j * + int wd = not_a_weekday; // a, A, u, w * + int H = not_a_hour; // c, H, R, T, X * + int I = not_a_hour_12_value; // I, r * + int p = not_a_ampm; // p, r * + int M = not_a_minute; // c, M, r, R, T, X * + Duration s = not_a_second; // c, r, S, T, X * + int U = not_a_week_num; // U * + int V = not_a_week_num; // V * + int W = not_a_week_num; // W * + std::basic_string temp_abbrev; // Z * + minutes temp_offset = not_a_offset; // z * + + using detail::read; + using detail::rs; + using detail::ru; + using detail::rld; + using detail::checked_set; + for (; *fmt != CharT{} && !is.fail(); ++fmt) + { + switch (*fmt) + { + case 'a': + case 'A': + case 'u': + case 'w': // wd: a, A, u, w + if (command) + { + int trial_wd = not_a_weekday; + if (*fmt == 'a' || *fmt == 'A') + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; +#else + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + trial_wd = i % 7; +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + } + else // *fmt == 'u' || *fmt == 'w' + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + read(is, ru{trial_wd, 1, width == -1 ? + 1u : static_cast(width)}); + if (!is.fail()) + { + if (*fmt == 'u') + { + if (!(1 <= trial_wd && trial_wd <= 7)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + else if (trial_wd == 7) + trial_wd = 0; + } + else // *fmt == 'w' + { + if (!(0 <= trial_wd && trial_wd <= 6)) + { + trial_wd = not_a_weekday; + is.setstate(ios::failbit); + } + } + } + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (!is.fail()) + trial_wd = tm.tm_wday; + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + } + if (trial_wd != not_a_weekday) + checked_set(wd, trial_wd, not_a_weekday, is); + } + else // !command + read(is, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + break; + case 'b': + case 'B': + case 'h': + if (command) + { + if (modified == CharT{}) + { + int ttm = not_a_month; +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + ttm = tm.tm_mon + 1; + is.setstate(err); +#else + auto nm = detail::month_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + if (!is.fail()) + ttm = i % 12 + 1; +#endif + checked_set(m, ttm, not_a_month, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'c': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + 
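+ // With ONLY_C_LOCALE there is no time_get facet to consult, so %c is
+ // parsed against the classic C locale's fixed date-and-time picture
+ // spelled out below, e.g. "Sun Jun  2 13:05:09 2024" (the sample input
+ // is an illustrative annotation, not from the upstream source).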
// "%a %b %e %T %Y" + auto nm = detail::weekday_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(wd, static_cast(i % 7), not_a_weekday, is); + ws(is); + nm = detail::month_names(); + i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(m, static_cast(i % 12 + 1), not_a_month, is); + ws(is); + int td = not_a_day; + read(is, rs{td, 1, 2}); + checked_set(d, td, not_a_day, is); + ws(is); + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH; + int tM; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + int tY = not_a_year; + read(is, rs{tY, 1, 4u}); + checked_set(Y, tY, not_a_year, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'x': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + checked_set(m, tm.tm_mon + 1, not_a_month, is); + checked_set(d, tm.tm_mday, not_a_day, is); + } + is.setstate(err); +#else + // "%m/%d/%y" + int ty = not_a_2digit_year; + int tm = not_a_month; + int td = not_a_day; + read(is, ru{tm, 1, 2}, CharT{'/'}, ru{td, 1, 2}, CharT{'/'}, + rs{ty, 1, 2}); + checked_set(y, ty, not_a_2digit_year, is); + checked_set(m, tm, not_a_month, is); + checked_set(d, td, not_a_day, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'X': + if (command) + { + if (modified != CharT{'O'}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_minute, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%T" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'C': + if (command) + { + int tC = not_a_century; +#if !ONLY_C_LOCALE + if (modified == CharT{}) + { +#endif + read(is, rs{tC, 1, width == -1 ? 2u : static_cast(width)}); +#if !ONLY_C_LOCALE + } + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + auto tY = tm.tm_year + 1900; + tC = (tY >= 0 ? 
tY : tY-99) / 100; + } + is.setstate(err); + } +#endif + checked_set(C, tC, not_a_century, is); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'D': + if (command) + { + if (modified == CharT{}) + { + int tn = not_a_month; + int td = not_a_day; + int ty = not_a_2digit_year; + read(is, ru{tn, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'}, + ru{td, 1, 2}, CharT{'\0'}, CharT{'/'}, CharT{'\0'}, + rs{ty, 1, 2}); + checked_set(y, ty, not_a_2digit_year, is); + checked_set(m, tn, not_a_month, is); + checked_set(d, td, not_a_day, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'F': + if (command) + { + if (modified == CharT{}) + { + int tY = not_a_year; + int tn = not_a_month; + int td = not_a_day; + read(is, rs{tY, 1, width == -1 ? 4u : static_cast<unsigned>(width)}, + CharT{'-'}, ru{tn, 1, 2}, CharT{'-'}, ru{td, 1, 2}); + checked_set(Y, tY, not_a_year, is); + checked_set(m, tn, not_a_month, is); + checked_set(d, td, not_a_day, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'd': + case 'e': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int td = not_a_day; + read(is, rs{td, 1, width == -1 ? 2u : static_cast<unsigned>(width)}); + checked_set(d, td, not_a_day, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + command = nullptr; + width = -1; + modified = CharT{}; + if ((err & ios::failbit) == 0) + checked_set(d, tm.tm_mday, not_a_day, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'H': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tH = not_a_hour; + read(is, ru{tH, 1, width == -1 ? 2u : static_cast<unsigned>(width)}); + checked_set(H, tH, not_a_hour, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(H, tm.tm_hour, not_a_hour, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'I': + if (command) + { + if (modified == CharT{}) + { + int tI = not_a_hour_12_value; + // Reads an hour into I; it must be in [1, 12] + read(is, rs{tI, 1, width == -1 ? 2u : static_cast<unsigned>(width)}); + if (!(1 <= tI && tI <= 12)) + is.setstate(ios::failbit); + checked_set(I, tI, not_a_hour_12_value, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'j': + if (command) + { + if (modified == CharT{}) + { + int tj = not_a_doy; + read(is, ru{tj, 1, width == -1 ?
3u : static_cast(width)}); + checked_set(j, tj, not_a_doy, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'M': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tM = not_a_minute; + read(is, ru{tM, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(M, tM, not_a_minute, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(M, tm.tm_min, not_a_minute, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'm': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + int tn = not_a_month; + read(is, rs{tn, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(m, tn, not_a_month, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(m, tm.tm_mon + 1, not_a_month, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'n': + case 't': + if (command) + { + if (modified == CharT{}) + { + // %n matches a single white space character + // %t matches 0 or 1 white space characters + auto ic = is.peek(); + if (Traits::eq_int_type(ic, Traits::eof())) + { + ios::iostate err = ios::eofbit; + if (*fmt == 'n') + err |= ios::failbit; + is.setstate(err); + break; + } + if (isspace(ic)) + { + (void)is.get(); + } + else if (*fmt == 'n') + is.setstate(ios::failbit); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'p': + if (command) + { + if (modified == CharT{}) + { + int tp = not_a_ampm; +#if !ONLY_C_LOCALE + tm = std::tm{}; + tm.tm_hour = 1; + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + is.setstate(err); + if (tm.tm_hour == 1) + tp = 0; + else if (tm.tm_hour == 13) + tp = 1; + else + is.setstate(err); +#else + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + tp = static_cast(i); +#endif + checked_set(p, tp, not_a_ampm, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + + break; + case 'r': + if (command) + { + if (modified == CharT{}) + { +#if !ONLY_C_LOCALE + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + { + checked_set(H, tm.tm_hour, not_a_hour, is); + checked_set(M, tm.tm_min, not_a_hour, is); + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + } + is.setstate(err); +#else + // "%I:%M:%S %p" + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 
2 : 3 + dfs::width; + long double S{}; + int tI = not_a_hour_12_value; + int tM = not_a_minute; + read(is, ru{tI, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(I, tI, not_a_hour_12_value, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + ws(is); + auto nm = detail::ampm_names(); + auto i = detail::scan_keyword(is, nm.first, nm.second) - nm.first; + checked_set(p, static_cast(i), not_a_ampm, is); +#endif + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'R': + if (command) + { + if (modified == CharT{}) + { + int tH = not_a_hour; + int tM = not_a_minute; + read(is, ru{tH, 1, 2}, CharT{'\0'}, CharT{':'}, CharT{'\0'}, + ru{tM, 1, 2}, CharT{'\0'}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'S': + if (command) + { + #if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'E'}) +#endif + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + long double S{}; + read(is, rld{S, 1, width == -1 ? w : static_cast(width)}); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'O'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(s, duration_cast(seconds{tm.tm_sec}), + not_a_second, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'T': + if (command) + { + if (modified == CharT{}) + { + using dfs = detail::decimal_format_seconds; + CONSTDATA auto w = Duration::period::den == 1 ? 2 : 3 + dfs::width; + int tH = not_a_hour; + int tM = not_a_minute; + long double S{}; + read(is, ru{tH, 1, 2}, CharT{':'}, ru{tM, 1, 2}, + CharT{':'}, rld{S, 1, w}); + checked_set(H, tH, not_a_hour, is); + checked_set(M, tM, not_a_minute, is); + checked_set(s, round_i(duration{S}), + not_a_second, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#else + if (modified != CharT{'O'}) +#endif + { + int tY = not_a_year; + read(is, rs{tY, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(Y, tY, not_a_year, is); + } +#if !ONLY_C_LOCALE + else if (modified == CharT{'E'}) + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'y': + if (command) + { +#if !ONLY_C_LOCALE + if (modified == CharT{}) +#endif + { + int ty = not_a_2digit_year; + read(is, ru{ty, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(y, ty, not_a_2digit_year, is); + } +#if !ONLY_C_LOCALE + else + { + ios::iostate err = ios::goodbit; + f.get(is, nullptr, is, err, &tm, command, fmt+1); + if ((err & ios::failbit) == 0) + checked_set(Y, tm.tm_year + 1900, not_a_year, is); + is.setstate(err); + } +#endif + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'g': + if (command) + { + if (modified == CharT{}) + { + int tg = not_a_2digit_year; + read(is, ru{tg, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(g, tg, not_a_2digit_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'G': + if (command) + { + if (modified == CharT{}) + { + int tG = not_a_year; + read(is, rs{tG, 1, width == -1 ? 4u : static_cast(width)}); + checked_set(G, tG, not_a_year, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'U': + if (command) + { + if (modified == CharT{}) + { + int tU = not_a_week_num; + read(is, ru{tU, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(U, tU, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'V': + if (command) + { + if (modified == CharT{}) + { + int tV = not_a_week_num; + read(is, ru{tV, 1, width == -1 ? 2u : static_cast(width)}); + checked_set(V, tV, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'W': + if (command) + { + if (modified == CharT{}) + { + int tW = not_a_week_num; + read(is, ru{tW, 1, width == -1 ? 
2u : static_cast(width)}); + checked_set(W, tW, not_a_week_num, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'E': + case 'O': + if (command) + { + if (modified == CharT{}) + { + modified = *fmt; + } + else + { + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else + read(is, *fmt); + break; + case '%': + if (command) + { + if (modified == CharT{}) + read(is, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + command = fmt; + break; + case 'z': + if (command) + { + int tH, tM; + minutes toff = not_a_offset; + bool neg = false; + auto ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == '-') + neg = true; + } + if (modified == CharT{}) + { + read(is, rs{tH, 2, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if ('0' <= c && c <= '9') + { + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + else + { + read(is, rs{tH, 1, 2}); + if (!is.fail()) + toff = hours{std::abs(tH)}; + if (is.good()) + { + ic = is.peek(); + if (!Traits::eq_int_type(ic, Traits::eof())) + { + auto c = static_cast(Traits::to_char_type(ic)); + if (c == ':') + { + (void)is.get(); + read(is, ru{tM, 2, 2}); + if (!is.fail()) + toff += minutes{tM}; + } + } + } + } + if (neg) + toff = -toff; + checked_set(temp_offset, toff, not_a_offset, is); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + case 'Z': + if (command) + { + if (modified == CharT{}) + { + std::basic_string buf; + while (is.rdstate() == std::ios::goodbit) + { + auto i = is.rdbuf()->sgetc(); + if (Traits::eq_int_type(i, Traits::eof())) + { + is.setstate(ios::eofbit); + break; + } + auto wc = Traits::to_char_type(i); + auto c = static_cast(wc); + // is c a valid time zone name or abbreviation character? 
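+ // Accepted characters are ASCII alphanumerics plus '_', '/', '-' and '+':
+ // enough for IANA zone names such as "America/New_York" and abbreviations
+ // such as "GMT+8". The (1, 127) range test below also short-circuits so
+ // that values not representable as unsigned char never reach isalnum,
+ // whose behavior would otherwise be undefined.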
+ if (!(CharT{1} < wc && wc < CharT{127}) || !(isalnum(c) || + c == '_' || c == '/' || c == '-' || c == '+')) + break; + buf.push_back(c); + is.rdbuf()->sbumpc(); + } + if (buf.empty()) + is.setstate(ios::failbit); + checked_set(temp_abbrev, buf, {}, is); + } + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + else + read(is, *fmt); + break; + default: + if (command) + { + if (width == -1 && modified == CharT{} && '0' <= *fmt && *fmt <= '9') + { + width = static_cast(*fmt) - '0'; + while ('0' <= fmt[1] && fmt[1] <= '9') + width = 10*width + static_cast(*++fmt) - '0'; + } + else + { + if (modified == CharT{}) + read(is, CharT{'%'}, width, *fmt); + else + read(is, CharT{'%'}, width, modified, *fmt); + command = nullptr; + width = -1; + modified = CharT{}; + } + } + else // !command + { + if (isspace(static_cast(*fmt))) + { + // space matches 0 or more white space characters + if (is.good()) + ws(is); + } + else + read(is, *fmt); + } + break; + } + } + // is.fail() || *fmt == CharT{} + if (is.rdstate() == ios::goodbit && command) + { + if (modified == CharT{}) + read(is, CharT{'%'}, width); + else + read(is, CharT{'%'}, width, modified); + } + if (!is.fail()) + { + if (y != not_a_2digit_year) + { + // Convert y and an optional C to Y + if (!(0 <= y && y <= 99)) + goto broken; + if (C == not_a_century) + { + if (Y == not_a_year) + { + if (y >= 69) + C = 19; + else + C = 20; + } + else + { + C = (Y >= 0 ? Y : Y-100) / 100; + } + } + int tY; + if (C >= 0) + tY = 100*C + y; + else + tY = 100*(C+1) - (y == 0 ? 100 : y); + if (Y != not_a_year && Y != tY) + goto broken; + Y = tY; + } + if (g != not_a_2digit_year) + { + // Convert g and an optional C to G + if (!(0 <= g && g <= 99)) + goto broken; + if (C == not_a_century) + { + if (G == not_a_year) + { + if (g >= 69) + C = 19; + else + C = 20; + } + else + { + C = (G >= 0 ? G : G-100) / 100; + } + } + int tG; + if (C >= 0) + tG = 100*C + g; + else + tG = 100*(C+1) - (g == 0 ? 
100 : g); + if (G != not_a_year && G != tG) + goto broken; + G = tG; + } + if (Y < static_cast(year::min()) || Y > static_cast(year::max())) + Y = not_a_year; + bool computed = false; + if (G != not_a_year && V != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{G-1}/December/Thursday[last]) + + (Monday-Thursday) + weeks{V-1} + + (weekday{static_cast(wd)}-Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && U != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Sunday[1]) + + weeks{U-1} + + (weekday{static_cast(wd)} - Sunday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (Y != not_a_year && W != not_a_week_num && wd != not_a_weekday) + { + year_month_day ymd_trial = sys_days(year{Y}/January/Monday[1]) + + weeks{W-1} + + (weekday{static_cast(wd)} - Monday); + if (Y == not_a_year) + Y = static_cast(ymd_trial.year()); + else if (year{Y} != ymd_trial.year()) + goto broken; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + computed = true; + } + if (j != not_a_doy && Y != not_a_year) + { + auto ymd_trial = year_month_day{local_days(year{Y}/1/1) + days{j-1}}; + if (m == not_a_month) + m = static_cast(static_cast(ymd_trial.month())); + else if (month(static_cast(m)) != ymd_trial.month()) + goto broken; + if (d == not_a_day) + d = static_cast(static_cast(ymd_trial.day())); + else if (day(static_cast(d)) != ymd_trial.day()) + goto broken; + j = not_a_doy; + } + auto ymd = year{Y}/m/d; + if (ymd.ok()) + { + if (wd == not_a_weekday) + wd = static_cast((weekday(sys_days(ymd)) - Sunday).count()); + else if (wd != static_cast((weekday(sys_days(ymd)) - Sunday).count())) + goto broken; + if (!computed) + { + if (G != not_a_year || V != not_a_week_num) + { + sys_days sd = ymd; + auto G_trial = year_month_day{sd + days{3}}.year(); + auto start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + if (sd < start) + { + --G_trial; + if (V != not_a_week_num) + start = sys_days((G_trial - years{1})/December/Thursday[last]) + + (Monday - Thursday); + } + if (G != not_a_year && G != static_cast(G_trial)) + goto broken; + if (V != not_a_week_num) + { + auto V_trial = duration_cast(sd - start).count() + 1; + if (V != V_trial) + goto broken; + } + } + if (U != not_a_week_num) + { + auto start = sys_days(Sunday[1]/January/ymd.year()); + auto U_trial = floor(sys_days(ymd) - start).count() + 1; + if (U != U_trial) + goto broken; + } + if (W != not_a_week_num) + { + auto start = 
sys_days(Monday[1]/January/ymd.year()); + auto W_trial = floor(sys_days(ymd) - start).count() + 1; + if (W != W_trial) + goto broken; + } + } + } + fds.ymd = ymd; + if (I != not_a_hour_12_value) + { + if (!(1 <= I && I <= 12)) + goto broken; + if (p != not_a_ampm) + { + // p is in [0, 1] == [AM, PM] + // Store trial H in I + if (I == 12) + --p; + I += p*12; + // Either set H from I or make sure H and I are consistent + if (H == not_a_hour) + H = I; + else if (I != H) + goto broken; + } + else // p == not_a_ampm + { + // if H, make sure H and I could be consistent + if (H != not_a_hour) + { + if (I == 12) + { + if (H != 0 && H != 12) + goto broken; + } + else if (!(I == H || I == H+12)) + { + goto broken; + } + } + else // I is ambiguous, AM or PM? + goto broken; + } + } + if (H != not_a_hour) + { + fds.has_tod = true; + fds.tod = hh_mm_ss{hours{H}}; + } + if (M != not_a_minute) + { + fds.has_tod = true; + fds.tod.m_ = minutes{M}; + } + if (s != not_a_second) + { + fds.has_tod = true; + fds.tod.s_ = detail::decimal_format_seconds{s}; + } + if (j != not_a_doy) + { + fds.has_tod = true; + fds.tod.h_ += hours{days{j}}; + } + if (wd != not_a_weekday) + fds.wd = weekday{static_cast(wd)}; + if (abbrev != nullptr) + *abbrev = std::move(temp_abbrev); + if (offset != nullptr && temp_offset != not_a_offset) + *offset = temp_offset; + } + return is; + } +broken: + is.setstate(ios::failbit); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year& y, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.year().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + y = fds.ymd.year(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month& m, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + m = fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, day& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, weekday& wd, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.wd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + wd = fds.wd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, year_month& ym, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ym = fds.ymd.year()/fds.ymd.month(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, month_day& md, + 
std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.month().ok() || !fds.ymd.day().ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + md = fds.ymd.month()/fds.ymd.day(); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + year_month_day& ymd, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = std::chrono::seconds; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + ymd = fds.ymd; + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + sys_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + std::chrono::minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + local_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + fds.has_tod = true; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.ymd.ok() || !fds.tod.in_conventional_range()) + is.setstate(std::ios::failbit); + if (!is.fail()) + tp = round_i(local_seconds{local_days(fds.ymd)} + fds.tod.to_duration()); + return is; +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + std::chrono::duration& d, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using Duration = std::chrono::duration; + using CT = typename std::common_type::type; + using detail::round_i; + fields fds{}; + date::from_stream(is, fmt, fds, abbrev, offset); + if (!fds.has_tod) + is.setstate(std::ios::failbit); + if (!is.fail()) + d = round_i(fds.tod.to_duration()); + return is; +} + +template , + class Alloc = std::allocator> +struct parse_manip +{ + const std::basic_string format_; + Parsable& tp_; + std::basic_string* abbrev_; + std::chrono::minutes* offset_; + +public: + parse_manip(std::basic_string format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(std::move(format)) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + +#if HAS_STRING_VIEW + parse_manip(const CharT* format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} + + parse_manip(std::basic_string_view format, Parsable& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) + : format_(format) + , tp_(tp) + , abbrev_(abbrev) + , offset_(offset) + {} +#endif // HAS_STRING_VIEW +}; + +template +std::basic_istream& +operator>>(std::basic_istream& is, + const parse_manip& x) +{ + return date::from_stream(is, x.format_.c_str(), x.tp_, x.abbrev_, x.offset_); +} + +template +inline +auto +parse(const 
std::basic_string& format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, + std::declval*>(), + &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const std::basic_string& format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), + format.c_str(), tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// const CharT* formats + +template +inline +auto +parse(const CharT* format, Parsable& tp) + -> decltype(date::from_stream(std::declval&>(), format, tp), + parse_manip{format, tp}) +{ + return {format, tp}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::basic_string& abbrev) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev), + parse_manip{format, tp, &abbrev}) +{ + return {format, tp, &abbrev}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, std::declval*>(), &offset), + parse_manip{format, tp, nullptr, &offset}) +{ + return {format, tp, nullptr, &offset}; +} + +template +inline +auto +parse(const CharT* format, Parsable& tp, + std::basic_string& abbrev, std::chrono::minutes& offset) + -> decltype(date::from_stream(std::declval&>(), format, + tp, &abbrev, &offset), + parse_manip{format, tp, &abbrev, &offset}) +{ + return {format, tp, &abbrev, &offset}; +} + +// duration streaming + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, + const std::chrono::duration& d) +{ + return os << detail::make_string::from(d.count()) + + detail::get_units(typename Period::type{}); +} + +} // namespace date +} // namespace arrow_vendored + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#ifdef __GNUC__ +# pragma GCC diagnostic pop +#endif + +#endif // DATE_H diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h new file mode 100644 index 0000000000000000000000000000000000000000..acad28d13b558b9ce97ad2100e90d0835aaadc0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/ios.h @@ -0,0 +1,53 @@ +// +// ios.h +// DateTimeLib +// +// The MIT License (MIT) +// +// Copyright (c) 2016 Alexander Kormanovsky +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright 
notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ios_hpp
+#define ios_hpp
+
+#if __APPLE__
+# include <TargetConditionals.h>
+# if TARGET_OS_IPHONE
+#   include <string>
+
+    namespace arrow_vendored
+    {
+    namespace date
+    {
+    namespace iOSUtils
+    {
+
+    std::string get_tzdata_path();
+    std::string get_current_timezone();
+
+    } // namespace iOSUtils
+    } // namespace date
+    } // namespace arrow_vendored
+
+# endif // TARGET_OS_IPHONE
+#else // !__APPLE__
+# define TARGET_OS_IPHONE 0
+#endif // !__APPLE__
+#endif // ios_hpp
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h
new file mode 100644
index 0000000000000000000000000000000000000000..467db6d19979375f310374d5bc20eeb1d09a3034
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz.h
@@ -0,0 +1,2801 @@
+#ifndef TZ_H
+#define TZ_H
+
+// The MIT License (MIT)
+//
+// Copyright (c) 2015, 2016, 2017 Howard Hinnant
+// Copyright (c) 2017 Jiangang Zhuang
+// Copyright (c) 2017 Aaron Bishop
+// Copyright (c) 2017 Tomasz Kamiński
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// Our apologies. When the previous paragraph was written, lowercase had not yet
+// been invented (that would involve another several millennia of evolution).
+// We did not mean to shout.
+
+// Get more recent database at http://www.iana.org/time-zones
+
+// The notion of "current timezone" is something the operating system is expected to "just
+// know". How it knows this is system specific. It's often a value set by the user at OS
+// installation time and recorded by the OS somewhere. On Linux and Mac systems the current
+// timezone name is obtained by looking at the name or contents of a particular file on
+// disk. On Windows the current timezone name comes from the registry. 
In either method, +// there is no guarantee that the "native" current timezone name obtained will match any +// of the "Standard" names in this library's "database". On Linux, the names usually do +// seem to match so mapping functions to map from native to "Standard" are typically not +// required. On Windows, the names are never "Standard" so mapping is always required. +// Technically any OS may use the mapping process but currently only Windows does use it. + +// NOTE(ARROW): If this is not set, then the library will attempt to +// use libcurl to obtain a timezone database, and we probably do not want this. +#ifndef _WIN32 +#define USE_OS_TZDB 1 +#endif + +#ifndef USE_OS_TZDB +# define USE_OS_TZDB 0 +#endif + +#ifndef HAS_REMOTE_API +# if USE_OS_TZDB == 0 +# ifdef _WIN32 +# define HAS_REMOTE_API 0 +# else +# define HAS_REMOTE_API 1 +# endif +# else // HAS_REMOTE_API makes no sense when using the OS timezone database +# define HAS_REMOTE_API 0 +# endif +#endif + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wconstant-logical-operand" +#endif + +static_assert(!(USE_OS_TZDB && HAS_REMOTE_API), + "USE_OS_TZDB and HAS_REMOTE_API can not be used together"); + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#ifndef AUTO_DOWNLOAD +# define AUTO_DOWNLOAD HAS_REMOTE_API +#endif + +static_assert(HAS_REMOTE_API == 0 ? AUTO_DOWNLOAD == 0 : true, + "AUTO_DOWNLOAD can not be turned on without HAS_REMOTE_API"); + +#ifndef USE_SHELL_API +# define USE_SHELL_API 1 +#endif + +#if USE_OS_TZDB +# ifdef _WIN32 +# error "USE_OS_TZDB can not be used on Windows" +# endif +#endif + +#ifndef HAS_DEDUCTION_GUIDES +# if __cplusplus >= 201703 +# define HAS_DEDUCTION_GUIDES 1 +# else +# define HAS_DEDUCTION_GUIDES 0 +# endif +#endif // HAS_DEDUCTION_GUIDES + +#include "date.h" + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz_private.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# ifdef DATE_BUILD_DLL +# define DATE_API __declspec(dllexport) +# elif defined(DATE_USE_DLL) +# define DATE_API __declspec(dllimport) +# else +# define DATE_API +# endif +#else +# ifdef DATE_BUILD_DLL +# define DATE_API __attribute__ ((visibility ("default"))) +# else +# define DATE_API +# endif +#endif + +namespace arrow_vendored +{ +namespace date +{ + +enum class choose {earliest, latest}; + +namespace detail +{ + struct undocumented; + + template + struct nodeduct + { + using type = T; + }; + + template + using nodeduct_t = typename nodeduct::type; +} + +struct sys_info +{ + sys_seconds begin; + sys_seconds end; + std::chrono::seconds offset; + std::chrono::minutes save; + std::string abbrev; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const sys_info& r) +{ + os << r.begin << '\n'; + os << r.end << '\n'; + os << make_time(r.offset) << "\n"; + os << make_time(r.save) << "\n"; + os << r.abbrev << '\n'; + return os; +} + +struct local_info +{ + enum {unique, nonexistent, ambiguous} result; + sys_info first; + sys_info second; +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const local_info& r) +{ + if (r.result == local_info::nonexistent) + os << "nonexistent between\n"; + else if (r.result == local_info::ambiguous) + os << "ambiguous between\n"; + os << r.first; + if (r.result != local_info::unique) + { + os << "and\n"; + os << r.second; + } + return os; +} + +class nonexistent_local_time + : 
public std::runtime_error +{ +public: + template + nonexistent_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +nonexistent_local_time::nonexistent_local_time(local_time tp, + const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +nonexistent_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::nonexistent); + std::ostringstream os; + os << tp << " is in a gap between\n" + << local_seconds{i.first.end.time_since_epoch()} + i.first.offset << ' ' + << i.first.abbrev << " and\n" + << local_seconds{i.second.begin.time_since_epoch()} + i.second.offset << ' ' + << i.second.abbrev + << " which are both equivalent to\n" + << i.first.end << " UTC"; + return os.str(); +} + +class ambiguous_local_time + : public std::runtime_error +{ +public: + template + ambiguous_local_time(local_time tp, const local_info& i); + +private: + template + static + std::string + make_msg(local_time tp, const local_info& i); +}; + +template +inline +ambiguous_local_time::ambiguous_local_time(local_time tp, const local_info& i) + : std::runtime_error(make_msg(tp, i)) +{ +} + +template +std::string +ambiguous_local_time::make_msg(local_time tp, const local_info& i) +{ + assert(i.result == local_info::ambiguous); + std::ostringstream os; + os << tp << " is ambiguous. It could be\n" + << tp << ' ' << i.first.abbrev << " == " + << tp - i.first.offset << " UTC or\n" + << tp << ' ' << i.second.abbrev << " == " + << tp - i.second.offset << " UTC"; + return os.str(); +} + +class time_zone; + +#if HAS_STRING_VIEW +DATE_API const time_zone* locate_zone(std::string_view tz_name); +#else +DATE_API const time_zone* locate_zone(const std::string& tz_name); +#endif + +DATE_API const time_zone* current_zone(); + +template +struct zoned_traits +{ +}; + +template <> +struct zoned_traits +{ + static + const time_zone* + default_zone() + { + return date::locate_zone("Etc/UTC"); + } + +#if HAS_STRING_VIEW + + static + const time_zone* + locate_zone(std::string_view name) + { + return date::locate_zone(name); + } + +#else // !HAS_STRING_VIEW + + static + const time_zone* + locate_zone(const std::string& name) + { + return date::locate_zone(name); + } + + static + const time_zone* + locate_zone(const char* name) + { + return date::locate_zone(name); + } + +#endif // !HAS_STRING_VIEW +}; + +template +class zoned_time; + +template +bool +operator==(const zoned_time& x, + const zoned_time& y); + +template +class zoned_time +{ +public: + using duration = typename std::common_type::type; + +private: + TimeZonePtr zone_; + sys_time tp_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::default_zone())> +#endif + zoned_time(const sys_time& st); + explicit zoned_time(TimeZonePtr z); + +#if HAS_STRING_VIEW + template ::locate_zone(std::string_view())) + >::value + >::type> + explicit zoned_time(std::string_view name); +#else +# if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())) + >::value + >::type> +# endif + explicit zoned_time(const std::string& name); +#endif + + template , + sys_time>::value + >::type> + zoned_time(const zoned_time& zt) NOEXCEPT; + + zoned_time(TimeZonePtr z, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{})), + sys_time + >::value + 
>::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ()->to_sys(local_time{}, + choose::earliest)), + sys_time + >::value + >::type> +#endif + zoned_time(TimeZonePtr z, const local_time& tp, choose c); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt); + + template , + sys_time>::value + >::type> + zoned_time(TimeZonePtr z, const zoned_time& zt, choose); + +#if HAS_STRING_VIEW + + template ::locate_zone(std::string_view())), + sys_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> st); + + template ::locate_zone(std::string_view())), + local_time + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp); + + template ::locate_zone(std::string_view())), + local_time, + choose + >::value + >::type> + zoned_time(std::string_view name, detail::nodeduct_t&> tp, choose c); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt); + + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string_view())), + zoned_time, + choose + >::value + >::type> + zoned_time(std::string_view name, const zoned_time& zt, choose); + +#else // !HAS_STRING_VIEW + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + sys_time + >::value + >::type> +#endif + zoned_time(const char* name, const sys_time& st); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const std::string& name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template ::locate_zone(std::string())), + local_time, + choose + >::value + >::type> +#endif + zoned_time(const char* name, const local_time& tp, choose c); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt); + +#if !defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const std::string& name, const zoned_time& zt, + choose); + +#if 
!defined(_MSC_VER) || (_MSC_VER > 1916) + template , + sys_time>::value && + std::is_constructible + < + zoned_time, + decltype(zoned_traits::locate_zone(std::string())), + zoned_time, + choose + >::value + >::type> +#else + template +#endif + zoned_time(const char* name, const zoned_time& zt, + choose); + +#endif // !HAS_STRING_VIEW + + zoned_time& operator=(const sys_time& st); + zoned_time& operator=(const local_time& ut); + + explicit operator sys_time() const; + explicit operator local_time() const; + + TimeZonePtr get_time_zone() const; + local_time get_local_time() const; + sys_time get_sys_time() const; + sys_info get_info() const; + + template + friend + bool + operator==(const zoned_time& x, + const zoned_time& y); + + template + friend + std::basic_ostream& + operator<<(std::basic_ostream& os, + const zoned_time& t); + +private: + template friend class zoned_time; + + template + static + TimeZonePtr2&& + check(TimeZonePtr2&& p); +}; + +using zoned_seconds = zoned_time; + +#if HAS_DEDUCTION_GUIDES + +namespace detail +{ + template + using time_zone_representation = + std::conditional_t + < + std::is_convertible::value, + time_zone const*, + std::remove_cv_t> + >; +} + +zoned_time() + -> zoned_time; + +template +zoned_time(sys_time) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&) + -> zoned_time>; + +template +zoned_time(TimeZonePtrOrName&&, sys_time) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, local_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +template +zoned_time(TimeZonePtrOrName&&, zoned_time, choose = choose::earliest) + -> zoned_time, detail::time_zone_representation>; + +#endif // HAS_DEDUCTION_GUIDES + +template +inline +bool +operator==(const zoned_time& x, + const zoned_time& y) +{ + return x.zone_ == y.zone_ && x.tp_ == y.tp_; +} + +template +inline +bool +operator!=(const zoned_time& x, + const zoned_time& y) +{ + return !(x == y); +} + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + +namespace detail +{ +# if USE_OS_TZDB + struct transition; + struct expanded_ttinfo; +# else // !USE_OS_TZDB + struct zonelet; + class Rule; +# endif // !USE_OS_TZDB +} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + +class time_zone +{ +private: + std::string name_; +#if USE_OS_TZDB + std::vector transitions_; + std::vector ttinfos_; +#else // !USE_OS_TZDB + std::vector zonelets_; +#endif // !USE_OS_TZDB + std::unique_ptr adjusted_; + +public: +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + time_zone(time_zone&&) = default; + time_zone& operator=(time_zone&&) = default; +#else // defined(_MSC_VER) && (_MSC_VER < 1900) + time_zone(time_zone&& src); + time_zone& operator=(time_zone&& src); +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + + DATE_API explicit time_zone(const std::string& s, detail::undocumented); + + const std::string& name() const NOEXCEPT; + + template sys_info get_info(sys_time st) const; + template local_info get_info(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp) const; + + template + sys_time::type> + to_sys(local_time tp, choose z) const; + + template + local_time::type> + to_local(sys_time tp) const; + + friend bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT; + friend bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT; + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone& z); + +#if !USE_OS_TZDB + DATE_API void add(const std::string& s); +#endif // !USE_OS_TZDB + 
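+
+    // Illustrative usage sketch of the conversion API declared above
+    // (comment only, not part of the vendored header; the zone and time
+    // values below are hypothetical):
+    //
+    //   const time_zone* tz = locate_zone("America/New_York");
+    //   auto lt  = local_days{year{2016}/March/13} + std::chrono::hours{2} +
+    //              std::chrono::minutes{30};         // 02:30 falls in the DST gap
+    //   auto st1 = tz->to_sys(lt);                   // throws nonexistent_local_time
+    //   auto st2 = tz->to_sys(lt, choose::earliest); // never throws, picks a side
+    //   auto bk  = tz->to_local(st2);                // sys_time -> local_time
+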
+private: + DATE_API sys_info get_info_impl(sys_seconds tp) const; + DATE_API local_info get_info_impl(local_seconds tp) const; + + template + sys_time::type> + to_sys_impl(local_time tp, choose z, std::false_type) const; + template + sys_time::type> + to_sys_impl(local_time tp, choose, std::true_type) const; + +#if USE_OS_TZDB + DATE_API void init() const; + DATE_API void init_impl(); + DATE_API sys_info + load_sys_info(std::vector::const_iterator i) const; + + template + DATE_API void + load_data(std::istream& inf, std::int32_t tzh_leapcnt, std::int32_t tzh_timecnt, + std::int32_t tzh_typecnt, std::int32_t tzh_charcnt); +#else // !USE_OS_TZDB + DATE_API sys_info get_info_impl(sys_seconds tp, int tz_int) const; + DATE_API void adjust_infos(const std::vector& rules); + DATE_API void parse_info(std::istream& in); +#endif // !USE_OS_TZDB +}; + +#if defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +time_zone::time_zone(time_zone&& src) + : name_(std::move(src.name_)) + , zonelets_(std::move(src.zonelets_)) + , adjusted_(std::move(src.adjusted_)) + {} + +inline +time_zone& +time_zone::operator=(time_zone&& src) +{ + name_ = std::move(src.name_); + zonelets_ = std::move(src.zonelets_); + adjusted_ = std::move(src.adjusted_); + return *this; +} + +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + +inline +const std::string& +time_zone::name() const NOEXCEPT +{ + return name_; +} + +template +inline +sys_info +time_zone::get_info(sys_time st) const +{ + return get_info_impl(date::floor(st)); +} + +template +inline +local_info +time_zone::get_info(local_time tp) const +{ + return get_info_impl(date::floor(tp)); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp) const +{ + return to_sys_impl(tp, choose{}, std::true_type{}); +} + +template +inline +sys_time::type> +time_zone::to_sys(local_time tp, choose z) const +{ + return to_sys_impl(tp, z, std::false_type{}); +} + +template +inline +local_time::type> +time_zone::to_local(sys_time tp) const +{ + using LT = local_time::type>; + auto i = get_info(tp); + return LT{(tp + i.offset).time_since_epoch()}; +} + +inline bool operator==(const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ == y.name_;} +inline bool operator< (const time_zone& x, const time_zone& y) NOEXCEPT {return x.name_ < y.name_;} + +inline bool operator!=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x == y);} +inline bool operator> (const time_zone& x, const time_zone& y) NOEXCEPT {return y < x;} +inline bool operator<=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(y < x);} +inline bool operator>=(const time_zone& x, const time_zone& y) NOEXCEPT {return !(x < y);} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose z, std::false_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + { + return i.first.end; + } + else if (i.result == local_info::ambiguous) + { + if (z == choose::latest) + return sys_time{tp.time_since_epoch()} - i.second.offset; + } + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +template +sys_time::type> +time_zone::to_sys_impl(local_time tp, choose, std::true_type) const +{ + auto i = get_info(tp); + if (i.result == local_info::nonexistent) + throw nonexistent_local_time(tp, i); + else if (i.result == local_info::ambiguous) + throw ambiguous_local_time(tp, i); + return sys_time{tp.time_since_epoch()} - i.first.offset; +} + +#if !USE_OS_TZDB + +class time_zone_link +{ +private: + std::string name_; + std::string target_; +public: + 
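+    // A link is an alias from an alternative zone name to a canonical one;
+    // e.g. the IANA database links "US/Eastern" to "America/New_York".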
DATE_API explicit time_zone_link(const std::string& s); + + const std::string& name() const {return name_;} + const std::string& target() const {return target_;} + + friend bool operator==(const time_zone_link& x, const time_zone_link& y) {return x.name_ == y.name_;} + friend bool operator< (const time_zone_link& x, const time_zone_link& y) {return x.name_ < y.name_;} + + friend DATE_API std::ostream& operator<<(std::ostream& os, const time_zone_link& x); +}; + +using link = time_zone_link; + +inline bool operator!=(const time_zone_link& x, const time_zone_link& y) {return !(x == y);} +inline bool operator> (const time_zone_link& x, const time_zone_link& y) {return y < x;} +inline bool operator<=(const time_zone_link& x, const time_zone_link& y) {return !(y < x);} +inline bool operator>=(const time_zone_link& x, const time_zone_link& y) {return !(x < y);} + +#endif // !USE_OS_TZDB + +class leap_second +{ +private: + sys_seconds date_; + +public: +#if USE_OS_TZDB + DATE_API explicit leap_second(const sys_seconds& s, detail::undocumented); +#else + DATE_API explicit leap_second(const std::string& s, detail::undocumented); +#endif + + sys_seconds date() const {return date_;} + + friend bool operator==(const leap_second& x, const leap_second& y) {return x.date_ == y.date_;} + friend bool operator< (const leap_second& x, const leap_second& y) {return x.date_ < y.date_;} + + template + friend + bool + operator==(const leap_second& x, const sys_time& y) + { + return x.date_ == y; + } + + template + friend + bool + operator< (const leap_second& x, const sys_time& y) + { + return x.date_ < y; + } + + template + friend + bool + operator< (const sys_time& x, const leap_second& y) + { + return x < y.date_; + } + + friend DATE_API std::ostream& operator<<(std::ostream& os, const leap_second& x); +}; + +inline bool operator!=(const leap_second& x, const leap_second& y) {return !(x == y);} +inline bool operator> (const leap_second& x, const leap_second& y) {return y < x;} +inline bool operator<=(const leap_second& x, const leap_second& y) {return !(y < x);} +inline bool operator>=(const leap_second& x, const leap_second& y) {return !(x < y);} + +template +inline +bool +operator==(const sys_time& x, const leap_second& y) +{ + return y == x; +} + +template +inline +bool +operator!=(const leap_second& x, const sys_time& y) +{ + return !(x == y); +} + +template +inline +bool +operator!=(const sys_time& x, const leap_second& y) +{ + return !(x == y); +} + +template +inline +bool +operator> (const leap_second& x, const sys_time& y) +{ + return y < x; +} + +template +inline +bool +operator> (const sys_time& x, const leap_second& y) +{ + return y < x; +} + +template +inline +bool +operator<=(const leap_second& x, const sys_time& y) +{ + return !(y < x); +} + +template +inline +bool +operator<=(const sys_time& x, const leap_second& y) +{ + return !(y < x); +} + +template +inline +bool +operator>=(const leap_second& x, const sys_time& y) +{ + return !(x < y); +} + +template +inline +bool +operator>=(const sys_time& x, const leap_second& y) +{ + return !(x < y); +} + +using leap = leap_second; + +#ifdef _WIN32 + +namespace detail +{ + +// The time zone mapping is modelled after this data file: +// http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml +// and the field names match the element names from the mapZone element +// of windowsZones.xml. 
+// The website displays this file here: +// http://www.unicode.org/cldr/charts/latest/supplemental/zone_tzid.html +// The html view is sorted before being displayed but is otherwise the same +// There is a mapping between the os centric view (in this case windows) +// the html displays uses and the generic view the xml file. +// That mapping is this: +// display column "windows" -> xml field "other". +// display column "region" -> xml field "territory". +// display column "tzid" -> xml field "type". +// This structure uses the generic terminology because it could be +// used to to support other os/native name conversions, not just windows, +// and using the same generic names helps retain the connection to the +// origin of the data that we are using. +struct timezone_mapping +{ + timezone_mapping(const char* other, const char* territory, const char* type) + : other(other), territory(territory), type(type) + { + } + timezone_mapping() = default; + std::string other; + std::string territory; + std::string type; +}; + +} // detail + +#endif // _WIN32 + +struct tzdb +{ + std::string version = "unknown"; + std::vector zones; +#if !USE_OS_TZDB + std::vector links; +#endif + std::vector leap_seconds; +#if !USE_OS_TZDB + std::vector rules; +#endif +#ifdef _WIN32 + std::vector mappings; +#endif + tzdb* next = nullptr; + + tzdb() = default; +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + tzdb(tzdb&&) = default; + tzdb& operator=(tzdb&&) = default; +#else // defined(_MSC_VER) && (_MSC_VER < 1900) + tzdb(tzdb&& src) + : version(std::move(src.version)) + , zones(std::move(src.zones)) + , links(std::move(src.links)) + , leap_seconds(std::move(src.leap_seconds)) + , rules(std::move(src.rules)) + , mappings(std::move(src.mappings)) + {} + + tzdb& operator=(tzdb&& src) + { + version = std::move(src.version); + zones = std::move(src.zones); + links = std::move(src.links); + leap_seconds = std::move(src.leap_seconds); + rules = std::move(src.rules); + mappings = std::move(src.mappings); + return *this; + } +#endif // defined(_MSC_VER) && (_MSC_VER < 1900) + +#if HAS_STRING_VIEW + const time_zone* locate_zone(std::string_view tz_name) const; +#else + const time_zone* locate_zone(const std::string& tz_name) const; +#endif + const time_zone* current_zone() const; +}; + +using TZ_DB = tzdb; + +DATE_API std::ostream& +operator<<(std::ostream& os, const tzdb& db); + +DATE_API const tzdb& get_tzdb(); + +class tzdb_list +{ + std::atomic head_{nullptr}; + +public: + ~tzdb_list(); + tzdb_list() = default; + tzdb_list(tzdb_list&& x) NOEXCEPT; + + const tzdb& front() const NOEXCEPT {return *head_;} + tzdb& front() NOEXCEPT {return *head_;} + + class const_iterator; + + const_iterator begin() const NOEXCEPT; + const_iterator end() const NOEXCEPT; + + const_iterator cbegin() const NOEXCEPT; + const_iterator cend() const NOEXCEPT; + + const_iterator erase_after(const_iterator p) NOEXCEPT; + + struct undocumented_helper; +private: + void push_front(tzdb* tzdb) NOEXCEPT; +}; + +class tzdb_list::const_iterator +{ + tzdb* p_ = nullptr; + + explicit const_iterator(tzdb* p) NOEXCEPT : p_{p} {} +public: + const_iterator() = default; + + using iterator_category = std::forward_iterator_tag; + using value_type = tzdb; + using reference = const value_type&; + using pointer = const value_type*; + using difference_type = std::ptrdiff_t; + + reference operator*() const NOEXCEPT {return *p_;} + pointer operator->() const NOEXCEPT {return p_;} + + const_iterator& operator++() NOEXCEPT {p_ = p_->next; return *this;} + const_iterator 
operator++(int) NOEXCEPT {auto t = *this; ++(*this); return t;} + + friend + bool + operator==(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return x.p_ == y.p_;} + + friend + bool + operator!=(const const_iterator& x, const const_iterator& y) NOEXCEPT + {return !(x == y);} + + friend class tzdb_list; +}; + +inline +tzdb_list::const_iterator +tzdb_list::begin() const NOEXCEPT +{ + return const_iterator{head_}; +} + +inline +tzdb_list::const_iterator +tzdb_list::end() const NOEXCEPT +{ + return const_iterator{nullptr}; +} + +inline +tzdb_list::const_iterator +tzdb_list::cbegin() const NOEXCEPT +{ + return begin(); +} + +inline +tzdb_list::const_iterator +tzdb_list::cend() const NOEXCEPT +{ + return end(); +} + +DATE_API tzdb_list& get_tzdb_list(); + +#if !USE_OS_TZDB + +DATE_API const tzdb& reload_tzdb(); +DATE_API void set_install(const std::string& install); + +#endif // !USE_OS_TZDB + +#if HAS_REMOTE_API + +DATE_API std::string remote_version(); +// if provided error_buffer size should be at least CURL_ERROR_SIZE +DATE_API bool remote_download(const std::string& version, char* error_buffer = nullptr); +DATE_API bool remote_install(const std::string& version); + +#endif + +// zoned_time + +namespace detail +{ + +template +inline +T* +to_raw_pointer(T* p) NOEXCEPT +{ + return p; +} + +template +inline +auto +to_raw_pointer(Pointer p) NOEXCEPT + -> decltype(detail::to_raw_pointer(p.operator->())) +{ + return detail::to_raw_pointer(p.operator->()); +} + +} // namespace detail + +template +template +inline +TimeZonePtr2&& +zoned_time::check(TimeZonePtr2&& p) +{ + if (detail::to_raw_pointer(p) == nullptr) + throw std::runtime_error( + "zoned_time constructed with a time zone pointer == nullptr"); + return std::forward(p); +} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time() + : zone_(check(zoned_traits::default_zone())) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const sys_time& st) + : zone_(check(zoned_traits::default_zone())) + , tp_(st) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z) + : zone_(check(std::move(z))) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name) + : zoned_time(zoned_traits::locate_zone(name)) + {} + +#endif // !HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(const zoned_time& zt) NOEXCEPT + : zone_(zt.zone_) + , tp_(zt.tp_) + {} + +template +inline +zoned_time::zoned_time(TimeZonePtr z, const sys_time& st) + : zone_(check(std::move(z))) + , tp_(st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t)) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(TimeZonePtr z, const local_time& t, + choose c) + : zone_(check(std::move(z))) + , tp_(zone_->to_sys(t, c)) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt) + : zone_(check(std::move(z))) + , tp_(zt.tp_) + {} + +template +template +inline +zoned_time::zoned_time(TimeZonePtr z, + const zoned_time& zt, choose) + : 
zoned_time(std::move(z), zt) + {} + +#if HAS_STRING_VIEW + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + detail::nodeduct_t&> t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +template +inline +zoned_time::zoned_time(std::string_view name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#else // !HAS_STRING_VIEW + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const sys_time& st) + : zoned_time(zoned_traits::locate_zone(name), st) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t) + : zoned_time(zoned_traits::locate_zone(name), t) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#endif +inline +zoned_time::zoned_time(const char* name, + const local_time& t, choose c) + : zoned_time(zoned_traits::locate_zone(name), t, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt) + : zoned_time(zoned_traits::locate_zone(name), zt) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const std::string& name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +template +#if !defined(_MSC_VER) || (_MSC_VER > 1916) +template +#else +template +#endif +inline +zoned_time::zoned_time(const char* name, + const zoned_time& zt, + choose c) + : zoned_time(zoned_traits::locate_zone(name), zt, c) + {} + +#endif // HAS_STRING_VIEW + +template +inline +zoned_time& +zoned_time::operator=(const sys_time& st) +{ + tp_ = st; + return *this; +} + +template +inline +zoned_time& +zoned_time::operator=(const local_time& ut) +{ + tp_ = zone_->to_sys(ut); + return *this; +} + +template +inline +zoned_time::operator local_time::duration>() const +{ + return get_local_time(); +} + +template +inline +zoned_time::operator sys_time::duration>() 
const +{ + return get_sys_time(); +} + +template +inline +TimeZonePtr +zoned_time::get_time_zone() const +{ + return zone_; +} + +template +inline +local_time::duration> +zoned_time::get_local_time() const +{ + return zone_->to_local(tp_); +} + +template +inline +sys_time::duration> +zoned_time::get_sys_time() const +{ + return tp_; +} + +template +inline +sys_info +zoned_time::get_info() const +{ + return zone_->get_info(tp_); +} + +// make_zoned_time + +inline +zoned_time +make_zoned() +{ + return zoned_time(); +} + +template +inline +zoned_time::type> +make_zoned(const sys_time& tp) +{ + return zoned_time::type>(tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class + < + typename std::decay + < + decltype(*detail::to_raw_pointer(std::declval())) + >::type + >{} + >::type +#endif +#endif + > +inline +zoned_time +make_zoned(TimeZonePtr z) +{ + return zoned_time(std::move(z)); +} + +inline +zoned_seconds +make_zoned(const std::string& name) +{ + return zoned_seconds(name); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const local_time& tp, choose c) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), tp, c); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp) +{ + return zoned_time::type>(name, tp); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const local_time& tp, choose c) +{ + return zoned_time::type>(name, tp, c); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt) +{ + return zoned_time(std::move(zone), zt); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt) +{ + return zoned_time(name, zt); +} + +template +inline +zoned_time +make_zoned(TimeZonePtr zone, const zoned_time& zt, choose c) +{ + return zoned_time(std::move(zone), zt, c); +} + +template +inline +zoned_time +make_zoned(const std::string& name, const zoned_time& zt, choose c) +{ + return zoned_time(name, zt, c); +} + +template 1916) +#if !defined(__INTEL_COMPILER) || (__INTEL_COMPILER > 1600) + , class = typename std::enable_if + < + std::is_class())>::type>{} + >::type +#endif +#endif + > +inline +zoned_time::type, TimeZonePtr> +make_zoned(TimeZonePtr zone, const sys_time& st) +{ + return zoned_time::type, + TimeZonePtr>(std::move(zone), st); +} + +template +inline +zoned_time::type> +make_zoned(const std::string& name, const sys_time& st) +{ + return zoned_time::type>(name, st); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const zoned_time& tp) +{ + using duration = typename zoned_time::duration; + using LT = local_time; + auto const st = tp.get_sys_time(); + auto const info = tp.get_time_zone()->get_info(st); + return to_stream(os, fmt, LT{(st+info.offset).time_since_epoch()}, + &info.abbrev, &info.offset); +} + +template +inline +std::basic_ostream& +operator<<(std::basic_ostream& os, 
const zoned_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', ' ', '%', 'Z', CharT{}}; + return to_stream(os, fmt, t); +} + +class utc_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static CONSTDATA bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_sys(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&); + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&); +}; + +template + using utc_time = std::chrono::time_point; + +using utc_seconds = utc_time; + +template +utc_time::type> +utc_clock::from_sys(const sys_time& st) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), st); + return utc_time{st.time_since_epoch() + seconds{lt-leaps.begin()}}; +} + +// Return pair +// first is true if ut is during a leap second insertion, otherwise false. +// If ut is during a leap second insertion, that leap second is included in the count +template +std::pair +is_leap_second(date::utc_time const& ut) +{ + using std::chrono::seconds; + using duration = typename std::common_type::type; + auto const& leaps = get_tzdb().leap_seconds; + auto tp = sys_time{ut.time_since_epoch()}; + auto const lt = std::upper_bound(leaps.begin(), leaps.end(), tp); + auto ds = seconds{lt-leaps.begin()}; + tp -= ds; + auto ls = false; + if (lt > leaps.begin()) + { + if (tp < lt[-1]) + { + if (tp >= lt[-1].date() - seconds{1}) + ls = true; + else + --ds; + } + } + return {ls, ds}; +} + +struct leap_second_info +{ + bool is_leap_second; + std::chrono::seconds elapsed; +}; + +template +leap_second_info +get_leap_second_info(date::utc_time const& ut) +{ + auto p = is_leap_second(ut); + return {p.first, p.second}; +} + +template +sys_time::type> +utc_clock::to_sys(const utc_time& ut) +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + auto ls = is_leap_second(ut); + auto tp = sys_time{ut.time_since_epoch() - ls.second}; + if (ls.first) + tp = floor(tp) + seconds{1} - CD{1}; + return tp; +} + +inline +utc_clock::time_point +utc_clock::now() +{ + return from_sys(std::chrono::system_clock::now()); +} + +template +utc_time::type> +utc_clock::from_local(const local_time& st) +{ + return from_sys(sys_time{st.time_since_epoch()}); +} + +template +local_time::type> +utc_clock::to_local(const utc_time& ut) +{ + using CD = typename std::common_type::type; + return local_time{to_sys(ut).time_since_epoch()}; +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const utc_time& t) +{ + using std::chrono::seconds; + using CT = typename std::common_type::type; + const std::string abbrev("UTC"); + CONSTDATA seconds offset{0}; + auto ls = is_leap_second(t); + auto tp = sys_time{t.time_since_epoch() - ls.second}; + auto const sd = floor(tp); + year_month_day ymd = sd; + auto time = make_time(tp - sys_seconds{sd}); + time.seconds(detail::undocumented{}) += seconds{ls.first}; + fields fds{ymd, time}; + return to_stream(os, fmt, fds, &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const 
utc_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + utc_time& tp, std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + using std::chrono::seconds; + using std::chrono::minutes; + using CT = typename std::common_type::type; + minutes offset_local{}; + auto offptr = offset ? offset : &offset_local; + fields fds{}; + fds.has_tod = true; + from_stream(is, fmt, fds, abbrev, offptr); + if (!fds.ymd.ok()) + is.setstate(std::ios::failbit); + if (!is.fail()) + { + bool is_60_sec = fds.tod.seconds() == seconds{60}; + if (is_60_sec) + fds.tod.seconds(detail::undocumented{}) -= seconds{1}; + auto tmp = utc_clock::from_sys(sys_days(fds.ymd) - *offptr + fds.tod.to_duration()); + if (is_60_sec) + tmp += seconds{1}; + if (is_60_sec != is_leap_second(tmp).first || !fds.tod.in_conventional_range()) + { + is.setstate(std::ios::failbit); + return is; + } + tp = std::chrono::time_point_cast(tmp); + } + return is; +} + +// tai_clock + +class tai_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using tai_time = std::chrono::time_point; + +using tai_seconds = tai_time; + +template +inline +utc_time::type> +tai_clock::to_utc(const tai_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} - + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +template +inline +tai_time::type> +tai_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (sys_days(year{1970}/January/1) - sys_days(year{1958}/January/1) + seconds{10}); +} + +inline +tai_clock::time_point +tai_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +tai_clock::to_local(const tai_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} - + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +inline +tai_time::type> +tai_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return tai_time{t.time_since_epoch()} + + (local_days(year{1970}/January/1) - local_days(year{1958}/January/1)); +} + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const tai_time& t) +{ + const std::string abbrev("TAI"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, tai_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const tai_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > 
+std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + tai_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = tai_clock::from_local(lp); + return is; +} + +// gps_clock + +class gps_clock +{ +public: + using duration = std::chrono::system_clock::duration; + using rep = duration::rep; + using period = duration::period; + using time_point = std::chrono::time_point; + static const bool is_steady = false; + + static time_point now(); + + template + static + std::chrono::time_point::type> + to_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_utc(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + to_local(const std::chrono::time_point&) NOEXCEPT; + + template + static + std::chrono::time_point::type> + from_local(const std::chrono::time_point&) NOEXCEPT; +}; + +template + using gps_time = std::chrono::time_point; + +using gps_seconds = gps_time; + +template +inline +utc_time::type> +gps_clock::to_utc(const gps_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return utc_time{t.time_since_epoch()} + + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +template +inline +gps_time::type> +gps_clock::from_utc(const utc_time& t) NOEXCEPT +{ + using std::chrono::seconds; + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (sys_days(year{1980}/January/Sunday[1]) - sys_days(year{1970}/January/1) + + seconds{9}); +} + +inline +gps_clock::time_point +gps_clock::now() +{ + return from_utc(utc_clock::now()); +} + +template +inline +local_time::type> +gps_clock::to_local(const gps_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return local_time{t.time_since_epoch()} + + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + +template +inline +gps_time::type> +gps_clock::from_local(const local_time& t) NOEXCEPT +{ + using CD = typename std::common_type::type; + return gps_time{t.time_since_epoch()} - + (local_days(year{1980}/January/Sunday[1]) - local_days(year{1970}/January/1)); +} + + +template +std::basic_ostream& +to_stream(std::basic_ostream& os, const CharT* fmt, + const gps_time& t) +{ + const std::string abbrev("GPS"); + CONSTDATA std::chrono::seconds offset{0}; + return to_stream(os, fmt, gps_clock::to_local(t), &abbrev, &offset); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const gps_time& t) +{ + const CharT fmt[] = {'%', 'F', ' ', '%', 'T', CharT{}}; + return to_stream(os, fmt, t); +} + +template > +std::basic_istream& +from_stream(std::basic_istream& is, const CharT* fmt, + gps_time& tp, + std::basic_string* abbrev = nullptr, + std::chrono::minutes* offset = nullptr) +{ + local_time lp; + from_stream(is, fmt, lp, abbrev, offset); + if (!is.fail()) + tp = gps_clock::from_local(lp); + return is; +} + +// clock_time_conversion + +template +struct clock_time_conversion +{}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + sys_time + operator()(const sys_time& st) const + { + return st; + } +}; + +template <> +struct clock_time_conversion +{ + template + CONSTCD14 + utc_time + operator()(const utc_time& ut) const + { + return ut; + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + local_time 
+ operator()(const local_time& lt) const + { + return lt; + } +}; + +template <> +struct clock_time_conversion +{ + template + utc_time::type> + operator()(const sys_time& st) const + { + return utc_clock::from_sys(st); + } +}; + +template <> +struct clock_time_conversion +{ + template + sys_time::type> + operator()(const utc_time& ut) const + { + return utc_clock::to_sys(ut); + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + local_time + operator()(const sys_time& st) const + { + return local_time{st.time_since_epoch()}; + } +}; + +template<> +struct clock_time_conversion +{ + template + CONSTCD14 + sys_time + operator()(const local_time& lt) const + { + return sys_time{lt.time_since_epoch()}; + } +}; + +template<> +struct clock_time_conversion +{ + template + utc_time::type> + operator()(const local_time& lt) const + { + return utc_clock::from_local(lt); + } +}; + +template<> +struct clock_time_conversion +{ + template + local_time::type> + operator()(const utc_time& ut) const + { + return utc_clock::to_local(ut); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + std::chrono::time_point + operator()(const std::chrono::time_point& tp) const + { + return tp; + } +}; + +namespace ctc_detail +{ + +template + using time_point = std::chrono::time_point; + +using std::declval; +using std::chrono::system_clock; + +//Check if TimePoint is time for given clock, +//if not emits hard error +template +struct return_clock_time +{ + using clock_time_point = time_point; + using type = TimePoint; + + static_assert(std::is_same::value, + "time point with appropariate clock shall be returned"); +}; + +// Check if Clock has to_sys method accepting TimePoint with given duration const& and +// returning sys_time. If so has nested type member equal to return type to_sys. 
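+//
+// Illustrative sketch (comment only; `some_clock` is a hypothetical name):
+//
+//   struct some_clock
+//   {
+//       template <class D>
+//       static sys_time<D> to_sys(const std::chrono::time_point<some_clock, D>&);
+//   };
+//
+// Here return_to_sys<some_clock, D>::type is sys_time<D>. For a clock without
+// a usable to_sys, only the unspecialized template below matches and there is
+// no nested `type`, so the corresponding conversion operator drops out of
+// overload resolution (SFINAE).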
+template +struct return_to_sys +{}; + +template +struct return_to_sys + < + Clock, Duration, + decltype(Clock::to_sys(declval const&>()), void()) + > + : return_clock_time + < + system_clock, + decltype(Clock::to_sys(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_from_sys +{}; + +template +struct return_from_sys + < + Clock, Duration, + decltype(Clock::from_sys(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_sys(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_to_utc +{}; + +template +struct return_to_utc + < + Clock, Duration, + decltype(Clock::to_utc(declval const&>()), void()) + > + : return_clock_time + < + utc_clock, + decltype(Clock::to_utc(declval const&>()))> +{}; + +// Similiar to above +template +struct return_from_utc +{}; + +template +struct return_from_utc + < + Clock, Duration, + decltype(Clock::from_utc(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_utc(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_to_local +{}; + +template +struct return_to_local + < + Clock, Duration, + decltype(Clock::to_local(declval const&>()), + void()) + > + : return_clock_time + < + local_t, + decltype(Clock::to_local(declval const&>())) + > +{}; + +// Similiar to above +template +struct return_from_local +{}; + +template +struct return_from_local + < + Clock, Duration, + decltype(Clock::from_local(declval const&>()), + void()) + > + : return_clock_time + < + Clock, + decltype(Clock::from_local(declval const&>())) + > +{}; + +} // namespace ctc_detail + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_sys::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_sys(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_sys::type + operator()(const sys_time& st) const + { + return DstClock::from_sys(st); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_utc::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_utc(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_utc::type + operator()(const utc_time& ut) const + { + return DstClock::from_utc(ut); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_to_local::type + operator()(const std::chrono::time_point& tp) const + { + return SrcClock::to_local(tp); + } +}; + +template +struct clock_time_conversion +{ + template + CONSTCD14 + typename ctc_detail::return_from_local::type + operator()(const local_time& lt) const + { + return DstClock::from_local(lt); + } +}; + +namespace clock_cast_detail +{ + +template + using time_point = std::chrono::time_point; +using std::chrono::system_clock; + +template +CONSTCD14 +auto +conv_clock(const time_point& t) + -> decltype(std::declval>()(t)) +{ + return clock_time_conversion{}(t); +} + +//direct trait conversion, 1st candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, const time_point*) + -> decltype(conv_clock(t)) +{ + return conv_clock(t); +} + +//conversion through sys, 2nd candidate +template +CONSTCD14 +auto +cc_impl(const time_point& t, const void*) + -> decltype(conv_clock(conv_clock(t))) +{ + return conv_clock(conv_clock(t)); +} + +//conversion through utc, 2nd candidate 
+template <class DstClock, class Duration, class SrcClock>
+CONSTCD14
+auto
+cc_impl(const time_point<SrcClock, Duration>& t, const void*)
+    -> decltype(0, // MSVC_WORKAROUND
+                conv_clock<DstClock>(conv_clock<utc_clock>(t)))
+{
+    return conv_clock<DstClock>(conv_clock<utc_clock>(t));
+}
+
+//conversion through sys and utc, 3rd candidate
+template <class DstClock, class Duration, class SrcClock>
+CONSTCD14
+auto
+cc_impl(const time_point<SrcClock, Duration>& t, ...)
+    -> decltype(conv_clock<DstClock>(conv_clock<utc_clock>(conv_clock<system_clock>(t))))
+{
+    return conv_clock<DstClock>(conv_clock<utc_clock>(conv_clock<system_clock>(t)));
+}
+
+//conversion through utc and sys, 3rd candidate
+template <class DstClock, class Duration, class SrcClock>
+CONSTCD14
+auto
+cc_impl(const time_point<SrcClock, Duration>& t, ...)
+    -> decltype(0, // MSVC_WORKAROUND
+                conv_clock<DstClock>(conv_clock<system_clock>(conv_clock<utc_clock>(t))))
+{
+    return conv_clock<DstClock>(conv_clock<system_clock>(conv_clock<utc_clock>(t)));
+}
+
+} // namespace clock_cast_detail
+
+template <class DstClock, class SrcClock, class Duration>
+CONSTCD14
+auto
+clock_cast(const std::chrono::time_point<SrcClock, Duration>& tp)
+    -> decltype(clock_cast_detail::cc_impl<DstClock>(tp, &tp))
+{
+    return clock_cast_detail::cc_impl<DstClock>(tp, &tp);
+}
+
+// Deprecated API
+
+template <class Duration>
+inline
+sys_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_sys_time(const utc_time<Duration>& t)
+{
+    return utc_clock::to_sys(t);
+}
+
+template <class Duration>
+inline
+sys_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_sys_time(const tai_time<Duration>& t)
+{
+    return utc_clock::to_sys(tai_clock::to_utc(t));
+}
+
+template <class Duration>
+inline
+sys_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_sys_time(const gps_time<Duration>& t)
+{
+    return utc_clock::to_sys(gps_clock::to_utc(t));
+}
+
+
+template <class Duration>
+inline
+utc_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_utc_time(const sys_time<Duration>& t)
+{
+    return utc_clock::from_sys(t);
+}
+
+template <class Duration>
+inline
+utc_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_utc_time(const tai_time<Duration>& t)
+{
+    return tai_clock::to_utc(t);
+}
+
+template <class Duration>
+inline
+utc_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_utc_time(const gps_time<Duration>& t)
+{
+    return gps_clock::to_utc(t);
+}
+
+
+template <class Duration>
+inline
+tai_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_tai_time(const sys_time<Duration>& t)
+{
+    return tai_clock::from_utc(utc_clock::from_sys(t));
+}
+
+template <class Duration>
+inline
+tai_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_tai_time(const utc_time<Duration>& t)
+{
+    return tai_clock::from_utc(t);
+}
+
+template <class Duration>
+inline
+tai_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_tai_time(const gps_time<Duration>& t)
+{
+    return tai_clock::from_utc(gps_clock::to_utc(t));
+}
+
+
+template <class Duration>
+inline
+gps_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_gps_time(const sys_time<Duration>& t)
+{
+    return gps_clock::from_utc(utc_clock::from_sys(t));
+}
+
+template <class Duration>
+inline
+gps_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_gps_time(const utc_time<Duration>& t)
+{
+    return gps_clock::from_utc(t);
+}
+
+template <class Duration>
+inline
+gps_time<typename std::common_type<Duration, std::chrono::seconds>::type>
+to_gps_time(const tai_time<Duration>& t)
+{
+    return gps_clock::from_utc(tai_clock::to_utc(t));
+}
+
+} // namespace date
+} // namespace arrow_vendored
+
+#endif // TZ_H
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b7a91493e1033a7c2808040ee65b72cef5efc6d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/tz_private.h
@@ -0,0 +1,319 @@
+#ifndef TZ_PRIVATE_H
+#define TZ_PRIVATE_H
+
+// The MIT License (MIT)
+//
+// Copyright (c) 2015, 2016 Howard Hinnant
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// Our apologies. When the previous paragraph was written, lowercase had not yet +// been invented (that would involve another several millennia of evolution). +// We did not mean to shout. + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +#include "tz.h" +#else +#include "date.h" +#include +#endif + +namespace arrow_vendored +{ +namespace date +{ + +namespace detail +{ + +#if !USE_OS_TZDB + +enum class tz {utc, local, standard}; + +//forward declare to avoid warnings in gcc 6.2 +class MonthDayTime; +std::istream& operator>>(std::istream& is, MonthDayTime& x); +std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); + + +class MonthDayTime +{ +private: + struct pair + { +#if defined(_MSC_VER) && (_MSC_VER < 1900) + pair() : month_day_(date::jan / 1), weekday_(0U) {} + + pair(const date::month_day& month_day, const date::weekday& weekday) + : month_day_(month_day), weekday_(weekday) {} +#endif + + date::month_day month_day_; + date::weekday weekday_; + }; + + enum Type {month_day, month_last_dow, lteq, gteq}; + + Type type_{month_day}; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + date::month_day month_day_; + date::month_weekday_last month_weekday_last_; + pair month_day_weekday_; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + U() : month_day_{date::jan/1} {} +#else + U() : + month_day_(date::jan/1), + month_weekday_last_(date::month(0U), date::weekday_last(date::weekday(0U))) + {} + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1900) + + U& operator=(const date::month_day& x); + U& operator=(const date::month_weekday_last& x); + U& operator=(const pair& x); + } u; + + std::chrono::hours h_{0}; + std::chrono::minutes m_{0}; + std::chrono::seconds s_{0}; + tz zone_{tz::local}; + +public: + MonthDayTime() = default; + MonthDayTime(local_seconds tp, tz timezone); + MonthDayTime(const date::month_day& md, tz timezone); + + date::day day() const; + date::month month() const; + tz zone() const {return zone_;} + + void canonicalize(date::year y); + + sys_seconds + to_sys(date::year y, std::chrono::seconds offset, std::chrono::seconds save) const; + sys_days to_sys_days(date::year y) const; + + sys_seconds to_time_point(date::year y) const; + int compare(date::year y, const MonthDayTime& x, date::year yx, + std::chrono::seconds offset, std::chrono::minutes prev_save) const; + + friend std::istream& operator>>(std::istream& is, MonthDayTime& x); + friend std::ostream& operator<<(std::ostream& os, const MonthDayTime& x); +}; + +// A Rule specifies one or more set of datetimes without using an offset. +// Multiple dates are specified with multiple years. The years in effect +// go from starting_year_ to ending_year_, inclusive. starting_year_ <= +// ending_year_. save_ is in effect for times from the specified time +// onward, including the specified time. When the specified time is +// local, it uses the save_ from the chronologically previous Rule, or if +// there is none, 0. 
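The Rule described by the comment above is parsed from Rule lines in the IANA tzdata text files. For orientation, here is a representative line (a real US daylight-saving rule) and how its columns map onto the class declared just below; the mapping is a sketch of tzdata semantics, not vendored documentation.

    # Rule  NAME  FROM  TO   TYPE  IN   ON      AT    SAVE  LETTER/S
    Rule    US    2007  max  -     Mar  Sun>=8  2:00  1:00  D

Read against the members below: name_ is "US", starting_year_ is 2007, ending_year_ is open-ended ("max"), starting_at_ captures "Mar Sun>=8 2:00" as a MonthDayTime in local (wall-clock) time, save_ is one hour, and the letter "D" is what a zone's format string substitutes to produce abbreviations such as "CDT".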
+ +//forward declare to avoid warnings in gcc 6.2 +class Rule; +bool operator==(const Rule& x, const Rule& y); +bool operator<(const Rule& x, const Rule& y); +bool operator==(const Rule& x, const date::year& y); +bool operator<(const Rule& x, const date::year& y); +bool operator==(const date::year& x, const Rule& y); +bool operator<(const date::year& x, const Rule& y); +bool operator==(const Rule& x, const std::string& y); +bool operator<(const Rule& x, const std::string& y); +bool operator==(const std::string& x, const Rule& y); +bool operator<(const std::string& x, const Rule& y); +std::ostream& operator<<(std::ostream& os, const Rule& r); + +class Rule +{ +private: + std::string name_; + date::year starting_year_{0}; + date::year ending_year_{0}; + MonthDayTime starting_at_; + std::chrono::minutes save_{0}; + std::string abbrev_; + +public: + Rule() = default; + explicit Rule(const std::string& s); + Rule(const Rule& r, date::year starting_year, date::year ending_year); + + const std::string& name() const {return name_;} + const std::string& abbrev() const {return abbrev_;} + + const MonthDayTime& mdt() const {return starting_at_;} + const date::year& starting_year() const {return starting_year_;} + const date::year& ending_year() const {return ending_year_;} + const std::chrono::minutes& save() const {return save_;} + + static void split_overlaps(std::vector& rules); + + friend bool operator==(const Rule& x, const Rule& y); + friend bool operator<(const Rule& x, const Rule& y); + friend bool operator==(const Rule& x, const date::year& y); + friend bool operator<(const Rule& x, const date::year& y); + friend bool operator==(const date::year& x, const Rule& y); + friend bool operator<(const date::year& x, const Rule& y); + friend bool operator==(const Rule& x, const std::string& y); + friend bool operator<(const Rule& x, const std::string& y); + friend bool operator==(const std::string& x, const Rule& y); + friend bool operator<(const std::string& x, const Rule& y); + + friend std::ostream& operator<<(std::ostream& os, const Rule& r); + +private: + date::day day() const; + date::month month() const; + static void split_overlaps(std::vector& rules, std::size_t i, std::size_t& e); + static bool overlaps(const Rule& x, const Rule& y); + static void split(std::vector& rules, std::size_t i, std::size_t k, + std::size_t& e); +}; + +inline bool operator!=(const Rule& x, const Rule& y) {return !(x == y);} +inline bool operator> (const Rule& x, const Rule& y) {return y < x;} +inline bool operator<=(const Rule& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const date::year& y) {return !(x == y);} +inline bool operator> (const Rule& x, const date::year& y) {return y < x;} +inline bool operator<=(const Rule& x, const date::year& y) {return !(y < x);} +inline bool operator>=(const Rule& x, const date::year& y) {return !(x < y);} + +inline bool operator!=(const date::year& x, const Rule& y) {return !(x == y);} +inline bool operator> (const date::year& x, const Rule& y) {return y < x;} +inline bool operator<=(const date::year& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const date::year& x, const Rule& y) {return !(x < y);} + +inline bool operator!=(const Rule& x, const std::string& y) {return !(x == y);} +inline bool operator> (const Rule& x, const std::string& y) {return y < x;} +inline bool operator<=(const Rule& x, const std::string& y) {return !(y < x);} +inline bool 
operator>=(const Rule& x, const std::string& y) {return !(x < y);} + +inline bool operator!=(const std::string& x, const Rule& y) {return !(x == y);} +inline bool operator> (const std::string& x, const Rule& y) {return y < x;} +inline bool operator<=(const std::string& x, const Rule& y) {return !(y < x);} +inline bool operator>=(const std::string& x, const Rule& y) {return !(x < y);} + +struct zonelet +{ + enum tag {has_rule, has_save, is_empty}; + + std::chrono::seconds gmtoff_; + tag tag_ = has_rule; + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) + union U +#else + struct U +#endif + { + std::string rule_; + std::chrono::minutes save_; + + ~U() {} + U() {} + U(const U&) {} + U& operator=(const U&) = delete; + } u; + + std::string format_; + date::year until_year_{0}; + MonthDayTime until_date_; + sys_seconds until_utc_; + local_seconds until_std_; + local_seconds until_loc_; + std::chrono::minutes initial_save_{0}; + std::string initial_abbrev_; + std::pair first_rule_{nullptr, date::year::min()}; + std::pair last_rule_{nullptr, date::year::max()}; + + ~zonelet(); + zonelet(); + zonelet(const zonelet& i); + zonelet& operator=(const zonelet&) = delete; +}; + +#else // USE_OS_TZDB + +struct ttinfo +{ + std::int32_t tt_gmtoff; + unsigned char tt_isdst; + unsigned char tt_abbrind; + unsigned char pad[2]; +}; + +static_assert(sizeof(ttinfo) == 8, ""); + +struct expanded_ttinfo +{ + std::chrono::seconds offset; + std::string abbrev; + bool is_dst; +}; + +struct transition +{ + sys_seconds timepoint; + const expanded_ttinfo* info; + + transition(sys_seconds tp, const expanded_ttinfo* i = nullptr) + : timepoint(tp) + , info(i) + {} + + friend + std::ostream& + operator<<(std::ostream& os, const transition& t) + { + using date::operator<<; + os << t.timepoint << "Z "; + if (t.info->offset >= std::chrono::seconds{0}) + os << '+'; + os << make_time(t.info->offset); + if (t.info->is_dst > 0) + os << " daylight "; + else + os << " standard "; + os << t.info->abbrev; + return os; + } +}; + +#endif // USE_OS_TZDB + +} // namespace detail + +} // namespace date +} // namespace arrow_vendored + +#if defined(_MSC_VER) && (_MSC_VER < 1900) +#include "tz.h" +#endif + +#endif // TZ_PRIVATE_H diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..ae031238d85ac81f91bdaccc8c519c250a6fa056 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/datetime/visibility.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(ARROW_STATIC) +// intentially empty +#elif defined(ARROW_EXPORTING) +#define DATE_BUILD_DLL +#else +#define DATE_USE_DLL +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..f56239e8e88956a319aca3ec25fa48e0db4d6547 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h @@ -0,0 +1,86 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_ +#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum BignumDtoaMode { + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. + BIGNUM_DTOA_SHORTEST, + // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats. + BIGNUM_DTOA_SHORTEST_SINGLE, + // Return a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + BIGNUM_DTOA_FIXED, + // Return a fixed number of digits, no matter what the exponent is. + BIGNUM_DTOA_PRECISION +}; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// The buffer will be null-terminated. +// +// The input v must be > 0 and different from NaN, and Infinity. +// +// The output depends on the given mode: +// - SHORTEST: produce the least amount of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. The buffer will choose the representation that is closest to +// 'v'. 
If there are two at the same distance, then the number is rounded up.
+//   In this mode the 'requested_digits' parameter is ignored.
+//  - FIXED: produces digits necessary to print a given number with
+//   'requested_digits' digits after the decimal point. The produced digits
+//   might be too short in which case the caller has to fill the gaps with '0's.
+//   Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+//   Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
+//   buffer="2", point=0.
+//   Note: the length of the returned buffer has no meaning wrt the significance
+//   of its digits. That is, just because it contains '0's does not mean that
+//   any other digit would not satisfy the internal identity requirement.
+//  - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+//   Even though the length of produced digits usually equals
+//   'requested_digits', the function is allowed to return fewer digits, in
+//   which case the caller has to fill the missing digits with '0's.
+//   Halfway cases are again rounded up.
+// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
+// and a terminating null-character.
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+                Vector<char> buffer, int* length, int* point);
+
+} // namespace double_conversion
+} // namespace arrow_vendored
+
+#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h
new file mode 100644
index 0000000000000000000000000000000000000000..68fd82d8059957a5af0099382b10e0ac8a9bac58
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h
@@ -0,0 +1,66 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
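As a concrete reading of the BignumDtoa contract documented above, here is a minimal sketch of a call (illustrative only; it assumes the vendored double-conversion headers can be included directly under their pyarrow install prefix).

    // Illustrative sketch, not part of the vendored sources.
    #include "arrow/vendored/double-conversion/bignum-dtoa.h"
    #include <cstdio>

    int main()
    {
        using namespace arrow_vendored::double_conversion;
        char digits[32];
        int length = 0;
        int point = 0;
        // Shortest round-trippable digits for 0.1: buffer = "1", length = 1,
        // point = 0, i.e. the value is 1 * 10^(point - length) = 10^-1.
        // requested_digits is ignored in BIGNUM_DTOA_SHORTEST mode.
        BignumDtoa(0.1, BIGNUM_DTOA_SHORTEST, /*requested_digits=*/0,
                   Vector<char>(digits, static_cast<int>(sizeof(digits))),
                   &length, &point);
        std::printf("digits=%s length=%d point=%d\n", digits, length, point);
        return 0;
    }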
+ +#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_ +#define DOUBLE_CONVERSION_CACHED_POWERS_H_ + +#include "diy-fp.h" + +namespace arrow_vendored { +namespace double_conversion { + +namespace PowersOfTenCache { + + // Not all powers of ten are cached. The decimal exponent of two neighboring + // cached numbers will differ by kDecimalExponentDistance. + static const int kDecimalExponentDistance = 8; + + static const int kMinDecimalExponent = -348; + static const int kMaxDecimalExponent = 340; + + // Returns a cached power-of-ten with a binary exponent in the range + // [min_exponent; max_exponent] (boundaries included). + void GetCachedPowerForBinaryExponentRange(int min_exponent, + int max_exponent, + DiyFp* power, + int* decimal_exponent); + + // Returns a cached power of ten x ~= 10^k such that + // k <= decimal_exponent < k + kCachedPowersDecimalDistance. + // The given decimal_exponent must satisfy + // kMinDecimalExponent <= requested_exponent, and + // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance. + void GetCachedPowerForDecimalExponent(int requested_exponent, + DiyFp* power, + int* found_exponent); + +} // namespace PowersOfTenCache + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h new file mode 100644 index 0000000000000000000000000000000000000000..f3367b9392a32cd41d3204009120c7654be866de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h @@ -0,0 +1,139 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
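The PowersOfTenCache interface above is easiest to read with a concrete call. A hedged sketch follows; the binary-exponent window is illustrative and only needs to be wide enough that the cache, which stores every kDecimalExponentDistance-th power of ten, contains a hit.

    // Illustrative sketch, not part of the vendored sources.
    #include "arrow/vendored/double-conversion/cached-powers.h"
    #include <cstdio>

    int main()
    {
        using namespace arrow_vendored::double_conversion;
        DiyFp power;
        int decimal_exponent = 0;
        // Request a cached power of ten whose *binary* exponent falls in the
        // given window (boundaries included).
        PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
            -125, -95, &power, &decimal_exponent);
        // The result approximates 10^decimal_exponent as f * 2^e.
        std::printf("10^%d ~= %llu * 2^%d\n", decimal_exponent,
                    static_cast<unsigned long long>(power.f()), power.e());
        return 0;
    }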
+ +#ifndef DOUBLE_CONVERSION_DIY_FP_H_ +#define DOUBLE_CONVERSION_DIY_FP_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// This "Do It Yourself Floating Point" class implements a floating-point number +// with a uint64 significand and an int exponent. Normalized DiyFp numbers will +// have the most significant bit of the significand set. +// Multiplication and Subtraction do not normalize their results. +// DiyFp store only non-negative numbers and are not designed to contain special +// doubles (NaN and Infinity). +class DiyFp { + public: + static const int kSignificandSize = 64; + + DiyFp() : f_(0), e_(0) {} + DiyFp(const uint64_t significand, const int32_t exponent) : f_(significand), e_(exponent) {} + + // this -= other. + // The exponents of both numbers must be the same and the significand of this + // must be greater or equal than the significand of other. + // The result will not be normalized. + void Subtract(const DiyFp& other) { + DOUBLE_CONVERSION_ASSERT(e_ == other.e_); + DOUBLE_CONVERSION_ASSERT(f_ >= other.f_); + f_ -= other.f_; + } + + // Returns a - b. + // The exponents of both numbers must be the same and a must be greater + // or equal than b. The result will not be normalized. + static DiyFp Minus(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Subtract(b); + return result; + } + + // this *= other. + void Multiply(const DiyFp& other) { + // Simply "emulates" a 128 bit multiplication. + // However: the resulting number only contains 64 bits. The least + // significant 64 bits are only used for rounding the most significant 64 + // bits. + const uint64_t kM32 = 0xFFFFFFFFU; + const uint64_t a = f_ >> 32; + const uint64_t b = f_ & kM32; + const uint64_t c = other.f_ >> 32; + const uint64_t d = other.f_ & kM32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + // By adding 1U << 31 to tmp we round the final result. + // Halfway cases will be rounded up. + const uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31); + e_ += other.e_ + 64; + f_ = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32); + } + + // returns a * b; + static DiyFp Times(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Multiply(b); + return result; + } + + void Normalize() { + DOUBLE_CONVERSION_ASSERT(f_ != 0); + uint64_t significand = f_; + int32_t exponent = e_; + + // This method is mainly called for normalizing boundaries. In general, + // boundaries need to be shifted by 10 bits, and we optimize for this case. 
+ const uint64_t k10MSBits = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFC00000, 00000000); + while ((significand & k10MSBits) == 0) { + significand <<= 10; + exponent -= 10; + } + while ((significand & kUint64MSB) == 0) { + significand <<= 1; + exponent--; + } + f_ = significand; + e_ = exponent; + } + + static DiyFp Normalize(const DiyFp& a) { + DiyFp result = a; + result.Normalize(); + return result; + } + + uint64_t f() const { return f_; } + int32_t e() const { return e_; } + + void set_f(uint64_t new_value) { f_ = new_value; } + void set_e(int32_t new_value) { e_ = new_value; } + + private: + static const uint64_t kUint64MSB = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000); + + uint64_t f_; + int32_t e_; +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DIY_FP_H_ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h new file mode 100644 index 0000000000000000000000000000000000000000..90a88b902d6ea12d3adf917cdbf9e63b818d71ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h @@ -0,0 +1,472 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ +#define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class DoubleToStringConverter { + public: + // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint + // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the + // function returns false. + static const int kMaxFixedDigitsBeforePoint = 60; + static const int kMaxFixedDigitsAfterPoint = 100; + + // When calling ToExponential with a requested_digits + // parameter > kMaxExponentialDigits then the function returns false. 
+ static const int kMaxExponentialDigits = 120; + + // When calling ToPrecision with a requested_digits + // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits + // then the function returns false. + static const int kMinPrecisionDigits = 1; + static const int kMaxPrecisionDigits = 120; + + // The maximal number of digits that are needed to emit a double in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any double will never use more digits than + // kBase10MaximalLength. + // Note that DoubleToAscii null-terminates its input. So the given buffer + // should be at least kBase10MaximalLength + 1 characters long. + static const int kBase10MaximalLength = 17; + + // The maximal number of digits that are needed to emit a single in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any single will never use more digits than + // kBase10MaximalLengthSingle. + static const int kBase10MaximalLengthSingle = 9; + + // The length of the longest string that 'ToShortest' can produce when the + // converter is instantiated with EcmaScript defaults (see + // 'EcmaScriptConverter') + // This value does not include the trailing '\0' character. + // This amount of characters is needed for negative values that hit the + // 'decimal_in_shortest_low' limit. For example: "-0.0000033333333333333333" + static const int kMaxCharsEcmaScriptShortest = 25; + + enum Flags { + NO_FLAGS = 0, + EMIT_POSITIVE_EXPONENT_SIGN = 1, + EMIT_TRAILING_DECIMAL_POINT = 2, + EMIT_TRAILING_ZERO_AFTER_POINT = 4, + UNIQUE_ZERO = 8, + NO_TRAILING_ZERO = 16, + EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL = 32, + EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL = 64 + }; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent + // form, emits a '+' for positive exponents. Example: 1.2e+2. + // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is + // converted into decimal format then a trailing decimal point is appended. + // Example: 2345.0 is converted to "2345.". + // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point + // emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT flag. + // Example: 2345.0 is converted to "2345.0". + // - UNIQUE_ZERO: "-0.0" is converted to "0.0". + // - NO_TRAILING_ZERO: Trailing zeros are removed from the fractional portion + // of the result in precision mode. Matches printf's %g. + // When EMIT_TRAILING_ZERO_AFTER_POINT is also given, one trailing zero is + // preserved. + // - EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL: when the input number has + // exactly one significant digit and is converted into exponent form then a + // trailing decimal point is appended to the significand in shortest mode + // or in precision mode with one requested digit. + // - EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL: in addition to a trailing + // decimal point emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag. + // + // Infinity symbol and nan_symbol provide the string representation for these + // special values. If the string is NULL and the special value is encountered + // then the conversion functions return false. + // + // The exponent_character is used in exponential representations. 
It is + // usually 'e' or 'E'. + // + // When converting to the shortest representation the converter will + // represent input numbers in decimal format if they are in the interval + // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[ + // (lower boundary included, greater boundary excluded). + // Example: with decimal_in_shortest_low = -6 and + // decimal_in_shortest_high = 21: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // When converting to precision mode the converter may add + // max_leading_padding_zeroes before returning the number in exponential + // format. + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // + // When converting numbers with exactly one significant digit to exponent + // form in shortest mode or in precision mode with one requested digit, the + // EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags have + // no effect. Use the EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag to + // append a decimal point in this case and the + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL flag to also append a + // '0'-character in this case. + // Example with decimal_in_shortest_low = 0: + // ToShortest(0.0009) -> "9e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL deactivated. + // ToShortest(0.0009) -> "9.e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated. + // ToShortest(0.0009) -> "9.0e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated and + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL activated. + // + // The min_exponent_width is used for exponential representations. + // The converter adds leading '0's to the exponent until the exponent + // is at least min_exponent_width digits long. + // The min_exponent_width is clamped to 5. + // As such, the exponent may never have more than 5 digits in total. 
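To make the parameter list above concrete, here is a hedged construction sketch for the constructor declared just below; the flag choice is illustrative, loosely following the EcmaScript defaults described later in this header.

    // Illustrative sketch, not part of the vendored sources.
    #include "arrow/vendored/double-conversion/double-to-string.h"
    #include <cstdio>

    int main()
    {
        using namespace arrow_vendored::double_conversion;
        DoubleToStringConverter conv(
            DoubleToStringConverter::UNIQUE_ZERO |
                DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN,
            "Infinity", "NaN", 'e',
            /*decimal_in_shortest_low=*/-6,
            /*decimal_in_shortest_high=*/21,
            /*max_leading_padding_zeroes_in_precision_mode=*/6,
            /*max_trailing_padding_zeroes_in_precision_mode=*/0);

        char out[128];
        StringBuilder builder(out, sizeof(out));
        conv.ToShortest(0.0000001, &builder);  // below 10^decimal_in_shortest_low
        std::printf("%s\n", builder.Finalize());  // prints "1e-7"
        return 0;
    }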
+ DoubleToStringConverter(int flags, + const char* infinity_symbol, + const char* nan_symbol, + char exponent_character, + int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode, + int min_exponent_width = 0) + : flags_(flags), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + exponent_character_(exponent_character), + decimal_in_shortest_low_(decimal_in_shortest_low), + decimal_in_shortest_high_(decimal_in_shortest_high), + max_leading_padding_zeroes_in_precision_mode_( + max_leading_padding_zeroes_in_precision_mode), + max_trailing_padding_zeroes_in_precision_mode_( + max_trailing_padding_zeroes_in_precision_mode), + min_exponent_width_(min_exponent_width) { + // When 'trailing zero after the point' is set, then 'trailing point' + // must be set too. + DOUBLE_CONVERSION_ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) || + !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0)); + } + + // Returns a converter following the EcmaScript specification. + // + // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN. + // Special values: "Infinity" and "NaN". + // Lower case 'e' for exponential values. + // decimal_in_shortest_low: -6 + // decimal_in_shortest_high: 21 + // max_leading_padding_zeroes_in_precision_mode: 6 + // max_trailing_padding_zeroes_in_precision_mode: 0 + static const DoubleToStringConverter& EcmaScriptConverter(); + + // Computes the shortest string of digits that correctly represent the input + // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high + // (see constructor) it then either returns a decimal representation, or an + // exponential representation. + // Example with decimal_in_shortest_low = -6, + // decimal_in_shortest_high = 21, + // EMIT_POSITIVE_EXPONENT_SIGN activated, and + // EMIT_TRAILING_DECIMAL_POINT deactivated: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // Note: the conversion may round the output if the returned string + // is accurate enough to uniquely identify the input-number. + // For example the most precise representation of the double 9e59 equals + // "899999999999999918767229449717619953810131273674690656206848", but + // the converter will return the shorter (but still correct) "9e59". + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except when the input value is special and no infinity_symbol or + // nan_symbol has been given to the constructor. + // + // The length of the longest result is the maximum of the length of the + // following string representations (each with possible examples): + // - NaN and negative infinity: "NaN", "-Infinity", "-inf". + // - -10^(decimal_in_shortest_high - 1): + // "-100000000000000000000", "-1000000000000000.0" + // - the longest string in range [0; -10^decimal_in_shortest_low]. Generally, + // this string is 3 + kBase10MaximalLength - decimal_in_shortest_low. + // (Sign, '0', decimal point, padding zeroes for decimal_in_shortest_low, + // and the significant digits). + // "-0.0000033333333333333333", "-0.0012345678901234567" + // - the longest exponential representation. (A negative number with + // kBase10MaximalLength significant digits). 
+ // "-1.7976931348623157e+308", "-1.7976931348623157E308" + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToShortest(double value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST); + } + + // Same as ToShortest, but for single-precision floats. + bool ToShortestSingle(float value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE); + } + + + // Computes a decimal representation with a fixed number of digits after the + // decimal point. The last emitted digit is rounded. + // + // Examples: + // ToFixed(3.12, 1) -> "3.1" + // ToFixed(3.1415, 3) -> "3.142" + // ToFixed(1234.56789, 4) -> "1234.5679" + // ToFixed(1.23, 5) -> "1.23000" + // ToFixed(0.1, 4) -> "0.1000" + // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00" + // ToFixed(0.1, 30) -> "0.100000000000000005551115123126" + // ToFixed(0.1, 17) -> "0.10000000000000001" + // + // If requested_digits equals 0, then the tail of the result depends on + // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples, for requested_digits == 0, + // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be + // - false and false: then 123.45 -> 123 + // 0.678 -> 1 + // - true and false: then 123.45 -> 123. + // 0.678 -> 1. + // - true and true: then 123.45 -> 123.0 + // 0.678 -> 1.0 + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'value' > 10^kMaxFixedDigitsBeforePoint, or + // - 'requested_digits' > kMaxFixedDigitsAfterPoint. + // The last two conditions imply that the result for non-special values never + // contains more than + // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters + // (one additional character for the sign, and one for the decimal point). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToFixed(double value, + int requested_digits, + StringBuilder* result_builder) const; + + // Computes a representation in exponential format with requested_digits + // after the decimal point. The last emitted digit is rounded. + // If requested_digits equals -1, then the shortest exponential representation + // is computed. + // + // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and + // exponent_character set to 'e'. + // ToExponential(3.12, 1) -> "3.1e0" + // ToExponential(5.0, 3) -> "5.000e0" + // ToExponential(0.001, 2) -> "1.00e-3" + // ToExponential(3.1415, -1) -> "3.1415e0" + // ToExponential(3.1415, 4) -> "3.1415e0" + // ToExponential(3.1415, 3) -> "3.142e0" + // ToExponential(123456789000000, 3) -> "1.235e14" + // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30" + // ToExponential(1000000000000000019884624838656.0, 32) -> + // "1.00000000000000001988462483865600e30" + // ToExponential(1234, 0) -> "1e3" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'requested_digits' > kMaxExponentialDigits. 
+ // + // The last condition implies that the result never contains more than + // kMaxExponentialDigits + 8 characters (the sign, the digit before the + // decimal point, the decimal point, the exponent character, the + // exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToExponential(double value, + int requested_digits, + StringBuilder* result_builder) const; + + + // Computes 'precision' leading digits of the given 'value' and returns them + // either in exponential or decimal format, depending on + // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the + // constructor). + // The last computed digit is rounded. + // + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no + // EMIT_TRAILING_ZERO_AFTER_POINT: + // ToPrecision(123450.0, 6) -> "123450" + // ToPrecision(123450.0, 5) -> "123450" + // ToPrecision(123450.0, 4) -> "123500" + // ToPrecision(123450.0, 3) -> "123000" + // ToPrecision(123450.0, 2) -> "1.2e5" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - precision < kMinPericisionDigits + // - precision > kMaxPrecisionDigits + // + // The last condition implies that the result never contains more than + // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the + // exponent character, the exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToPrecision(double value, + int precision, + StringBuilder* result_builder) const; + + enum DtoaMode { + // Produce the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate + // but correct) 0.3. + SHORTEST, + // Same as SHORTEST, but for single-precision floats. + SHORTEST_SINGLE, + // Produce a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + FIXED, + // Fixed number of digits (independent of the decimal point). + PRECISION + }; + + // Converts the given double 'v' to digit characters. 'v' must not be NaN, + // +Infinity, or -Infinity. In SHORTEST_SINGLE-mode this restriction also + // applies to 'v' after it has been casted to a single-precision float. That + // is, in this mode static_cast(v) must not be NaN, +Infinity or + // -Infinity. + // + // The result should be interpreted as buffer * 10^(point-length). + // + // The digits are written to the buffer in the platform's charset, which is + // often UTF-8 (with ASCII-range digits) but may be another charset, such + // as EBCDIC. 
+  //
+  // The output depends on the given mode:
+  //  - SHORTEST: produce the least amount of digits for which the internal
+  //   identity requirement is still satisfied. If the digits are printed
+  //   (together with the correct exponent) then reading this number will give
+  //   'v' again. The buffer will choose the representation that is closest to
+  //   'v'. If there are two at the same distance, then the one farther away
+  //   from 0 is chosen (halfway cases - ending with 5 - are rounded up).
+  //   In this mode the 'requested_digits' parameter is ignored.
+  //  - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
+  //  - FIXED: produces digits necessary to print a given number with
+  //   'requested_digits' digits after the decimal point. The produced digits
+  //   might be too short in which case the caller has to fill the remainder
+  //   with '0's.
+  //   Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+  //   Halfway cases are rounded towards +/-Infinity (away from 0). The call
+  //   toFixed(0.15, 2) thus returns buffer="2", point=0.
+  //   The returned buffer may contain digits that would be truncated from the
+  //   shortest representation of the input.
+  //  - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+  //   Even though the length of produced digits usually equals
+  //   'requested_digits', the function is allowed to return fewer digits, in
+  //   which case the caller has to fill the missing digits with '0's.
+  //   Halfway cases are again rounded away from 0.
+  // DoubleToAscii expects the given buffer to be big enough to hold all
+  // digits and a terminating null-character. In SHORTEST-mode it expects a
+  // buffer of at least kBase10MaximalLength + 1. In all other modes the
+  // requested_digits parameter and the padding-zeroes limit the size of the
+  // output. Don't forget the decimal point, the exponent character and the
+  // terminating null-character when computing the maximal output size.
+  // The given length is only used in debug mode to ensure the buffer is big
+  // enough.
+  static void DoubleToAscii(double v,
+                            DtoaMode mode,
+                            int requested_digits,
+                            char* buffer,
+                            int buffer_length,
+                            bool* sign,
+                            int* length,
+                            int* point);
+
+ private:
+  // Implementation for ToShortest and ToShortestSingle.
+  bool ToShortestIeeeNumber(double value,
+                            StringBuilder* result_builder,
+                            DtoaMode mode) const;
+
+  // If the value is a special value (NaN or Infinity) constructs the
+  // corresponding string using the configured infinity/nan-symbol.
+  // If either of them is NULL or the value is not special then the
+  // function returns false.
+  bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
+  // Constructs an exponential representation (i.e. 1.234e56).
+  // The given exponent assumes a decimal point after the first decimal digit.
+  void CreateExponentialRepresentation(const char* decimal_digits,
+                                       int length,
+                                       int exponent,
+                                       StringBuilder* result_builder) const;
+  // Creates a decimal representation (i.e. 1234.5678).
+ void CreateDecimalRepresentation(const char* decimal_digits, + int length, + int decimal_point, + int digits_after_point, + StringBuilder* result_builder) const; + + const int flags_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const char exponent_character_; + const int decimal_in_shortest_low_; + const int decimal_in_shortest_high_; + const int max_leading_padding_zeroes_in_precision_mode_; + const int max_trailing_padding_zeroes_in_precision_mode_; + const int min_exponent_width_; + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd0f04dcf02c8222b24e8e5c55654e80d127684 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h @@ -0,0 +1,90 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_ +#define DOUBLE_CONVERSION_FAST_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum FastDtoaMode { + // Computes the shortest representation of the given input. The returned + // result will be the most accurate number of this length. Longer + // representations might be more accurate. + FAST_DTOA_SHORTEST, + // Same as FAST_DTOA_SHORTEST but for single-precision floats. + FAST_DTOA_SHORTEST_SINGLE, + // Computes a representation where the precision (number of digits) is + // given as input. The precision is independent of the decimal point. + FAST_DTOA_PRECISION +}; + +// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not +// include the terminating '\0' character. 
+static const int kFastDtoaMaximalLength = 17;
+// Same for single-precision numbers.
+static const int kFastDtoaMaximalSingleLength = 9;
+
+// Provides a decimal representation of v.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+//   * v must be a strictly positive finite double.
+//
+// Returns true if it succeeds, otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true and mode equals
+//   - FAST_DTOA_SHORTEST, then
+//     the parameter requested_digits is ignored.
+//     The result satisfies
+//       v == (double) (buffer * 10^(point - length)).
+//     The digits in the buffer are the shortest representation possible. E.g.
+//     if 0.099999999999 and 0.1 represent the same double then "1" is returned
+//     with point = 0.
+//     The last digit will be closest to the actual v. That is, even if several
+//     digits might correctly yield 'v' when read again, the buffer will contain
+//     the one closest to v.
+//   - FAST_DTOA_PRECISION, then
+//     the buffer contains requested_digits digits.
+//     the difference v - (buffer * 10^(point-length)) is closest to zero for
+//     all possible representations of requested_digits digits.
+//     If there are two values that are equally close, then FastDtoa returns
+//     false.
+// For both modes the buffer must be large enough to hold the result.
+bool FastDtoa(double d,
+              FastDtoaMode mode,
+              int requested_digits,
+              Vector<char> buffer,
+              int* length,
+              int* decimal_point);
+
+}  // namespace double_conversion
+}  // namespace arrow_vendored
+
+#endif  // DOUBLE_CONVERSION_FAST_DTOA_H_
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf2a59a9805193063997ae621e6e3367d3cc029d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h
@@ -0,0 +1,58 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_
+#define DOUBLE_CONVERSION_FIXED_DTOA_H_
+
+#include "utils.h"
+
+namespace arrow_vendored {
+namespace double_conversion {
+
+// Produces digits necessary to print a given number with
+// 'fractional_count' digits after the decimal point.
+// The buffer must be big enough to hold the result plus one terminating null
+// character.
+//
+// The produced digits might be too short in which case the caller has to fill
+// the gaps with '0's.
+// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
+// decimal_point = -2.
+// Halfway cases are rounded towards +/-Infinity (away from 0). The call
+// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
+// The returned buffer may contain digits that would be truncated from the
+// shortest representation of the input.
+//
+// This method only works for some parameters. If it can't handle the input it
+// returns false. The output is null-terminated when the function succeeds.
+bool FastFixedDtoa(double v, int fractional_count,
+                   Vector<char> buffer, int* length, int* decimal_point);
+
+}  // namespace double_conversion
+}  // namespace arrow_vendored
+
+#endif  // DOUBLE_CONVERSION_FIXED_DTOA_H_
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h
new file mode 100644
index 0000000000000000000000000000000000000000..4cedc0bee04e6470ce02d27b9390068e0ec0fcc1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h
@@ -0,0 +1,449 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_H_
+#define DOUBLE_CONVERSION_DOUBLE_H_
+
+#include "diy-fp.h"
+
+namespace arrow_vendored {
+namespace double_conversion {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
+static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+  static const uint64_t kSignMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000);
+  static const uint64_t kExponentMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000);
+  static const uint64_t kSignificandMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
+  static const uint64_t kHiddenBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00100000, 00000000);
+  static const uint64_t kQuietNanBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00080000, 00000000);
+  static const int kPhysicalSignificandSize = 52;  // Excludes the hidden bit.
+  static const int kSignificandSize = 53;
+  static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
+  static const int kMaxExponent = 0x7FF - kExponentBias;
+
+  Double() : d64_(0) {}
+  explicit Double(double d) : d64_(double_to_uint64(d)) {}
+  explicit Double(uint64_t d64) : d64_(d64) {}
+  explicit Double(DiyFp diy_fp)
+      : d64_(DiyFpToUint64(diy_fp)) {}
+
+  // The value encoded by this Double must be greater than or equal to +0.0.
+  // It must not be special (infinity, or NaN).
+  DiyFp AsDiyFp() const {
+    DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+    DOUBLE_CONVERSION_ASSERT(!IsSpecial());
+    return DiyFp(Significand(), Exponent());
+  }
+
+  // The value encoded by this Double must be strictly greater than 0.
+  DiyFp AsNormalizedDiyFp() const {
+    DOUBLE_CONVERSION_ASSERT(value() > 0.0);
+    uint64_t f = Significand();
+    int e = Exponent();
+
+    // The current double could be a denormal.
+    while ((f & kHiddenBit) == 0) {
+      f <<= 1;
+      e--;
+    }
+    // Do the final shifts in one go.
+    f <<= DiyFp::kSignificandSize - kSignificandSize;
+    e -= DiyFp::kSignificandSize - kSignificandSize;
+    return DiyFp(f, e);
+  }
+
+  // Returns the double's bits as uint64.
+  uint64_t AsUint64() const {
+    return d64_;
+  }
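// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header): Double exposes the
// raw IEEE-754 binary64 layout.  Exponent()/Significand()/NextDouble() are
// declared just below.
//
//   Double d(1.0);
//   uint64_t bits = d.AsUint64();   // 0x3FF0000000000000
//   // d.Significand() == kHiddenBit == 2^52 (hidden bit added back)
//   // d.Exponent()    == -52, so the value is 2^52 * 2^-52 == 1.0
//   double next = d.NextDouble();   // 1.0 + 2^-52, the next representable
// ---------------------------------------------------------------------------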
+  // Returns the next greater double. Returns +infinity on input +infinity.
+  double NextDouble() const {
+    if (d64_ == kInfinity) return Double(kInfinity).value();
+    if (Sign() < 0 && Significand() == 0) {
+      // -0.0
+      return 0.0;
+    }
+    if (Sign() < 0) {
+      return Double(d64_ - 1).value();
+    } else {
+      return Double(d64_ + 1).value();
+    }
+  }
+
+  double PreviousDouble() const {
+    if (d64_ == (kInfinity | kSignMask)) return -Infinity();
+    if (Sign() < 0) {
+      return Double(d64_ + 1).value();
+    } else {
+      if (Significand() == 0) return -0.0;
+      return Double(d64_ - 1).value();
+    }
+  }
+
+  int Exponent() const {
+    if (IsDenormal()) return kDenormalExponent;
+
+    uint64_t d64 = AsUint64();
+    int biased_e =
+        static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
+    return biased_e - kExponentBias;
+  }
+
+  uint64_t Significand() const {
+    uint64_t d64 = AsUint64();
+    uint64_t significand = d64 & kSignificandMask;
+    if (!IsDenormal()) {
+      return significand + kHiddenBit;
+    } else {
+      return significand;
+    }
+  }
+
+  // Returns true if the double is a denormal.
+  bool IsDenormal() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kExponentMask) == 0;
+  }
+
+  // We consider denormals not to be special.
+  // Hence only Infinity and NaN are special.
+  bool IsSpecial() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kExponentMask) == kExponentMask;
+  }
+
+  bool IsNan() const {
+    uint64_t d64 = AsUint64();
+    return ((d64 & kExponentMask) == kExponentMask) &&
+           ((d64 & kSignificandMask) != 0);
+  }
+
+  bool IsQuietNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+    return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
+#else
+    return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
+#endif
+  }
+
+  bool IsSignalingNan() const {
+#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
+    return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
+#else
+    return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
+#endif
+  }
+
+
+  bool IsInfinite() const {
+    uint64_t d64 = AsUint64();
+    return ((d64 & kExponentMask) == kExponentMask) &&
+           ((d64 & kSignificandMask) == 0);
+  }
+
+  int Sign() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kSignMask) == 0 ? 1 : -1;
+  }
+
+  // Precondition: the value encoded by this Double must be greater than
+  // or equal to +0.0.
+  DiyFp UpperBoundary() const {
+    DOUBLE_CONVERSION_ASSERT(Sign() > 0);
+    return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+  }
+
+  // Computes the two boundaries of this.
+  // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+  // exponent as m_plus.
+  // Precondition: the value encoded by this Double must be greater than 0.
+  void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+    DOUBLE_CONVERSION_ASSERT(value() > 0.0);
+    DiyFp v = this->AsDiyFp();
+    DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+    DiyFp m_minus;
+    if (LowerBoundaryIsCloser()) {
+      m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+    } else {
+      m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+    }
+    m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+    m_minus.set_e(m_plus.e());
+    *out_m_plus = m_plus;
+    *out_m_minus = m_minus;
+  }
+
+  bool LowerBoundaryIsCloser() const {
+    // If the significand is of the form f == 2^p-1, then the lower boundary
+    // is closer.
+    // Think of v = 1000e10 and v- = 9999e9.
+    // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+    // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. + bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + double value() const { return uint64_to_double(d64_); } + + // Returns the significand size for a given order of magnitude. + // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude. + // This function returns the number of significant binary digits v will have + // once it's encoded into a double. In almost all cases this is equal to + // kSignificandSize. The only exceptions are denormals. They start with + // leading zeroes and their effective significand-size is hence smaller. + static int SignificandSizeForOrderOfMagnitude(int order) { + if (order >= (kDenormalExponent + kSignificandSize)) { + return kSignificandSize; + } + if (order <= kDenormalExponent) return 0; + return order - kDenormalExponent; + } + + static double Infinity() { + return Double(kInfinity).value(); + } + + static double NaN() { + return Double(kNaN).value(); + } + + private: + static const int kDenormalExponent = -kExponentBias + 1; + static const uint64_t kInfinity = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000); +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF7FFFF, FFFFFFFF); +#else + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF80000, 00000000); +#endif + + + const uint64_t d64_; + + static uint64_t DiyFpToUint64(DiyFp diy_fp) { + uint64_t significand = diy_fp.f(); + int exponent = diy_fp.e(); + while (significand > kHiddenBit + kSignificandMask) { + significand >>= 1; + exponent++; + } + if (exponent >= kMaxExponent) { + return kInfinity; + } + if (exponent < kDenormalExponent) { + return 0; + } + while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) { + significand <<= 1; + exponent--; + } + uint64_t biased_exponent; + if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) { + biased_exponent = 0; + } else { + biased_exponent = static_cast(exponent + kExponentBias); + } + return (significand & kSignificandMask) | + (biased_exponent << kPhysicalSignificandSize); + } + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Double); +}; + +class Single { + public: + static const uint32_t kSignMask = 0x80000000; + static const uint32_t kExponentMask = 0x7F800000; + static const uint32_t kSignificandMask = 0x007FFFFF; + static const uint32_t kHiddenBit = 0x00800000; + static const uint32_t kQuietNanBit = 0x00400000; + static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit. + static const int kSignificandSize = 24; + + Single() : d32_(0) {} + explicit Single(float f) : d32_(float_to_uint32(f)) {} + explicit Single(uint32_t d32) : d32_(d32) {} + + // The value encoded by this Single must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + DOUBLE_CONVERSION_ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // Returns the single's bit as uint64. 
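// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header): Single mirrors the
// Double wrapper above, but for IEEE-754 binary32 (float) values.
//
//   Single s(1.5f);
//   uint32_t bits = s.AsUint32();   // 0x3FC00000
//   // s.Significand() == 0x00C00000 == 3 * 2^22 (hidden bit included)
//   // s.Exponent()    == -23, so the value is 3 * 2^22 * 2^-23 == 1.5
// ---------------------------------------------------------------------------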
+ uint32_t AsUint32() const { + return d32_; + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint32_t d32 = AsUint32(); + int biased_e = + static_cast((d32 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint32_t Significand() const { + uint32_t d32 = AsUint32(); + uint32_t significand = d32 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the single is a denormal. + bool IsDenormal() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) != 0); + } + + bool IsQuietNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#endif + } + + bool IsSignalingNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#endif + } + + + bool IsInfinite() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) == 0); + } + + int Sign() const { + uint32_t d32 = AsUint32(); + return (d32 & kSignMask) == 0? 1: -1; + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Single must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + // Precondition: the value encoded by this Single must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. 
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + float value() const { return uint32_to_float(d32_); } + + static float Infinity() { + return Single(kInfinity).value(); + } + + static float NaN() { + return Single(kNaN).value(); + } + + private: + static const int kExponentBias = 0x7F + kPhysicalSignificandSize; + static const int kDenormalExponent = -kExponentBias + 1; + static const int kMaxExponent = 0xFF - kExponentBias; + static const uint32_t kInfinity = 0x7F800000; +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint32_t kNaN = 0x7FBFFFFF; +#else + static const uint32_t kNaN = 0x7FC00000; +#endif + + const uint32_t d32_; + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Single); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_H_ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h new file mode 100644 index 0000000000000000000000000000000000000000..83eb6fec5f44400cf9a81d45862c4bfd71ac52fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h @@ -0,0 +1,240 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ +#define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class StringToDoubleConverter { + public: + // Enumeration for allowing octals and ignoring junk when converting + // strings to numbers. 
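// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header): a converter built
// from the flags and constructor declared below, accepting surrounding
// whitespace.  The sentinel values are arbitrary choices for this example.
//
//   StringToDoubleConverter conv(
//       StringToDoubleConverter::ALLOW_LEADING_SPACES |
//           StringToDoubleConverter::ALLOW_TRAILING_SPACES,
//       /*empty_string_value=*/0.0,
//       /*junk_string_value=*/std::numeric_limits<double>::quiet_NaN(),
//       /*infinity_symbol=*/"Infinity", /*nan_symbol=*/"NaN");
//   int used = 0;
//   double v = conv.StringToDouble("  3.25  ", 8, &used);  // v == 3.25, used == 8
// ---------------------------------------------------------------------------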
+  enum Flags {
+    NO_FLAGS = 0,
+    ALLOW_HEX = 1,
+    ALLOW_OCTALS = 2,
+    ALLOW_TRAILING_JUNK = 4,
+    ALLOW_LEADING_SPACES = 8,
+    ALLOW_TRAILING_SPACES = 16,
+    ALLOW_SPACES_AFTER_SIGN = 32,
+    ALLOW_CASE_INSENSITIVITY = 64,
+    ALLOW_CASE_INSENSIBILITY = 64,  // Deprecated
+    ALLOW_HEX_FLOATS = 128,
+  };
+
+  static const uc16 kNoSeparator = '\0';
+
+  // Flags should be a bit-or combination of the possible Flags-enum.
+  //  - NO_FLAGS: no special flags.
+  //  - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
+  //      Ex: StringToDouble("0x1234") -> 4660.0
+  //          In StringToDouble("0x1234.56") the characters ".56" are trailing
+  //          junk. The result of the call is hence dependent on
+  //          the ALLOW_TRAILING_JUNK flag and/or the junk value.
+  //      With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
+  //      the string will not be parsed as "0" followed by junk.
+  //
+  //  - ALLOW_OCTALS: recognizes the prefix "0" for octals:
+  //      If a sequence of octal digits starts with '0', then the number is
+  //      read as octal integer. Octal numbers may only be integers.
+  //      Ex: StringToDouble("01234") -> 668.0
+  //          StringToDouble("012349") -> 12349.0  // Not a sequence of octal
+  //                                               // digits.
+  //          In StringToDouble("01234.56") the characters ".56" are trailing
+  //          junk. The result of the call is hence dependent on
+  //          the ALLOW_TRAILING_JUNK flag and/or the junk value.
+  //          In StringToDouble("01234e56") the characters "e56" are trailing
+  //          junk, too.
+  //  - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
+  //      a double literal.
+  //  - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces,
+  //      new-lines, and tabs.
+  //  - ALLOW_TRAILING_SPACES: ignore trailing whitespace.
+  //  - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign.
+  //      Ex: StringToDouble("-   123.2") -> -123.2.
+  //          StringToDouble("+   123.2") -> 123.2
+  //  - ALLOW_CASE_INSENSITIVITY: ignore case of characters for special values:
+  //      infinity and nan.
+  //  - ALLOW_HEX_FLOATS: allows hexadecimal float literals.
+  //      This *must* start with "0x" and separate the exponent with "p".
+  //      Examples: 0x1.2p3 == 9.0
+  //                0x10.1p0 == 16.0625
+  //      ALLOW_HEX and ALLOW_HEX_FLOATS are independent.
+  //
+  // empty_string_value is returned when an empty string is given as input.
+  // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
+  // containing only spaces is converted to the 'empty_string_value', too.
+  //
+  // junk_string_value is returned when
+  //  a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
+  //     part of a double-literal) is found.
+  //  b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
+  //     double literal.
+  //
+  // infinity_symbol and nan_symbol are strings that are used to detect
+  // inputs that represent infinity and NaN. They can be null, in which case
+  // they are ignored.
+  // The conversion routine first reads any possible signs. Then it compares the
+  // following character of the input-string with the first character of
+  // the infinity, and nan-symbol. If either matches, the function assumes that
+  // a match has been found, and expects the following input characters to match
+  // the remaining characters of the special-value symbol.
+  // This means that the following restrictions apply to special-value symbols:
+  //  - they must not start with signs ('+', or '-'),
+  //  - they must not have the same first character,
+  //  - they must not start with digits.
+  //
+  // If the separator character is not kNoSeparator, then that specific
+  // character is ignored when in between two valid digits of the significand.
+  // It is not allowed to appear in the exponent.
+  // It is not allowed to lead or trail the number.
+  // It is not allowed to appear twice next to each other.
+  //
+  // Examples:
+  //  flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
+  //  empty_string_value = 0.0,
+  //  junk_string_value = NaN,
+  //  infinity_symbol = "infinity",
+  //  nan_symbol = "nan":
+  //    StringToDouble("0x1234") -> 4660.0.
+  //    StringToDouble("0x1234K") -> 4660.0.
+  //    StringToDouble("") -> 0.0  // empty_string_value.
+  //    StringToDouble(" ") -> NaN  // junk_string_value.
+  //    StringToDouble(" 1") -> NaN  // junk_string_value.
+  //    StringToDouble("0x") -> NaN  // junk_string_value.
+  //    StringToDouble("-123.45") -> -123.45.
+  //    StringToDouble("--123.45") -> NaN  // junk_string_value.
+  //    StringToDouble("123e45") -> 123e45.
+  //    StringToDouble("123E45") -> 123e45.
+  //    StringToDouble("123e+45") -> 123e45.
+  //    StringToDouble("123E-45") -> 123e-45.
+  //    StringToDouble("123e") -> 123.0  // trailing junk ignored.
+  //    StringToDouble("123e-") -> 123.0  // trailing junk ignored.
+  //    StringToDouble("+NaN") -> NaN  // NaN string literal.
+  //    StringToDouble("-infinity") -> -inf.  // infinity literal.
+  //    StringToDouble("Infinity") -> NaN  // junk_string_value.
+  //
+  //  flags = ALLOW_OCTALS | ALLOW_LEADING_SPACES,
+  //  empty_string_value = 0.0,
+  //  junk_string_value = NaN,
+  //  infinity_symbol = NULL,
+  //  nan_symbol = NULL:
+  //    StringToDouble("0x1234") -> NaN  // junk_string_value.
+  //    StringToDouble("01234") -> 668.0.
+  //    StringToDouble("") -> 0.0  // empty_string_value.
+  //    StringToDouble(" ") -> 0.0  // empty_string_value.
+  //    StringToDouble(" 1") -> 1.0
+  //    StringToDouble("0x") -> NaN  // junk_string_value.
+  //    StringToDouble("0123e45") -> NaN  // junk_string_value.
+  //    StringToDouble("01239E45") -> 1239e45.
+  //    StringToDouble("-infinity") -> NaN  // junk_string_value.
+  //    StringToDouble("NaN") -> NaN  // junk_string_value.
+  //
+  //  flags = NO_FLAGS,
+  //  separator = ' ':
+  //    StringToDouble("1 2 3 4") -> 1234.0
+  //    StringToDouble("1  2") -> NaN  // junk_string_value
+  //    StringToDouble("1 000 000.0") -> 1000000.0
+  //    StringToDouble("1.000 000") -> 1.0
+  //    StringToDouble("1.0e1 000") -> NaN  // junk_string_value
+  StringToDoubleConverter(int flags,
+                          double empty_string_value,
+                          double junk_string_value,
+                          const char* infinity_symbol,
+                          const char* nan_symbol,
+                          uc16 separator = kNoSeparator)
+      : flags_(flags),
+        empty_string_value_(empty_string_value),
+        junk_string_value_(junk_string_value),
+        infinity_symbol_(infinity_symbol),
+        nan_symbol_(nan_symbol),
+        separator_(separator) {
+  }
+
+  // Performs the conversion.
+  // The output parameter 'processed_characters_count' is set to the number
+  // of characters that have been processed to read the number.
+  // Spaces that are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
+  // in the 'processed_characters_count'. Trailing junk is never included.
+  double StringToDouble(const char* buffer,
+                        int length,
+                        int* processed_characters_count) const;
+
+  // Same as StringToDouble above but for 16 bit characters.
+  double StringToDouble(const uc16* buffer,
+                        int length,
+                        int* processed_characters_count) const;
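// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header) of the
// double-rounding hazard that motivates the separate StringToFloat entry
// point declared below ('conv' is the converter from the earlier sketch):
//
//   int used = 0;
//   const char* s = "1.2345678901234567";   // 18 characters
//   float direct = conv.StringToFloat(s, 18, &used);   // one rounding step
//   float twice  = static_cast<float>(
//       conv.StringToDouble(s, 18, &used));            // rounds twice
//   // 'direct' and 'twice' agree for most inputs, but not for all of them.
// ---------------------------------------------------------------------------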
+  // Same as StringToDouble but reads a float.
+  // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
+  // due to potential double-rounding.
+  float StringToFloat(const char* buffer,
+                      int length,
+                      int* processed_characters_count) const;
+
+  // Same as StringToFloat above but for 16 bit characters.
+  float StringToFloat(const uc16* buffer,
+                      int length,
+                      int* processed_characters_count) const;
+
+  // Same as StringToDouble for T = double, and StringToFloat for T = float.
+  template <typename T>
+  T StringTo(const char* buffer,
+             int length,
+             int* processed_characters_count) const;
+
+  // Same as StringTo above but for 16 bit characters.
+  template <typename T>
+  T StringTo(const uc16* buffer,
+             int length,
+             int* processed_characters_count) const;
+
+ private:
+  const int flags_;
+  const double empty_string_value_;
+  const double junk_string_value_;
+  const char* const infinity_symbol_;
+  const char* const nan_symbol_;
+  const uc16 separator_;
+
+  template <class Iterator>
+  double StringToIeee(Iterator start_pointer,
+                      int length,
+                      bool read_as_double,
+                      int* processed_characters_count) const;
+
+  DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
+};
+
+}  // namespace double_conversion
+}  // namespace arrow_vendored
+
+#endif  // DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..36576cfa91d8c903576d56fb807f594f23a5bf7b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp
@@ -0,0 +1,649 @@
+/*
+ * PCG Random Number Generation for C++
+ *
+ * Copyright 2014-2017 Melissa O'Neill ,
+ * and the PCG Project contributors.
+ *
+ * SPDX-License-Identifier: (Apache-2.0 OR MIT)
+ *
+ * Licensed under the Apache License, Version 2.0 (provided in
+ * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0)
+ * or under the MIT license (provided in LICENSE-MIT.txt and at
+ * http://opensource.org/licenses/MIT), at your option. This file may not
+ * be copied, modified, or distributed except according to those terms.
+ *
+ * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied.  See your chosen license for details.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * visit http://www.pcg-random.org/.
+ */
+
+/*
+ * This file provides support code that is useful for random-number generation
+ * but not specific to the PCG generation scheme, including:
+ *      - 128-bit int support for platforms where it isn't available natively
+ *      - bit twiddling operations
+ *      - I/O of 128-bit and 8-bit integers
+ *      - Handling the evilness of SeedSeq
+ *      - Support for efficiently producing random numbers less than a given
+ *        bound
+ */
+
+#ifndef PCG_EXTRAS_HPP_INCLUDED
+#define PCG_EXTRAS_HPP_INCLUDED 1
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __GNUC__
+    #include
+#endif
+
+/*
+ * Abstractions for compiler-specific directives
+ */
+
+#ifdef __GNUC__
+    #define PCG_NOINLINE __attribute__((noinline))
+#else
+    #define PCG_NOINLINE
+#endif
+
+/*
+ * Some members of the PCG library use 128-bit math.  When compiling on 64-bit
+ * platforms, both GCC and Clang provide 128-bit integer types that are ideal
+ * for the job.
+ *
+ * On 32-bit platforms (or with other compilers), we fall back to a C++
+ * class that provides 128-bit unsigned integers instead.
+ * It may seem like we're reinventing the wheel here, because libraries
+ * already exist that support large integers, but most of them provide very
+ * generic multiprecision code while here we're operating at a fixed size.
+ * Also, most other libraries are fairly heavyweight.  So we use a direct
+ * implementation.  Sadly, it's much slower than hand-coded assembly or
+ * direct CPU support.
+ */
+#if __SIZEOF_INT128__ && !PCG_FORCE_EMULATED_128BIT_MATH
+    namespace arrow_vendored {
+    namespace pcg_extras {
+        typedef __uint128_t pcg128_t;
+    }
+    }
+    #define PCG_128BIT_CONSTANT(high,low) \
+            ((pcg_extras::pcg128_t(high) << 64) + low)
+#else
+    #include "pcg_uint128.hpp"
+    namespace arrow_vendored {
+    namespace pcg_extras {
+        typedef pcg_extras::uint_x4<uint32_t,uint16_t> pcg128_t;
+    }
+    }
+    #define PCG_128BIT_CONSTANT(high,low) \
+            pcg_extras::pcg128_t(high,low)
+    #define PCG_EMULATED_128BIT_MATH 1
+#endif
+
+
+namespace arrow_vendored {
+namespace pcg_extras {
+
+/*
+ * We often need to represent a "number of bits".  When used normally, these
+ * numbers are never greater than 128, so an unsigned char is plenty.
+ * If you're using a nonstandard generator of a larger size, you can set
+ * PCG_BITCOUNT_T to have it define it as a larger size.  (Some compilers
+ * might produce faster code if you set it to an unsigned int.)
+ */
+
+#ifndef PCG_BITCOUNT_T
+    typedef uint8_t bitcount_t;
+#else
+    typedef PCG_BITCOUNT_T bitcount_t;
+#endif
+
+/*
+ * C++ requires us to be able to serialize RNG state by printing or reading
+ * it from a stream.  Because we use 128-bit ints, we also need to be able
+ * to print them, so here is code to do so.
+ *
+ * This code provides enough functionality to print 128-bit ints in decimal
+ * and zero-padded in hex.  It's not a full-featured implementation.
+ */
+
+template <typename CharT, typename Traits>
+std::basic_ostream<CharT,Traits>&
+operator<<(std::basic_ostream<CharT,Traits>& out, pcg128_t value)
+{
+    auto desired_base = out.flags() & out.basefield;
+    bool want_hex = desired_base == out.hex;
+
+    if (want_hex) {
+        uint64_t highpart = uint64_t(value >> 64);
+        uint64_t lowpart  = uint64_t(value);
+        auto desired_width = out.width();
+        if (desired_width > 16) {
+            out.width(desired_width - 16);
+        }
+        if (highpart != 0 || desired_width > 16)
+            out << highpart;
+        CharT oldfill = '\0';
+        if (highpart != 0) {
+            out.width(16);
+            oldfill = out.fill('0');
+        }
+        auto oldflags = out.setf(decltype(desired_base){}, out.showbase);
+        out << lowpart;
+        out.setf(oldflags);
+        if (highpart != 0) {
+            out.fill(oldfill);
+        }
+        return out;
+    }
+    constexpr size_t MAX_CHARS_128BIT = 40;
+
+    char buffer[MAX_CHARS_128BIT];
+    char* pos = buffer+sizeof(buffer);
+    *(--pos) = '\0';
+    constexpr auto BASE = pcg128_t(10ULL);
+    do {
+        auto div = value / BASE;
+        auto mod = uint32_t(value - (div * BASE));
+        *(--pos) = '0' + char(mod);
+        value = div;
+    } while(value != pcg128_t(0ULL));
+    return out << pos;
+}
+
+template <typename CharT, typename Traits>
+std::basic_istream<CharT,Traits>&
+operator>>(std::basic_istream<CharT,Traits>& in, pcg128_t& value)
+{
+    typename std::basic_istream<CharT,Traits>::sentry s(in);
+
+    if (!s)
+        return in;
+
+    constexpr auto BASE = pcg128_t(10ULL);
+    pcg128_t current(0ULL);
+    bool did_nothing = true;
+    bool overflow = false;
+    for(;;) {
+        CharT wide_ch = in.get();
+        if (!in.good())
+            break;
+        auto ch = in.narrow(wide_ch, '\0');
+        if (ch < '0' || ch > '9') {
+            in.unget();
+            break;
+        }
+        did_nothing = false;
+        pcg128_t digit(uint32_t(ch - '0'));
+        pcg128_t timesbase = current*BASE;
+        overflow = overflow || timesbase < current;
+        current = timesbase + digit;
+        overflow = overflow || current < digit;
+    }
+
+    if (did_nothing || overflow) {
+        in.setstate(std::ios::failbit);
+        if (overflow)
+            current = ~pcg128_t(0ULL);
+    }
+
+    value = current;
+
+    return in;
+}
+
+/*
+ * Likewise, if people use tiny rngs, we'll be serializing uint8_t.
+ * If we just used the provided IO operators, they'd read/write chars,
+ * not ints, so we need to define our own.  We *can* redefine this operator
+ * here because we're in our own namespace.
+ */
+
+template <typename CharT, typename Traits>
+std::basic_ostream<CharT,Traits>&
+operator<<(std::basic_ostream<CharT,Traits>& out, uint8_t value)
+{
+    return out << uint32_t(value);
+}
+
+template <typename CharT, typename Traits>
+std::basic_istream<CharT,Traits>&
+operator>>(std::basic_istream<CharT,Traits>& in, uint8_t& target)
+{
+    uint32_t value = 0xdecea5edU;
+    in >> value;
+    if (!in && value == 0xdecea5edU)
+        return in;
+    if (value > uint8_t(~0)) {
+        in.setstate(std::ios::failbit);
+        value = ~0U;
+    }
+    target = uint8_t(value);
+    return in;
+}
+
+/* Unfortunately, the above functions don't get found in preference to the
+ * built in ones, so we create some more specific overloads that will.
+ * Ugh.
+ */
+
+inline std::ostream& operator<<(std::ostream& out, uint8_t value)
+{
+    return pcg_extras::operator<< <char>(out, value);
+}
+
+inline std::istream& operator>>(std::istream& in, uint8_t& value)
+{
+    return pcg_extras::operator>> <char>(in, value);
+}
+
+
+
+/*
+ * Useful bitwise operations.
+ */
+
+/*
+ * XorShifts are invertible, but they are something of a pain to invert.
+ * This function backs them out.  It's used by the whacky "inside out"
+ * generator defined later.
+ */
+
+template <typename itype>
+inline itype unxorshift(itype x, bitcount_t bits, bitcount_t shift)
+{
+    if (2*shift >= bits) {
+        return x ^ (x >> shift);
+    }
+    itype lowmask1 = (itype(1U) << (bits - shift*2)) - 1;
+    itype highmask1 = ~lowmask1;
+    itype top1 = x;
+    itype bottom1 = x & lowmask1;
+    top1 ^= top1 >> shift;
+    top1 &= highmask1;
+    x = top1 | bottom1;
+    itype lowmask2 = (itype(1U) << (bits - shift)) - 1;
+    itype bottom2 = x & lowmask2;
+    bottom2 = unxorshift(bottom2, bits - shift, shift);
+    bottom2 &= lowmask1;
+    return top1 | bottom2;
+}
+
+/*
+ * Rotate left and right.
+ *
+ * In an ideal world, compilers would spot idiomatic rotate code and convert
+ * it to a rotate instruction.  Of course, opinions vary on what the correct
+ * idiom is and how to spot it.  For clang, sometimes it generates better
+ * (but still crappy) code if you define PCG_USE_ZEROCHECK_ROTATE_IDIOM.
+ */
+
+template <typename itype>
+inline itype rotl(itype value, bitcount_t rot)
+{
+    constexpr bitcount_t bits = sizeof(itype) * 8;
+    constexpr bitcount_t mask = bits - 1;
+#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
+    return rot ? (value << rot) | (value >> (bits - rot)) : value;
+#else
+    return (value << rot) | (value >> ((- rot) & mask));
+#endif
+}
+
+template <typename itype>
+inline itype rotr(itype value, bitcount_t rot)
+{
+    constexpr bitcount_t bits = sizeof(itype) * 8;
+    constexpr bitcount_t mask = bits - 1;
+#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
+    return rot ? (value >> rot) | (value << (bits - rot)) : value;
+#else
+    return (value >> rot) | (value << ((- rot) & mask));
+#endif
+}
+
+/* Unfortunately, both Clang and GCC sometimes perform poorly when it comes
+ * to properly recognizing idiomatic rotate code, so we also provide
+ * assembler directives (enabled with PCG_USE_INLINE_ASM).  Boo, hiss.
+ * (I hope that these compilers get better so that this code can die.)
+ *
+ * These overloads will be preferred over the general template code above.
+ */
+#if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__)
+
+inline uint8_t rotr(uint8_t value, bitcount_t rot)
+{
+    asm ("rorb %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+    return value;
+}
+
+inline uint16_t rotr(uint16_t value, bitcount_t rot)
+{
+    asm ("rorw %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+    return value;
+}
+
+inline uint32_t rotr(uint32_t value, bitcount_t rot)
+{
+    asm ("rorl %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+    return value;
+}
+
+#if __x86_64__
+inline uint64_t rotr(uint64_t value, bitcount_t rot)
+{
+    asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+    return value;
+}
+#endif // __x86_64__
+
+#elif defined(_MSC_VER)
+  // Use MSVC++ bit rotation intrinsics
+
+#pragma intrinsic(_rotr, _rotr64, _rotr8, _rotr16)
+
+inline uint8_t rotr(uint8_t value, bitcount_t rot)
+{
+    return _rotr8(value, rot);
+}
+
+inline uint16_t rotr(uint16_t value, bitcount_t rot)
+{
+    return _rotr16(value, rot);
+}
+
+inline uint32_t rotr(uint32_t value, bitcount_t rot)
+{
+    return _rotr(value, rot);
+}
+
+inline uint64_t rotr(uint64_t value, bitcount_t rot)
+{
+    return _rotr64(value, rot);
+}
+
+#endif // PCG_USE_INLINE_ASM
+
+
+/*
+ * The C++ SeedSeq concept (modelled by seed_seq) can fill an array of
+ * 32-bit integers with seed data, but sometimes we want to produce
+ * larger or smaller integers.
+ *
+ * The following code handles this annoyance.
+ *
+ * uneven_copy will copy an array of 32-bit ints to an array of larger or
+ * smaller ints (actually, the code is generic, needing only forward
+ * iterators).
The copy is identical to the one that would be performed if + * we just did memcpy on a standard little-endian machine, but works + * regardless of the endian of the machine (or the weirdness of the ints + * involved). + * + * generate_to initializes an array of integers using a SeedSeq + * object. It is given the size as a static constant at compile time and + * tries to avoid memory allocation. If we're filling in 32-bit constants + * we just do it directly. If we need a separate buffer and it's small, + * we allocate it on the stack. Otherwise, we fall back to heap allocation. + * Ugh. + * + * generate_one produces a single value of some integral type using a + * SeedSeq object. + */ + + /* uneven_copy helper, case where destination ints are less than 32 bit. */ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::true_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bitcount_t SRC_SIZE = sizeof(src_t); + constexpr bitcount_t DEST_SIZE = sizeof(dest_t); + constexpr bitcount_t DEST_BITS = DEST_SIZE * 8; + constexpr bitcount_t SCALE = SRC_SIZE / DEST_SIZE; + + size_t count = 0; + src_t value = 0; + + while (dest_first != dest_last) { + if ((count++ % SCALE) == 0) + value = *src_first++; // Get more bits + else + value >>= DEST_BITS; // Move down bits + + *dest_first++ = dest_t(value); // Truncates, ignores high bits. + } + return src_first; +} + + /* uneven_copy helper, case where destination ints are more than 32 bit. */ + +template +SrcIter uneven_copy_impl( + SrcIter src_first, DestIter dest_first, DestIter dest_last, + std::false_type) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr auto SRC_SIZE = sizeof(src_t); + constexpr auto SRC_BITS = SRC_SIZE * 8; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto SCALE = (DEST_SIZE+SRC_SIZE-1) / SRC_SIZE; + + while (dest_first != dest_last) { + dest_t value(0UL); + unsigned int shift = 0; + + for (size_t i = 0; i < SCALE; ++i) { + value |= dest_t(*src_first++) << shift; + shift += SRC_BITS; + } + + *dest_first++ = value; + } + return src_first; +} + +/* uneven_copy, call the right code for larger vs. smaller */ + +template +inline SrcIter uneven_copy(SrcIter src_first, + DestIter dest_first, DestIter dest_last) +{ + typedef typename std::iterator_traits::value_type src_t; + typedef typename std::iterator_traits::value_type dest_t; + + constexpr bool DEST_IS_SMALLER = sizeof(dest_t) < sizeof(src_t); + + return uneven_copy_impl(src_first, dest_first, dest_last, + std::integral_constant{}); +} + +/* generate_to, fill in a fixed-size array of integral type using a SeedSeq + * (actually works for any random-access iterator) + */ + +template +inline void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::true_type) +{ + generator.generate(dest, dest+size); +} + +template +void generate_to_impl(SeedSeq&& generator, DestIter dest, + std::false_type) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr auto DEST_SIZE = sizeof(dest_t); + constexpr auto GEN_SIZE = sizeof(uint32_t); + + constexpr bool GEN_IS_SMALLER = GEN_SIZE < DEST_SIZE; + constexpr size_t FROM_ELEMS = + GEN_IS_SMALLER + ? 
size * ((DEST_SIZE+GEN_SIZE-1) / GEN_SIZE) + : (size + (GEN_SIZE / DEST_SIZE) - 1) + / ((GEN_SIZE / DEST_SIZE) + GEN_IS_SMALLER); + // this odd code ^^^^^^^^^^^^^^^^^ is work-around for + // a bug: http://llvm.org/bugs/show_bug.cgi?id=21287 + + if (FROM_ELEMS <= 1024) { + uint32_t buffer[FROM_ELEMS]; + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + } else { + uint32_t* buffer = static_cast(malloc(GEN_SIZE * FROM_ELEMS)); + generator.generate(buffer, buffer+FROM_ELEMS); + uneven_copy(buffer, dest, dest+size); + free(static_cast(buffer)); + } +} + +template +inline void generate_to(SeedSeq&& generator, DestIter dest) +{ + typedef typename std::iterator_traits::value_type dest_t; + constexpr bool IS_32BIT = sizeof(dest_t) == sizeof(uint32_t); + + generate_to_impl(std::forward(generator), dest, + std::integral_constant{}); +} + +/* generate_one, produce a value of integral type using a SeedSeq + * (optionally, we can have it produce more than one and pick which one + * we want) + */ + +template +inline UInt generate_one(SeedSeq&& generator) +{ + UInt result[N]; + generate_to(std::forward(generator), result); + return result[i]; +} + +template +auto bounded_rand(RngType& rng, typename RngType::result_type upper_bound) + -> typename RngType::result_type +{ + typedef typename RngType::result_type rtype; + rtype threshold = (RngType::max() - RngType::min() + rtype(1) - upper_bound) + % upper_bound; + for (;;) { + rtype r = rng() - RngType::min(); + if (r >= threshold) + return r % upper_bound; + } +} + +template +void shuffle(Iter from, Iter to, RandType&& rng) +{ + typedef typename std::iterator_traits::difference_type delta_t; + typedef typename std::remove_reference::type::result_type result_t; + auto count = to - from; + while (count > 1) { + delta_t chosen = delta_t(bounded_rand(rng, result_t(count))); + --count; + --to; + using std::swap; + swap(*(from + chosen), *to); + } +} + +/* + * Although std::seed_seq is useful, it isn't everything. Often we want to + * initialize a random-number generator some other way, such as from a random + * device. + * + * Technically, it does not meet the requirements of a SeedSequence because + * it lacks some of the rarely-used member functions (some of which would + * be impossible to provide). However the C++ standard is quite specific + * that actual engines only called the generate method, so it ought not to be + * a problem in practice. + */ + +template +class seed_seq_from { +private: + RngType rng_; + + typedef uint_least32_t result_type; + +public: + template + seed_seq_from(Args&&... args) : + rng_(std::forward(args)...) + { + // Nothing (else) to do... + } + + template + void generate(Iter start, Iter finish) + { + for (auto i = start; i != finish; ++i) + *i = result_type(rng_()); + } + + constexpr size_t size() const + { + return (sizeof(typename RngType::result_type) > sizeof(result_type) + && RngType::max() > ~size_t(0UL)) + ? ~size_t(0UL) + : size_t(RngType::max()); + } +}; + +// Sometimes, when debugging or testing, it's handy to be able print the name +// of a (in human-readable form). 
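// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header): seed_seq_from and
// bounded_rand, both defined above, used together.  std::mt19937 stands in
// for the PCG engines defined later in pcg_random.hpp.
//
//   #include <random>
//   arrow_vendored::pcg_extras::seed_seq_from<std::random_device> seed_source;
//   std::mt19937 rng(seed_source);
//   // Unbiased value in [0, 6), i.e. a die roll after +1:
//   auto roll = arrow_vendored::pcg_extras::bounded_rand(rng, 6) + 1;
// ---------------------------------------------------------------------------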
This code allows the idiom: +// +// cout << printable_typename() +// +// to print out my_foo_type_t (or its concrete type if it is a synonym) + +#if __cpp_rtti || __GXX_RTTI + +template +struct printable_typename {}; + +template +std::ostream& operator<<(std::ostream& out, printable_typename) { + const char *implementation_typename = typeid(T).name(); +#ifdef __GNUC__ + int status; + char* pretty_name = + abi::__cxa_demangle(implementation_typename, nullptr, nullptr, &status); + if (status == 0) + out << pretty_name; + free(static_cast(pretty_name)); + if (status == 0) + return out; +#endif + out << implementation_typename; + return out; +} + +#endif // __cpp_rtti || __GXX_RTTI + +} // namespace pcg_extras +} // namespace arrow_vendored + +#endif // PCG_EXTRAS_HPP_INCLUDED diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a864ba0a2c59be60c6ac3fa3895f1832ed0e6923 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_random.hpp @@ -0,0 +1,1954 @@ +/* + * PCG Random Number Generation for C++ + * + * Copyright 2014-2019 Melissa O'Neill , + * and the PCG Project contributors. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + * Licensed under the Apache License, Version 2.0 (provided in + * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) + * or under the MIT license (provided in LICENSE-MIT.txt and at + * http://opensource.org/licenses/MIT), at your option. This file may not + * be copied, modified, or distributed except according to those terms. + * + * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either + * express or implied. See your chosen license for details. + * + * For additional information about the PCG random number generation scheme, + * visit http://www.pcg-random.org/. + */ + +/* + * This code provides the reference implementation of the PCG family of + * random number generators. 
The code is complex because it implements + * + * - several members of the PCG family, specifically members corresponding + * to the output functions: + * - XSH RR (good for 64-bit state, 32-bit output) + * - XSH RS (good for 64-bit state, 32-bit output) + * - XSL RR (good for 128-bit state, 64-bit output) + * - RXS M XS (statistically most powerful generator) + * - XSL RR RR (good for 128-bit state, 128-bit output) + * - and RXS, RXS M, XSH, XSL (mostly for testing) + * - at potentially *arbitrary* bit sizes + * - with four different techniques for random streams (MCG, one-stream + * LCG, settable-stream LCG, unique-stream LCG) + * - and the extended generation schemes allowing arbitrary periods + * - with all features of C++11 random number generation (and more), + * some of which are somewhat painful, including + * - initializing with a SeedSequence which writes 32-bit values + * to memory, even though the state of the generator may not + * use 32-bit values (it might use smaller or larger integers) + * - I/O for RNGs and a prescribed format, which needs to handle + * the issue that 8-bit and 128-bit integers don't have working + * I/O routines (e.g., normally 8-bit = char, not integer) + * - equality and inequality for RNGs + * - and a number of convenience typedefs to mask all the complexity + * + * The code employes a fairly heavy level of abstraction, and has to deal + * with various C++ minutia. If you're looking to learn about how the PCG + * scheme works, you're probably best of starting with one of the other + * codebases (see www.pcg-random.org). But if you're curious about the + * constants for the various output functions used in those other, simpler, + * codebases, this code shows how they are calculated. + * + * On the positive side, at least there are convenience typedefs so that you + * can say + * + * pcg32 myRNG; + * + * rather than: + * + * pcg_detail::engine< + * uint32_t, // Output Type + * uint64_t, // State Type + * pcg_detail::xsh_rr_mixin, true, // Output Func + * pcg_detail::specific_stream, // Stream Kind + * pcg_detail::default_multiplier // LCG Mult + * > myRNG; + * + */ + +#ifndef PCG_RAND_HPP_INCLUDED +#define PCG_RAND_HPP_INCLUDED 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(disable:4146) +#endif + +#ifdef _MSC_VER + #define PCG_ALWAYS_INLINE __forceinline +#elif __GNUC__ + #define PCG_ALWAYS_INLINE __attribute__((always_inline)) +#else + #define PCG_ALWAYS_INLINE inline +#endif + +/* + * The pcg_extras namespace contains some support code that is likley to + * be useful for a variety of RNGs, including: + * - 128-bit int support for platforms where it isn't available natively + * - bit twiddling operations + * - I/O of 128-bit and 8-bit integers + * - Handling the evilness of SeedSeq + * - Support for efficiently producing random numbers less than a given + * bound + */ + +#include "pcg_extras.hpp" + +namespace arrow_vendored { +namespace pcg_detail { + +using namespace pcg_extras; + +/* + * The LCG generators need some constants to function. This code lets you + * look up the constant by *type*. For example + * + * default_multiplier::multiplier() + * + * gives you the default multipler for 32-bit integers. We use the name + * of the constant and not a generic word like value to allow these classes + * to be used as mixins. 
+ */ + +template +struct default_multiplier { + // Not defined for an arbitrary type +}; + +template +struct default_increment { + // Not defined for an arbitrary type +}; + +#define PCG_DEFINE_CONSTANT(type, what, kind, constant) \ + template <> \ + struct what ## _ ## kind { \ + static constexpr type kind() { \ + return constant; \ + } \ + }; + +PCG_DEFINE_CONSTANT(uint8_t, default, multiplier, 141U) +PCG_DEFINE_CONSTANT(uint8_t, default, increment, 77U) + +PCG_DEFINE_CONSTANT(uint16_t, default, multiplier, 12829U) +PCG_DEFINE_CONSTANT(uint16_t, default, increment, 47989U) + +PCG_DEFINE_CONSTANT(uint32_t, default, multiplier, 747796405U) +PCG_DEFINE_CONSTANT(uint32_t, default, increment, 2891336453U) + +PCG_DEFINE_CONSTANT(uint64_t, default, multiplier, 6364136223846793005ULL) +PCG_DEFINE_CONSTANT(uint64_t, default, increment, 1442695040888963407ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, default, multiplier, + PCG_128BIT_CONSTANT(2549297995355413924ULL,4865540595714422341ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, default, increment, + PCG_128BIT_CONSTANT(6364136223846793005ULL,1442695040888963407ULL)) + +/* Alternative (cheaper) multipliers for 128-bit */ + +template +struct cheap_multiplier : public default_multiplier { + // For most types just use the default. +}; + +template <> +struct cheap_multiplier { + static constexpr uint64_t multiplier() { + return 0xda942042e4dd58b5ULL; + } +}; + + +/* + * Each PCG generator is available in four variants, based on how it applies + * the additive constant for its underlying LCG; the variations are: + * + * single stream - all instances use the same fixed constant, thus + * the RNG always somewhere in same sequence + * mcg - adds zero, resulting in a single stream and reduced + * period + * specific stream - the constant can be changed at any time, selecting + * a different random sequence + * unique stream - the constant is based on the memory address of the + * object, thus every RNG has its own unique sequence + * + * This variation is provided though mixin classes which define a function + * value called increment() that returns the nesessary additive constant. + */ + + + +/* + * unique stream + */ + + +template +class unique_stream { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + constexpr itype increment() const { + return itype(reinterpret_cast(this) | 1); + } + + constexpr itype stream() const + { + return increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype) < sizeof(size_t) ? sizeof(itype) + : sizeof(size_t))*8 - 1u; + } + +protected: + constexpr unique_stream() = default; +}; + + +/* + * no stream (mcg) + */ + +template +class no_stream { +protected: + static constexpr bool is_mcg = true; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) 
+ { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype increment() { + return 0; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr no_stream() = default; +}; + + +/* + * single stream/sequence (oneseq) + */ + +template +class oneseq_stream : public default_increment { +protected: + static constexpr bool is_mcg = false; + + // Is never called, but is provided for symmetry with specific_stream + void set_stream(...) + { + abort(); + } + +public: + typedef itype state_type; + + static constexpr itype stream() + { + return default_increment::increment() >> 1; + } + + static constexpr bool can_specify_stream = false; + + static constexpr size_t streams_pow2() + { + return 0u; + } + +protected: + constexpr oneseq_stream() = default; +}; + + +/* + * specific stream + */ + +template +class specific_stream { +protected: + static constexpr bool is_mcg = false; + + itype inc_ = default_increment::increment(); + +public: + typedef itype state_type; + typedef itype stream_state; + + constexpr itype increment() const { + return inc_; + } + + itype stream() + { + return inc_ >> 1; + } + + void set_stream(itype specific_seq) + { + inc_ = (specific_seq << 1) | 1; + } + + static constexpr bool can_specify_stream = true; + + static constexpr size_t streams_pow2() + { + return (sizeof(itype)*8) - 1u; + } + +protected: + specific_stream() = default; + + specific_stream(itype specific_seq) + : inc_(itype(specific_seq << 1) | itype(1U)) + { + // Nothing (else) to do. + } +}; + + +/* + * This is where it all comes together. This function joins together three + * mixin classes which define + * - the LCG additive constant (the stream) + * - the LCG multiplier + * - the output function + * in addition, we specify the type of the LCG state, and the result type, + * and whether to use the pre-advance version of the state for the output + * (increasing instruction-level parallelism) or the post-advance version + * (reducing register pressure). + * + * Given the high level of parameterization, the code has to use some + * template-metaprogramming tricks to handle some of the suble variations + * involved. + */ + +template , + typename multiplier_mixin = default_multiplier > +class engine : protected output_mixin, + public stream_mixin, + protected multiplier_mixin { +protected: + itype state_; + + struct can_specify_stream_tag {}; + struct no_specifiable_stream_tag {}; + + using stream_mixin::increment; + using multiplier_mixin::multiplier; + +public: + typedef xtype result_type; + typedef itype state_type; + + static constexpr size_t period_pow2() + { + return sizeof(state_type)*8 - 2*stream_mixin::is_mcg; + } + + // It would be nice to use std::numeric_limits for these, but + // we can't be sure that it'd be defined for the 128-bit types. 
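// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header): every engine
// instance advances with the LCG step state = state * multiplier() +
// increment() (see bump() below); the mixins only select the output
// permutation and stream.  With the convenience typedefs defined later:
//
//   pcg32 rng(42u, 54u);   // seed value, stream selector
//   uint32_t x = rng();    // next 32-bit output
//   uint32_t y = rng(10);  // bounded draw, uniform in [0, 10)
//   rng.advance(1000);     // jump ahead 1000 steps in O(log n)
//   rng.backstep(1000);    // and back again
// ---------------------------------------------------------------------------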
+ + static constexpr result_type min() + { + return result_type(0UL); + } + + static constexpr result_type max() + { + return result_type(~result_type(0UL)); + } + +protected: + itype bump(itype state) + { + return state * multiplier() + increment(); + } + + itype base_generate() + { + return state_ = bump(state_); + } + + itype base_generate0() + { + itype old_state = state_; + state_ = bump(state_); + return old_state; + } + +public: + result_type operator()() + { + if (output_previous) + return this->output(base_generate0()); + else + return this->output(base_generate()); + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + +protected: + static itype advance(itype state, itype delta, + itype cur_mult, itype cur_plus); + + static itype distance(itype cur_state, itype newstate, itype cur_mult, + itype cur_plus, itype mask = ~itype(0U)); + + itype distance(itype newstate, itype mask = itype(~itype(0U))) const + { + return distance(state_, newstate, multiplier(), increment(), mask); + } + +public: + void advance(itype delta) + { + state_ = advance(state_, delta, this->multiplier(), this->increment()); + } + + void backstep(itype delta) + { + advance(-delta); + } + + void discard(itype delta) + { + advance(delta); + } + + bool wrapped() + { + if (stream_mixin::is_mcg) { + // For MCGs, the low order two bits never change. In this + // implementation, we keep them fixed at 3 to make this test + // easier. + return state_ == 3; + } else { + return state_ == 0; + } + } + + engine(itype state = itype(0xcafef00dd15ea5e5ULL)) + : state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + engine(itype state, typename sm::stream_state stream_seed) + : stream_mixin(stream_seed), + state_(this->is_mcg ? state|state_type(3U) + : bump(state + this->increment())) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + !stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + no_specifiable_stream_tag>::type = {}) + : engine(generate_one(std::forward(seedSeq))) + { + // Nothing else to do. + } + + template + engine(SeedSeq&& seedSeq, typename std::enable_if< + stream_mixin::can_specify_stream + && !std::is_convertible::value + && !std::is_convertible::value, + can_specify_stream_tag>::type = {}) + { + itype seeddata[2]; + generate_to<2>(std::forward(seedSeq), seeddata); + seed(seeddata[1], seeddata[0]); + } + + + template + void seed(Args&&... 
args) + { + new (this) engine(std::forward(args)...); + } + + template + friend bool operator==(const engine&, + const engine&); + + template + friend itype1 operator-(const engine&, + const engine&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const engine&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + engine& rng); +}; + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const engine& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + + +template +std::basic_istream& +operator>>(std::basic_istream& in, + engine& rng) +{ + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + itype multiplier, increment, state; + in >> multiplier >> increment >> state; + + if (!in.fail()) { + bool good = true; + if (multiplier != rng.multiplier()) { + good = false; + } else if (rng.can_specify_stream) { + rng.set_stream(increment >> 1); + } else if (increment != rng.increment()) { + good = false; + } + if (good) { + rng.state_ = state; + } else { + in.clear(std::ios::failbit); + } + } + + in.flags(orig_flags); + return in; +} + + +template +itype engine::advance( + itype state, itype delta, itype cur_mult, itype cur_plus) +{ + // The method used here is based on Brown, "Random Number Generation + // with Arbitrary Stride,", Transactions of the American Nuclear + // Society (Nov. 1994). The algorithm is very similar to fast + // exponentiation. + // + // Even though delta is an unsigned integer, we can pass a + // signed integer to go backwards, it just goes "the long way round". + + constexpr itype ZERO = 0u; // itype may be a non-trivial types, so + constexpr itype ONE = 1u; // we define some ugly constants. + itype acc_mult = 1; + itype acc_plus = 0; + while (delta > ZERO) { + if (delta & ONE) { + acc_mult *= cur_mult; + acc_plus = acc_plus*cur_mult + cur_plus; + } + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + delta >>= 1; + } + return acc_mult * state + acc_plus; +} + +template +itype engine::distance( + itype cur_state, itype newstate, itype cur_mult, itype cur_plus, itype mask) +{ + constexpr itype ONE = 1u; // itype could be weird, so use constant + bool is_mcg = cur_plus == itype(0); + itype the_bit = is_mcg ? itype(4u) : itype(1u); + itype distance = 0u; + while ((cur_state & mask) != (newstate & mask)) { + if ((cur_state & the_bit) != (newstate & the_bit)) { + cur_state = cur_state * cur_mult + cur_plus; + distance |= the_bit; + } + assert((cur_state & the_bit) == (newstate & the_bit)); + the_bit <<= 1; + cur_plus = (cur_mult+ONE)*cur_plus; + cur_mult *= cur_mult; + } + return is_mcg ? 
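+               // (the loop above walks the_bit upward starting from bit 2
+               //  when cur_plus == 0, because the low two bits of an MCG's
+               //  state never change; the accumulated distance is therefore
+               //  a multiple of 4, which we divide back out here -- e.g. a
+               //  raw value of 12 means the states are 3 steps apart)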
distance >> 2 : distance; +} + +template +itype operator-(const engine& lhs, + const engine& rhs) +{ + static_assert( + std::is_same::value && + std::is_same::value, + "Incomparable generators"); + if (lhs.increment() == rhs.increment()) { + return rhs.distance(lhs.state_); + } else { + constexpr itype ONE = 1u; + itype lhs_diff = lhs.increment() + (lhs.multiplier()-ONE) * lhs.state_; + itype rhs_diff = rhs.increment() + (rhs.multiplier()-ONE) * rhs.state_; + if ((lhs_diff & itype(3u)) != (rhs_diff & itype(3u))) { + rhs_diff = -rhs_diff; + } + return rhs.distance(rhs_diff, lhs_diff, rhs.multiplier(), itype(0u)); + } +} + + +template +bool operator==(const engine& lhs, + const engine& rhs) +{ + return (lhs.multiplier() == rhs.multiplier()) + && (lhs.increment() == rhs.increment()) + && (lhs.state_ == rhs.state_); +} + +template +inline bool operator!=(const engine& lhs, + const engine& rhs) +{ + return !operator==(lhs,rhs); +} + + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using oneseq_base = engine, output_previous, + oneseq_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using unique_base = engine, output_previous, + unique_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using setseq_base = engine, output_previous, + specific_stream, + multiplier_mixin >; + +template class output_mixin, + bool output_previous = (sizeof(itype) <= 8), + template class multiplier_mixin = default_multiplier> +using mcg_base = engine, output_previous, + no_stream, + multiplier_mixin >; + +/* + * OUTPUT FUNCTIONS. + * + * These are the core of the PCG generation scheme. They specify how to + * turn the base LCG's internal state into the output value of the final + * generator. + * + * They're implemented as mixin classes. + * + * All of the classes have code that is written to allow it to be applied + * at *arbitrary* bit sizes, although in practice they'll only be used at + * standard sizes supported by C++. + */ + +/* + * XSH RS -- high xorshift, followed by a random shift + * + * Fast. A good performer. + */ + +template +struct xsh_rs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t opbits = + sparebits-5 >= 64 ? 5 + : sparebits-4 >= 32 ? 4 + : sparebits-3 >= 16 ? 3 + : sparebits-2 >= 4 ? 2 + : sparebits-1 >= 1 ? 1 + : 0; + constexpr bitcount_t mask = (1 << opbits) - 1; + constexpr bitcount_t maxrandshift = mask; + constexpr bitcount_t topspare = opbits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = topspare + (xtypebits+maxrandshift)/2; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> xshift; + xtype result = xtype(internal >> (bottomspare - maxrandshift + rshift)); + return result; + } +}; + +/* + * XSH RR -- high xorshift, followed by a random rotate + * + * Fast. A good performer. Slightly better statistically than XSH RS. 
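+ *
+ * (As a concrete instance: with 64 bits of state and 32 bits of output,
+ * the constants below work out to opbits = 5, xshift = 18 and
+ * bottomspare = 27, so the mixin computes the familiar pcg32 output step:
+ *
+ *     uint32_t rot = uint32_t(state >> 59u);
+ *     state ^= state >> 18u;
+ *     return rotr(uint32_t(state >> 27u), rot);
+ *
+ * i.e. the top five bits of state choose how far to rotate the next 32.)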
+ */
+
+template <typename xtype, typename itype>
+struct xsh_rr_mixin {
+    static xtype output(itype internal)
+    {
+        constexpr bitcount_t bits        = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t xtypebits   = bitcount_t(sizeof(xtype)*8);
+        constexpr bitcount_t sparebits   = bits - xtypebits;
+        constexpr bitcount_t wantedopbits =
+                              xtypebits >= 128 ? 7
+                            : xtypebits >=  64 ? 6
+                            : xtypebits >=  32 ? 5
+                            : xtypebits >=  16 ? 4
+                            :                    3;
+        constexpr bitcount_t opbits =
+                              sparebits >= wantedopbits ? wantedopbits
+                                                        : sparebits;
+        constexpr bitcount_t amplifier = wantedopbits - opbits;
+        constexpr bitcount_t mask = (1 << opbits) - 1;
+        constexpr bitcount_t topspare    = opbits;
+        constexpr bitcount_t bottomspare = sparebits - topspare;
+        constexpr bitcount_t xshift      = (topspare + xtypebits)/2;
+
+        bitcount_t rot = opbits ? bitcount_t(internal >> (bits - opbits)) & mask
+                                : 0;
+        bitcount_t amprot = (rot << amplifier) & mask;
+        internal ^= internal >> xshift;
+        xtype result = xtype(internal >> bottomspare);
+        result = rotr(result, amprot);
+        return result;
+    }
+};
+
+/*
+ * RXS -- random xorshift
+ */
+
+template <typename xtype, typename itype>
+struct rxs_mixin {
+    static xtype output_rxs(itype internal)
+    {
+        constexpr bitcount_t bits        = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t xtypebits   = bitcount_t(sizeof(xtype)*8);
+        constexpr bitcount_t shift       = bits - xtypebits;
+        constexpr bitcount_t extrashift  = (xtypebits - shift)/2;
+        bitcount_t rshift = shift > 64+8 ? (internal >> (bits - 6)) & 63
+                          : shift > 32+4 ? (internal >> (bits - 5)) & 31
+                          : shift > 16+2 ? (internal >> (bits - 4)) & 15
+                          : shift >  8+1 ? (internal >> (bits - 3)) & 7
+                          : shift >  4+1 ? (internal >> (bits - 2)) & 3
+                          : shift >  2+1 ? (internal >> (bits - 1)) & 1
+                          :                0;
+        internal ^= internal >> (shift + extrashift - rshift);
+        xtype result = internal >> rshift;
+        return result;
+    }
+};
+
+/*
+ * RXS M XS -- random xorshift, mcg multiply, fixed xorshift
+ *
+ * The most statistically powerful generator, but all those steps
+ * make it slower than some of the others.  We give it the rottenest jobs.
+ *
+ * Because it's usually used in contexts where the state type and the
+ * result type are the same, it is a permutation and is thus invertible.
+ * We thus provide a function to invert it.  This function is used for
+ * the "inside out" generator used by the extended generator.
+ */
+
+/* Defined constants (as template specializations) for the multiplication
+ * step.  They're actually all derived by truncating the 128-bit constant,
+ * which was computed to be a good "universal" constant.
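+ *
+ * (The truncation is visible in the decimal values below: 217 is 0xD9,
+ * 62169 is 0xF2D9, 277803737 is 0x108EF2D9, and the low 32 bits of the
+ * 64-bit multiplier 12605985483714917081 are 0x108EF2D9 again; each
+ * constant is the next larger one cut down to size.)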
+ */ + +template +struct mcg_multiplier { + // Not defined for an arbitrary type +}; + +template +struct mcg_unmultiplier { + // Not defined for an arbitrary type +}; + +PCG_DEFINE_CONSTANT(uint8_t, mcg, multiplier, 217U) +PCG_DEFINE_CONSTANT(uint8_t, mcg, unmultiplier, 105U) + +PCG_DEFINE_CONSTANT(uint16_t, mcg, multiplier, 62169U) +PCG_DEFINE_CONSTANT(uint16_t, mcg, unmultiplier, 28009U) + +PCG_DEFINE_CONSTANT(uint32_t, mcg, multiplier, 277803737U) +PCG_DEFINE_CONSTANT(uint32_t, mcg, unmultiplier, 2897767785U) + +PCG_DEFINE_CONSTANT(uint64_t, mcg, multiplier, 12605985483714917081ULL) +PCG_DEFINE_CONSTANT(uint64_t, mcg, unmultiplier, 15009553638781119849ULL) + +PCG_DEFINE_CONSTANT(pcg128_t, mcg, multiplier, + PCG_128BIT_CONSTANT(17766728186571221404ULL, 12605985483714917081ULL)) +PCG_DEFINE_CONSTANT(pcg128_t, mcg, unmultiplier, + PCG_128BIT_CONSTANT(14422606686972528997ULL, 15009553638781119849ULL)) + + +template +struct rxs_m_xs_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = + opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + result ^= result >> ((2U*xtypebits+2U)/3U); + return result; + } + + static itype unoutput(itype internal) + { + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = bits >= 128 ? 6 + : bits >= 64 ? 5 + : bits >= 32 ? 4 + : bits >= 16 ? 3 + : 2; + constexpr bitcount_t mask = (1 << opbits) - 1; + + internal = unxorshift(internal, bits, (2U*bits+2U)/3U); + + internal *= mcg_unmultiplier::unmultiplier(); + + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal = unxorshift(internal, bits, opbits + rshift); + + return internal; + } +}; + + +/* + * RXS M -- random xorshift, mcg multiply + */ + +template +struct rxs_m_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t opbits = xtypebits >= 128 ? 6 + : xtypebits >= 64 ? 5 + : xtypebits >= 32 ? 4 + : xtypebits >= 16 ? 3 + : 2; + constexpr bitcount_t shift = bits - xtypebits; + constexpr bitcount_t mask = (1 << opbits) - 1; + bitcount_t rshift = opbits ? (internal >> (bits - opbits)) & mask : 0; + internal ^= internal >> (opbits + rshift); + internal *= mcg_multiplier::multiplier(); + xtype result = internal >> shift; + return result; + } +}; + + +/* + * DXSM -- double xorshift multiply + * + * This is a new, more powerful output permutation (added in 2019). It's + * a more comprehensive scrambling than RXS M, but runs faster on 128-bit + * types. Although primarily intended for use at large sizes, also works + * at smaller sizes as well. + * + * This permutation is similar to xorshift multiply hash functions, except + * that one of the multipliers is the LCG multiplier (to avoid needing to + * have a second constant) and the other is based on the low-order bits. 
+ * This latter aspect means that the scrambling applied to the high bits
+ * depends on the low bits, and makes it (to my eye) impractical to back
+ * out the permutation without having the low-order bits.
+ */
+
+template <typename xtype, typename itype>
+struct dxsm_mixin {
+    inline xtype output(itype internal)
+    {
+        constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8);
+        constexpr bitcount_t itypebits = bitcount_t(sizeof(itype) * 8);
+        static_assert(xtypebits <= itypebits/2,
+                      "Output type must be half the size of the state type.");
+
+        xtype hi = xtype(internal >> (itypebits - xtypebits));
+        xtype lo = xtype(internal);
+
+        lo |= 1;
+        hi ^= hi >> (xtypebits/2);
+        hi *= xtype(cheap_multiplier<itype>::multiplier());
+        hi ^= hi >> (3*(xtypebits/4));
+        hi *= lo;
+        return hi;
+    }
+};
+
+
+/*
+ * XSL RR -- fixed xorshift (to low bits), random rotate
+ *
+ * Useful for 128-bit types that are split across two CPU registers.
+ */
+
+template <typename xtype, typename itype>
+struct xsl_rr_mixin {
+    static xtype output(itype internal)
+    {
+        constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8);
+        constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t sparebits = bits - xtypebits;
+        constexpr bitcount_t wantedopbits = xtypebits >= 128 ? 7
+                                          : xtypebits >=  64 ? 6
+                                          : xtypebits >=  32 ? 5
+                                          : xtypebits >=  16 ? 4
+                                          :                    3;
+        constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits
+                                                                : sparebits;
+        constexpr bitcount_t amplifier = wantedopbits - opbits;
+        constexpr bitcount_t mask = (1 << opbits) - 1;
+        constexpr bitcount_t topspare = sparebits;
+        constexpr bitcount_t bottomspare = sparebits - topspare;
+        constexpr bitcount_t xshift = (topspare + xtypebits) / 2;
+
+        bitcount_t rot =
+            opbits ? bitcount_t(internal >> (bits - opbits)) & mask : 0;
+        bitcount_t amprot = (rot << amplifier) & mask;
+        internal ^= internal >> xshift;
+        xtype result = xtype(internal >> bottomspare);
+        result = rotr(result, amprot);
+        return result;
+    }
+};
+
+
+/*
+ * XSL RR RR -- fixed xorshift (to low bits), random rotate (both parts)
+ *
+ * Useful for 128-bit types that are split across two CPU registers.
+ * If you really want an invertible 128-bit RNG, I guess this is the one.
+ */
+
+template <typename T> struct halfsize_trait {};
+template <> struct halfsize_trait<pcg128_t>  { typedef uint64_t type; };
+template <> struct halfsize_trait<uint64_t>  { typedef uint32_t type; };
+template <> struct halfsize_trait<uint32_t>  { typedef uint16_t type; };
+template <> struct halfsize_trait<uint16_t>  { typedef uint8_t type; };
+
+template <typename xtype, typename itype>
+struct xsl_rr_rr_mixin {
+    typedef typename halfsize_trait<itype>::type htype;
+
+    static itype output(itype internal)
+    {
+        constexpr bitcount_t htypebits = bitcount_t(sizeof(htype) * 8);
+        constexpr bitcount_t bits      = bitcount_t(sizeof(itype) * 8);
+        constexpr bitcount_t sparebits = bits - htypebits;
+        constexpr bitcount_t wantedopbits = htypebits >= 128 ? 7
+                                          : htypebits >=  64 ? 6
+                                          : htypebits >=  32 ? 5
+                                          : htypebits >=  16 ? 4
+                                          :                    3;
+        constexpr bitcount_t opbits = sparebits >= wantedopbits ? wantedopbits
+                                                                : sparebits;
+        constexpr bitcount_t amplifier = wantedopbits - opbits;
+        constexpr bitcount_t mask = (1 << opbits) - 1;
+        constexpr bitcount_t topspare = sparebits;
+        constexpr bitcount_t xshift = (topspare + htypebits) / 2;
+
+        bitcount_t rot =
+            opbits ?
bitcount_t(internal >> (bits - opbits)) & mask : 0; + bitcount_t amprot = (rot << amplifier) & mask; + internal ^= internal >> xshift; + htype lowbits = htype(internal); + lowbits = rotr(lowbits, amprot); + htype highbits = htype(internal >> topspare); + bitcount_t rot2 = lowbits & mask; + bitcount_t amprot2 = (rot2 << amplifier) & mask; + highbits = rotr(highbits, amprot2); + return (itype(highbits) << topspare) ^ itype(lowbits); + } +}; + + +/* + * XSH -- fixed xorshift (to high bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsh_mixin { + static xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = 0; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + +/* + * XSL -- fixed xorshift (to low bits) + * + * You shouldn't use this at 64-bits or less. + */ + +template +struct xsl_mixin { + inline xtype output(itype internal) + { + constexpr bitcount_t xtypebits = bitcount_t(sizeof(xtype) * 8); + constexpr bitcount_t bits = bitcount_t(sizeof(itype) * 8); + constexpr bitcount_t sparebits = bits - xtypebits; + constexpr bitcount_t topspare = sparebits; + constexpr bitcount_t bottomspare = sparebits - topspare; + constexpr bitcount_t xshift = (topspare + xtypebits) / 2; + + internal ^= internal >> xshift; + xtype result = internal >> bottomspare; + return result; + } +}; + + +/* ---- End of Output Functions ---- */ + + +template +struct inside_out : private baseclass { + inside_out() = delete; + + typedef typename baseclass::result_type result_type; + typedef typename baseclass::state_type state_type; + static_assert(sizeof(result_type) == sizeof(state_type), + "Require a RNG whose output function is a permutation"); + + static bool external_step(result_type& randval, size_t i) + { + state_type state = baseclass::unoutput(randval); + state = state * baseclass::multiplier() + baseclass::increment() + + state_type(i*2); + result_type result = baseclass::output(state); + randval = result; + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + return result == zero; + } + + static bool external_advance(result_type& randval, size_t i, + result_type delta, bool forwards = true) + { + state_type state = baseclass::unoutput(randval); + state_type mult = baseclass::multiplier(); + state_type inc = baseclass::increment() + state_type(i*2); + state_type zero = + baseclass::is_mcg ? state & state_type(3U) : state_type(0U); + state_type dist_to_zero = baseclass::distance(state, zero, mult, inc); + bool crosses_zero = + forwards ? 
dist_to_zero <= delta + : (-dist_to_zero) <= delta; + if (!forwards) + delta = -delta; + state = baseclass::advance(state, delta, mult, inc); + randval = baseclass::output(state); + return crosses_zero; + } +}; + + +template +class extended : public baseclass { +public: + typedef typename baseclass::state_type state_type; + typedef typename baseclass::result_type result_type; + typedef inside_out insideout; + +private: + static constexpr bitcount_t rtypebits = sizeof(result_type)*8; + static constexpr bitcount_t stypebits = sizeof(state_type)*8; + + static constexpr bitcount_t tick_limit_pow2 = 64U; + + static constexpr size_t table_size = 1UL << table_pow2; + static constexpr size_t table_shift = stypebits - table_pow2; + static constexpr state_type table_mask = + (state_type(1U) << table_pow2) - state_type(1U); + + static constexpr bool may_tick = + (advance_pow2 < stypebits) && (advance_pow2 < tick_limit_pow2); + static constexpr size_t tick_shift = stypebits - advance_pow2; + static constexpr state_type tick_mask = + may_tick ? state_type( + (uint64_t(1) << (advance_pow2*may_tick)) - 1) + // ^-- stupidity to appease GCC warnings + : ~state_type(0U); + + static constexpr bool may_tock = stypebits < tick_limit_pow2; + + result_type data_[table_size]; + + PCG_NOINLINE void advance_table(); + + PCG_NOINLINE void advance_table(state_type delta, bool isForwards = true); + + result_type& get_extended_value() + { + state_type state = this->state_; + if (kdd && baseclass::is_mcg) { + // The low order bits of an MCG are constant, so drop them. + state >>= 2; + } + size_t index = kdd ? state & table_mask + : state >> table_shift; + + if (may_tick) { + bool tick = kdd ? (state & tick_mask) == state_type(0u) + : (state >> tick_shift) == state_type(0u); + if (tick) + advance_table(); + } + if (may_tock) { + bool tock = state == state_type(0u); + if (tock) + advance_table(); + } + return data_[index]; + } + +public: + static constexpr size_t period_pow2() + { + return baseclass::period_pow2() + table_size*extvalclass::period_pow2(); + } + + PCG_ALWAYS_INLINE result_type operator()() + { + result_type rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + return lhs ^ rhs; + } + + result_type operator()(result_type upper_bound) + { + return bounded_rand(*this, upper_bound); + } + + void set(result_type wanted) + { + result_type& rhs = get_extended_value(); + result_type lhs = this->baseclass::operator()(); + rhs = lhs ^ wanted; + } + + void advance(state_type distance, bool forwards = true); + + void backstep(state_type distance) + { + advance(distance, false); + } + + extended(const result_type* data) + : baseclass() + { + datainit(data); + } + + extended(const result_type* data, state_type seed) + : baseclass(seed) + { + datainit(data); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. + + template + extended(const result_type* data, state_type seed, + typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + datainit(data); + } + + extended() + : baseclass() + { + selfinit(); + } + + extended(state_type seed) + : baseclass(seed) + { + selfinit(); + } + + // This function may or may not exist. It thus has to be a template + // to use SFINAE; users don't have to worry about its template-ness. 
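+    //
+    // (A sketch of this two-argument form, assuming the pcg32_k64 typedef
+    //  defined near the end of this header, whose base generator supports
+    //  selectable streams:
+    //
+    //      pcg32_k64 rng(uint64_t(42u), uint64_t(54u));   // seed, stream
+    //      uint32_t x = rng();
+    //
+    //  Distinct streams select distinct output sequences, as with the base
+    //  setseq generators.)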
+ + template + extended(state_type seed, typename bc::stream_state stream_seed) + : baseclass(seed, stream_seed) + { + selfinit(); + } + +private: + void selfinit(); + void datainit(const result_type* data); + +public: + + template::value + && !std::is_convertible::value>::type> + extended(SeedSeq&& seedSeq) + : baseclass(seedSeq) + { + generate_to(seedSeq, data_); + } + + template + void seed(Args&&... args) + { + new (this) extended(std::forward(args)...); + } + + template + friend bool operator==(const extended&, + const extended&); + + template + friend std::basic_ostream& + operator<<(std::basic_ostream& out, + const extended&); + + template + friend std::basic_istream& + operator>>(std::basic_istream& in, + extended&); + +}; + + +template +void extended::datainit( + const result_type* data) +{ + for (size_t i = 0; i < table_size; ++i) + data_[i] = data[i]; +} + +template +void extended::selfinit() +{ + // We need to fill the extended table with something, and we have + // very little provided data, so we use the base generator to + // produce values. Although not ideal (use a seed sequence, folks!), + // unexpected correlations are mitigated by + // - using XOR differences rather than the number directly + // - the way the table is accessed, its values *won't* be accessed + // in the same order the were written. + // - any strange correlations would only be apparent if we + // were to backstep the generator so that the base generator + // was generating the same values again + result_type lhs = baseclass::operator()(); + result_type rhs = baseclass::operator()(); + result_type xdiff = lhs - rhs; + for (size_t i = 0; i < table_size; ++i) { + data_[i] = baseclass::operator()() ^ xdiff; + } +} + +template +bool operator==(const extended& lhs, + const extended& rhs) +{ + auto& base_lhs = static_cast(lhs); + auto& base_rhs = static_cast(rhs); + return base_lhs == base_rhs + && std::equal( + std::begin(lhs.data_), std::end(lhs.data_), + std::begin(rhs.data_) + ); +} + +template +inline bool operator!=(const extended& lhs, + const extended& rhs) +{ + return !operator==(lhs, rhs); +} + +template +std::basic_ostream& +operator<<(std::basic_ostream& out, + const extended& rng) +{ + using pcg_extras::operator<<; + + auto orig_flags = out.flags(std::ios_base::dec | std::ios_base::left); + auto space = out.widen(' '); + auto orig_fill = out.fill(); + + out << rng.multiplier() << space + << rng.increment() << space + << rng.state_; + + for (const auto& datum : rng.data_) + out << space << datum; + + out.flags(orig_flags); + out.fill(orig_fill); + return out; +} + +template +std::basic_istream& +operator>>(std::basic_istream& in, + extended& rng) +{ + extended new_rng; + auto& base_rng = static_cast(new_rng); + in >> base_rng; + + if (in.fail()) + return in; + + using pcg_extras::operator>>; + + auto orig_flags = in.flags(std::ios_base::dec | std::ios_base::skipws); + + for (auto& datum : new_rng.data_) { + in >> datum; + if (in.fail()) + goto bail; + } + + rng = new_rng; + +bail: + in.flags(orig_flags); + return in; +} + + + +template +void +extended::advance_table() +{ + bool carry = false; + for (size_t i = 0; i < table_size; ++i) { + if (carry) { + carry = insideout::external_step(data_[i],i+1); + } + bool carry2 = insideout::external_step(data_[i],i+1); + carry = carry || carry2; + } +} + +template +void +extended::advance_table( + state_type delta, bool isForwards) +{ + typedef typename baseclass::state_type base_state_t; + typedef typename extvalclass::state_type ext_state_t; + constexpr 
bitcount_t basebits = sizeof(base_state_t)*8; + constexpr bitcount_t extbits = sizeof(ext_state_t)*8; + static_assert(basebits <= extbits || advance_pow2 > 0, + "Current implementation might overflow its carry"); + + base_state_t carry = 0; + for (size_t i = 0; i < table_size; ++i) { + base_state_t total_delta = carry + delta; + ext_state_t trunc_delta = ext_state_t(total_delta); + if (basebits > extbits) { + carry = total_delta >> extbits; + } else { + carry = 0; + } + carry += + insideout::external_advance(data_[i],i+1, trunc_delta, isForwards); + } +} + +template +void extended::advance( + state_type distance, bool forwards) +{ + static_assert(kdd, + "Efficient advance is too hard for non-kdd extension. " + "For a weak advance, cast to base class"); + state_type zero = + baseclass::is_mcg ? this->state_ & state_type(3U) : state_type(0U); + if (may_tick) { + state_type ticks = distance >> (advance_pow2*may_tick); + // ^-- stupidity to appease GCC + // warnings + state_type adv_mask = + baseclass::is_mcg ? tick_mask << 2 : tick_mask; + state_type next_advance_distance = this->distance(zero, adv_mask); + if (!forwards) + next_advance_distance = (-next_advance_distance) & tick_mask; + if (next_advance_distance < (distance & tick_mask)) { + ++ticks; + } + if (ticks) + advance_table(ticks, forwards); + } + if (forwards) { + if (may_tock && this->distance(zero) <= distance) + advance_table(); + baseclass::advance(distance); + } else { + if (may_tock && -(this->distance(zero)) <= distance) + advance_table(state_type(1U), false); + baseclass::advance(-distance); + } +} + +} // namespace pcg_detail + +namespace pcg_engines { + +using namespace pcg_detail; + +/* Predefined types for XSH RS */ + +typedef oneseq_base oneseq_xsh_rs_16_8; +typedef oneseq_base oneseq_xsh_rs_32_16; +typedef oneseq_base oneseq_xsh_rs_64_32; +typedef oneseq_base oneseq_xsh_rs_128_64; +typedef oneseq_base + cm_oneseq_xsh_rs_128_64; + +typedef unique_base unique_xsh_rs_16_8; +typedef unique_base unique_xsh_rs_32_16; +typedef unique_base unique_xsh_rs_64_32; +typedef unique_base unique_xsh_rs_128_64; +typedef unique_base + cm_unique_xsh_rs_128_64; + +typedef setseq_base setseq_xsh_rs_16_8; +typedef setseq_base setseq_xsh_rs_32_16; +typedef setseq_base setseq_xsh_rs_64_32; +typedef setseq_base setseq_xsh_rs_128_64; +typedef setseq_base + cm_setseq_xsh_rs_128_64; + +typedef mcg_base mcg_xsh_rs_16_8; +typedef mcg_base mcg_xsh_rs_32_16; +typedef mcg_base mcg_xsh_rs_64_32; +typedef mcg_base mcg_xsh_rs_128_64; +typedef mcg_base + cm_mcg_xsh_rs_128_64; + +/* Predefined types for XSH RR */ + +typedef oneseq_base oneseq_xsh_rr_16_8; +typedef oneseq_base oneseq_xsh_rr_32_16; +typedef oneseq_base oneseq_xsh_rr_64_32; +typedef oneseq_base oneseq_xsh_rr_128_64; +typedef oneseq_base + cm_oneseq_xsh_rr_128_64; + +typedef unique_base unique_xsh_rr_16_8; +typedef unique_base unique_xsh_rr_32_16; +typedef unique_base unique_xsh_rr_64_32; +typedef unique_base unique_xsh_rr_128_64; +typedef unique_base + cm_unique_xsh_rr_128_64; + +typedef setseq_base setseq_xsh_rr_16_8; +typedef setseq_base setseq_xsh_rr_32_16; +typedef setseq_base setseq_xsh_rr_64_32; +typedef setseq_base setseq_xsh_rr_128_64; +typedef setseq_base + cm_setseq_xsh_rr_128_64; + +typedef mcg_base mcg_xsh_rr_16_8; +typedef mcg_base mcg_xsh_rr_32_16; +typedef mcg_base mcg_xsh_rr_64_32; +typedef mcg_base mcg_xsh_rr_128_64; +typedef mcg_base + cm_mcg_xsh_rr_128_64; + + +/* Predefined types for RXS M XS */ + +typedef oneseq_base oneseq_rxs_m_xs_8_8; +typedef oneseq_base 
oneseq_rxs_m_xs_16_16; +typedef oneseq_base oneseq_rxs_m_xs_32_32; +typedef oneseq_base oneseq_rxs_m_xs_64_64; +typedef oneseq_base + oneseq_rxs_m_xs_128_128; +typedef oneseq_base + cm_oneseq_rxs_m_xs_128_128; + +typedef unique_base unique_rxs_m_xs_8_8; +typedef unique_base unique_rxs_m_xs_16_16; +typedef unique_base unique_rxs_m_xs_32_32; +typedef unique_base unique_rxs_m_xs_64_64; +typedef unique_base unique_rxs_m_xs_128_128; +typedef unique_base + cm_unique_rxs_m_xs_128_128; + +typedef setseq_base setseq_rxs_m_xs_8_8; +typedef setseq_base setseq_rxs_m_xs_16_16; +typedef setseq_base setseq_rxs_m_xs_32_32; +typedef setseq_base setseq_rxs_m_xs_64_64; +typedef setseq_base setseq_rxs_m_xs_128_128; +typedef setseq_base + cm_setseq_rxs_m_xs_128_128; + + // MCG versions don't make sense here, so aren't defined. + +/* Predefined types for RXS M */ + +typedef oneseq_base oneseq_rxs_m_16_8; +typedef oneseq_base oneseq_rxs_m_32_16; +typedef oneseq_base oneseq_rxs_m_64_32; +typedef oneseq_base oneseq_rxs_m_128_64; +typedef oneseq_base + cm_oneseq_rxs_m_128_64; + +typedef unique_base unique_rxs_m_16_8; +typedef unique_base unique_rxs_m_32_16; +typedef unique_base unique_rxs_m_64_32; +typedef unique_base unique_rxs_m_128_64; +typedef unique_base + cm_unique_rxs_m_128_64; + +typedef setseq_base setseq_rxs_m_16_8; +typedef setseq_base setseq_rxs_m_32_16; +typedef setseq_base setseq_rxs_m_64_32; +typedef setseq_base setseq_rxs_m_128_64; +typedef setseq_base + cm_setseq_rxs_m_128_64; + +typedef mcg_base mcg_rxs_m_16_8; +typedef mcg_base mcg_rxs_m_32_16; +typedef mcg_base mcg_rxs_m_64_32; +typedef mcg_base mcg_rxs_m_128_64; +typedef mcg_base + cm_mcg_rxs_m_128_64; + +/* Predefined types for DXSM */ + +typedef oneseq_base oneseq_dxsm_16_8; +typedef oneseq_base oneseq_dxsm_32_16; +typedef oneseq_base oneseq_dxsm_64_32; +typedef oneseq_base oneseq_dxsm_128_64; +typedef oneseq_base + cm_oneseq_dxsm_128_64; + +typedef unique_base unique_dxsm_16_8; +typedef unique_base unique_dxsm_32_16; +typedef unique_base unique_dxsm_64_32; +typedef unique_base unique_dxsm_128_64; +typedef unique_base + cm_unique_dxsm_128_64; + +typedef setseq_base setseq_dxsm_16_8; +typedef setseq_base setseq_dxsm_32_16; +typedef setseq_base setseq_dxsm_64_32; +typedef setseq_base setseq_dxsm_128_64; +typedef setseq_base + cm_setseq_dxsm_128_64; + +typedef mcg_base mcg_dxsm_16_8; +typedef mcg_base mcg_dxsm_32_16; +typedef mcg_base mcg_dxsm_64_32; +typedef mcg_base mcg_dxsm_128_64; +typedef mcg_base + cm_mcg_dxsm_128_64; + +/* Predefined types for XSL RR (only defined for "large" types) */ + +typedef oneseq_base oneseq_xsl_rr_64_32; +typedef oneseq_base oneseq_xsl_rr_128_64; +typedef oneseq_base + cm_oneseq_xsl_rr_128_64; + +typedef unique_base unique_xsl_rr_64_32; +typedef unique_base unique_xsl_rr_128_64; +typedef unique_base + cm_unique_xsl_rr_128_64; + +typedef setseq_base setseq_xsl_rr_64_32; +typedef setseq_base setseq_xsl_rr_128_64; +typedef setseq_base + cm_setseq_xsl_rr_128_64; + +typedef mcg_base mcg_xsl_rr_64_32; +typedef mcg_base mcg_xsl_rr_128_64; +typedef mcg_base + cm_mcg_xsl_rr_128_64; + + +/* Predefined types for XSL RR RR (only defined for "large" types) */ + +typedef oneseq_base + oneseq_xsl_rr_rr_64_64; +typedef oneseq_base + oneseq_xsl_rr_rr_128_128; +typedef oneseq_base + cm_oneseq_xsl_rr_rr_128_128; + +typedef unique_base + unique_xsl_rr_rr_64_64; +typedef unique_base + unique_xsl_rr_rr_128_128; +typedef unique_base + cm_unique_xsl_rr_rr_128_128; + +typedef setseq_base + setseq_xsl_rr_rr_64_64; +typedef setseq_base + 
        setseq_xsl_rr_rr_128_128;
+typedef setseq_base<pcg128_t, pcg128_t, xsl_rr_rr_mixin, true, cheap_multiplier>
+        cm_setseq_xsl_rr_rr_128_128;
+
+    // MCG versions don't make sense here, so aren't defined.
+
+/* Extended generators */
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2,
+          typename BaseRNG, bool kdd = true>
+using ext_std8 = extended<table_pow2, advance_pow2, BaseRNG,
+                          oneseq_rxs_m_xs_8_8, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2,
+          typename BaseRNG, bool kdd = true>
+using ext_std16 = extended<table_pow2, advance_pow2, BaseRNG,
+                           oneseq_rxs_m_xs_16_16, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2,
+          typename BaseRNG, bool kdd = true>
+using ext_std32 = extended<table_pow2, advance_pow2, BaseRNG,
+                           oneseq_rxs_m_xs_32_32, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2,
+          typename BaseRNG, bool kdd = true>
+using ext_std64 = extended<table_pow2, advance_pow2, BaseRNG,
+                           oneseq_rxs_m_xs_64_64, kdd>;
+
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_oneseq_rxs_m_xs_32_32 =
+          ext_std32<table_pow2, advance_pow2, oneseq_rxs_m_xs_32_32, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_mcg_xsh_rs_64_32 =
+          ext_std32<table_pow2, advance_pow2, mcg_xsh_rs_64_32, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_oneseq_xsh_rs_64_32 =
+          ext_std32<table_pow2, advance_pow2, oneseq_xsh_rs_64_32, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_setseq_xsh_rr_64_32 =
+          ext_std32<table_pow2, advance_pow2, setseq_xsh_rr_64_32, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_mcg_xsl_rr_128_64 =
+          ext_std64<table_pow2, advance_pow2, mcg_xsl_rr_128_64, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_oneseq_xsl_rr_128_64 =
+          ext_std64<table_pow2, advance_pow2, oneseq_xsl_rr_128_64, kdd>;
+
+template <bitcount_t table_pow2, bitcount_t advance_pow2, bool kdd = true>
+using ext_setseq_xsl_rr_128_64 =
+          ext_std64<table_pow2, advance_pow2, setseq_xsl_rr_128_64, kdd>;
+
+} // namespace pcg_engines
+
+typedef pcg_engines::setseq_xsh_rr_64_32        pcg32;
+typedef pcg_engines::oneseq_xsh_rr_64_32        pcg32_oneseq;
+typedef pcg_engines::unique_xsh_rr_64_32        pcg32_unique;
+typedef pcg_engines::mcg_xsh_rs_64_32           pcg32_fast;
+
+typedef pcg_engines::setseq_xsl_rr_128_64       pcg64;
+typedef pcg_engines::oneseq_xsl_rr_128_64       pcg64_oneseq;
+typedef pcg_engines::unique_xsl_rr_128_64       pcg64_unique;
+typedef pcg_engines::mcg_xsl_rr_128_64          pcg64_fast;
+
+typedef pcg_engines::setseq_rxs_m_xs_8_8        pcg8_once_insecure;
+typedef pcg_engines::setseq_rxs_m_xs_16_16      pcg16_once_insecure;
+typedef pcg_engines::setseq_rxs_m_xs_32_32      pcg32_once_insecure;
+typedef pcg_engines::setseq_rxs_m_xs_64_64      pcg64_once_insecure;
+typedef pcg_engines::setseq_xsl_rr_rr_128_128   pcg128_once_insecure;
+
+typedef pcg_engines::oneseq_rxs_m_xs_8_8        pcg8_oneseq_once_insecure;
+typedef pcg_engines::oneseq_rxs_m_xs_16_16      pcg16_oneseq_once_insecure;
+typedef pcg_engines::oneseq_rxs_m_xs_32_32      pcg32_oneseq_once_insecure;
+typedef pcg_engines::oneseq_rxs_m_xs_64_64      pcg64_oneseq_once_insecure;
+typedef pcg_engines::oneseq_xsl_rr_rr_128_128   pcg128_oneseq_once_insecure;
+
+
+// These two extended RNGs provide two-dimensionally equidistributed
+// 32-bit generators.  pcg32_k2_fast occupies the same space as pcg64,
+// and can be called twice to generate 64 bits, but does not require
+// 128-bit math; on 32-bit systems, it's faster than pcg64 as well.
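+//
+// (A sketch of the two-call idiom, with illustrative names only:
+//
+//      pcg32_k2 rng(seed_value);                    // seed_value: uint64_t
+//      uint64_t wide = (uint64_t(rng()) << 32) | rng();
+//
+//  Two-dimensional equidistribution is a guarantee about exactly this kind
+//  of pairing of consecutive outputs.)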
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<1,16,true>     pcg32_k2;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<1,32,true>     pcg32_k2_fast;
+
+// These eight extended RNGs have about as much state as arc4random
+//
+//  - the k variants are k-dimensionally equidistributed
+//  - the c variants offer better cryptographic security
+//
+// (just how good the cryptographic security is, is an open question)
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,true>     pcg32_k64;
+typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,true>        pcg32_k64_oneseq;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,true>     pcg32_k64_fast;
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<6,16,false>    pcg32_c64;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<6,32,false>    pcg32_c64_oneseq;
+typedef pcg_engines::ext_mcg_xsh_rs_64_32<6,32,false>       pcg32_c64_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,true>    pcg64_k32;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,true>   pcg64_k32_oneseq;
+typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,true>      pcg64_k32_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<5,16,false>   pcg64_c32;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<5,128,false>  pcg64_c32_oneseq;
+typedef pcg_engines::ext_mcg_xsl_rr_128_64<5,128,false>     pcg64_c32_fast;
+
+// These eight extended RNGs have more state than the Mersenne twister
+//
+//  - the k variants are k-dimensionally equidistributed
+//  - the c variants offer better cryptographic security
+//
+// (just how good the cryptographic security is, is an open question)
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,true>    pcg32_k1024;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,true>    pcg32_k1024_fast;
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<10,16,false>   pcg32_c1024;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<10,32,false>   pcg32_c1024_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,true>   pcg64_k1024;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,true>  pcg64_k1024_fast;
+
+typedef pcg_engines::ext_setseq_xsl_rr_128_64<10,16,false>  pcg64_c1024;
+typedef pcg_engines::ext_oneseq_xsl_rr_128_64<10,128,false> pcg64_c1024_fast;
+
+// These generators have an insanely huge period (2^524352), and are
+// suitable for silly party tricks, such as dumping out 64 KB ZIP files
+// at an arbitrary point in the future.  [Actually, over the full period
+// of the generator, it will produce every 64 KB ZIP file 2^64 times!]
+
+typedef pcg_engines::ext_setseq_xsh_rr_64_32<14,16,true>    pcg32_k16384;
+typedef pcg_engines::ext_oneseq_xsh_rs_64_32<14,32,true>    pcg32_k16384_fast;
+
+} // namespace arrow_vendored
+
+#ifdef _MSC_VER
+    #pragma warning(default:4146)
+#endif
+
+#endif // PCG_RAND_HPP_INCLUDED
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0181e69e4ef37fc279e4d067fd42d0cedacb9126
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp
@@ -0,0 +1,1008 @@
+/*
+ * PCG Random Number Generation for C++
+ *
+ * Copyright 2014-2021 Melissa O'Neill <oneill@pcg-random.org>,
+ *                     and the PCG Project contributors.
+ *
+ * SPDX-License-Identifier: (Apache-2.0 OR MIT)
+ *
+ * Licensed under the Apache License, Version 2.0 (provided in
+ * LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0)
+ * or under the MIT license (provided in LICENSE-MIT.txt and at
+ * http://opensource.org/licenses/MIT), at your option. This file may not
+ * be copied, modified, or distributed except according to those terms.
+ *
+ * Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied.  See your chosen license for details.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * visit http://www.pcg-random.org/.
+ */
+
+/*
+ * This code provides a C++ class that can provide 128-bit (or higher)
+ * integers.  To produce 2K-bit integers, it uses two K-bit integers,
+ * placed in a union that allows the code to also see them as four K/2 bit
+ * integers (and access them either directly by name, or by index).
+ *
+ * It may seem like we're reinventing the wheel here, because several
+ * libraries already exist that support large integers, but most existing
+ * libraries provide very generic multiprecision code, whereas here we're
+ * operating at a fixed size.  Also, most other libraries are fairly
+ * heavyweight.  So we use a direct implementation.  Sadly, it's much slower
+ * than hand-coded assembly or direct CPU support.
+ */
+
+#ifndef PCG_UINT128_HPP_INCLUDED
+#define PCG_UINT128_HPP_INCLUDED 1
+
+#include <cstdint>
+#include <cstdio>
+#include <cassert>
+#include <climits>
+#include <utility>
+#include <initializer_list>
+#include <type_traits>
+
+#if defined(_MSC_VER)  // Use MSVC++ intrinsics
+#include <intrin.h>
+#endif
+
+/*
+ * We want to lay the type out the same way that a native type would be laid
+ * out, which means we must know the machine's endianness, at compile time.
+ * This ugliness attempts to do so.
+ */
+
+#ifndef PCG_LITTLE_ENDIAN
+    #if defined(__BYTE_ORDER__)
+        #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+            #define PCG_LITTLE_ENDIAN 1
+        #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+            #define PCG_LITTLE_ENDIAN 0
+        #else
+            #error __BYTE_ORDER__ does not match a standard endian, pick a side
+        #endif
+    #elif __LITTLE_ENDIAN__ || _LITTLE_ENDIAN
+        #define PCG_LITTLE_ENDIAN 1
+    #elif __BIG_ENDIAN__ || _BIG_ENDIAN
+        #define PCG_LITTLE_ENDIAN 0
+    #elif __x86_64 || __x86_64__ || _M_X64 || __i386 || __i386__ || _M_IX86
+        #define PCG_LITTLE_ENDIAN 1
+    #elif __powerpc__ || __POWERPC__ || __ppc__ || __PPC__ \
+          || __m68k__ || __mc68000__
+        #define PCG_LITTLE_ENDIAN 0
+    #else
+        #error Unable to determine target endianness
+    #endif
+#endif
+
+#if INTPTR_MAX == INT64_MAX && !defined(PCG_64BIT_SPECIALIZATIONS)
+    #define PCG_64BIT_SPECIALIZATIONS 1
+#endif
+
+namespace arrow_vendored {
+namespace pcg_extras {
+
+// Recent versions of GCC have intrinsics we can use to quickly calculate
+// the number of leading and trailing zeros in a number.  If possible, we
+// use them, otherwise we fall back to old-fashioned bit twiddling to figure
+// them out.
+
+#ifndef PCG_BITCOUNT_T
+    typedef uint8_t bitcount_t;
+#else
+    typedef PCG_BITCOUNT_T bitcount_t;
+#endif
+
+/*
+ * Provide some useful helper functions
+ *      * flog2                 floor(log2(x))
+ *      * trailingzeros         number of trailing zero bits
+ */
+
+#if defined(__GNUC__)   // Any GNU-compatible compiler supporting C++11 has
+                        // some useful intrinsics we can use.
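+//
+// (Quick sanity examples, valid for all the implementations below:
+//  flog2(12) == 3, since 2^3 <= 12 < 2^4, and trailingzeros(12) == 2,
+//  since 12 is 0b1100; the clog2 helper further down rounds up instead,
+//  so clog2(12) == 4.  As with the raw compiler intrinsics, an input of
+//  zero is not meaningful for any of them.)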
+ +inline bitcount_t flog2(uint32_t v) +{ + return 31 - __builtin_clz(v); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + return __builtin_ctz(v); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return 63 - __builtin_clzl(v); +#elif UINT64_MAX == ULLONG_MAX + return 63 - __builtin_clzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if UINT64_MAX == ULONG_MAX + return __builtin_ctzl(v); +#elif UINT64_MAX == ULLONG_MAX + return __builtin_ctzll(v); +#else + #error Cannot find a function for uint64_t +#endif +} + +#elif defined(_MSC_VER) // Use MSVC++ intrinsics + +#pragma intrinsic(_BitScanReverse, _BitScanForward) +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) +#pragma intrinsic(_BitScanReverse64, _BitScanForward64) +#endif + +inline bitcount_t flog2(uint32_t v) +{ + unsigned long i; + _BitScanReverse(&i, v); + return bitcount_t(i); +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + unsigned long i; + _BitScanForward(&i, v); + return bitcount_t(i); +} + +inline bitcount_t flog2(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanReverse64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return high ? 32+flog2(high) : flog2(low); +#endif +} + +inline bitcount_t trailingzeros(uint64_t v) +{ +#if defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long i; + _BitScanForward64(&i, v); + return bitcount_t(i); +#else + // 32-bit x86 + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + return low ? trailingzeros(low) : trailingzeros(high)+32; +#endif +} + +#else // Otherwise, we fall back to bit twiddling + // implementations + +inline bitcount_t flog2(uint32_t v) +{ + // Based on code by Eric Cole and Mark Dickinson, which appears at + // https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn + + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + + v |= v >> 1; // first round down to one less than a power of 2 + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + + return multiplyDeBruijnBitPos[(uint32_t)(v * 0x07C4ACDDU) >> 27]; +} + +inline bitcount_t trailingzeros(uint32_t v) +{ + static const uint8_t multiplyDeBruijnBitPos[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + + return multiplyDeBruijnBitPos[((uint32_t)((v & -v) * 0x077CB531U)) >> 27]; +} + +inline bitcount_t flog2(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return high ? 32+flog2(high) : flog2(low); +} + +inline bitcount_t trailingzeros(uint64_t v) +{ + uint32_t high = v >> 32; + uint32_t low = uint32_t(v); + + return low ? trailingzeros(low) : trailingzeros(high)+32; +} + +#endif + +inline bitcount_t flog2(uint8_t v) +{ + return flog2(uint32_t(v)); +} + +inline bitcount_t flog2(uint16_t v) +{ + return flog2(uint32_t(v)); +} + +#if __SIZEOF_INT128__ +inline bitcount_t flog2(__uint128_t v) +{ + uint64_t high = uint64_t(v >> 64); + uint64_t low = uint64_t(v); + + return high ? 
64+flog2(high) : flog2(low); +} +#endif + +inline bitcount_t trailingzeros(uint8_t v) +{ + return trailingzeros(uint32_t(v)); +} + +inline bitcount_t trailingzeros(uint16_t v) +{ + return trailingzeros(uint32_t(v)); +} + +#if __SIZEOF_INT128__ +inline bitcount_t trailingzeros(__uint128_t v) +{ + uint64_t high = uint64_t(v >> 64); + uint64_t low = uint64_t(v); + return low ? trailingzeros(low) : trailingzeros(high)+64; +} +#endif + +template +inline bitcount_t clog2(UInt v) +{ + return flog2(v) + ((v & (-v)) != v); +} + +template +inline UInt addwithcarry(UInt x, UInt y, bool carryin, bool* carryout) +{ + UInt half_result = y + carryin; + UInt result = x + half_result; + *carryout = (half_result < y) || (result < x); + return result; +} + +template +inline UInt subwithcarry(UInt x, UInt y, bool carryin, bool* carryout) +{ + UInt half_result = y + carryin; + UInt result = x - half_result; + *carryout = (half_result < y) || (result > x); + return result; +} + + +template +class uint_x4 { +// private: + static constexpr unsigned int UINT_BITS = sizeof(UInt) * CHAR_BIT; +public: + union { +#if PCG_LITTLE_ENDIAN + struct { + UInt v0, v1, v2, v3; + } w; + struct { + UIntX2 v01, v23; + } d; +#else + struct { + UInt v3, v2, v1, v0; + } w; + struct { + UIntX2 v23, v01; + } d; +#endif + // For the array access versions, the code that uses the array + // must handle endian itself. Yuck. + UInt wa[4]; + }; + +public: + uint_x4() = default; + + constexpr uint_x4(UInt v3, UInt v2, UInt v1, UInt v0) +#if PCG_LITTLE_ENDIAN + : w{v0, v1, v2, v3} +#else + : w{v3, v2, v1, v0} +#endif + { + // Nothing (else) to do + } + + constexpr uint_x4(UIntX2 v23, UIntX2 v01) +#if PCG_LITTLE_ENDIAN + : d{v01,v23} +#else + : d{v23,v01} +#endif + { + // Nothing (else) to do + } + + constexpr uint_x4(UIntX2 v01) +#if PCG_LITTLE_ENDIAN + : d{v01, UIntX2(0)} +#else + : d{UIntX2(0),v01} +#endif + { + // Nothing (else) to do + } + + template::value + && sizeof(Integral) <= sizeof(UIntX2)) + >::type* = nullptr> + constexpr uint_x4(Integral v01) +#if PCG_LITTLE_ENDIAN + : d{UIntX2(v01), UIntX2(0)} +#else + : d{UIntX2(0), UIntX2(v01)} +#endif + { + // Nothing (else) to do + } + + explicit constexpr operator UIntX2() const + { + return d.v01; + } + + template::value + && sizeof(Integral) <= sizeof(UIntX2)) + >::type* = nullptr> + explicit constexpr operator Integral() const + { + return Integral(d.v01); + } + + explicit constexpr operator bool() const + { + return d.v01 || d.v23; + } + + template + friend uint_x4 operator*(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator*(const uint_x4&, V); + + template + friend std::pair< uint_x4,uint_x4 > + divmod(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator+(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator-(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator<<(const uint_x4&, const bitcount_t shift); + + template + friend uint_x4 operator>>(const uint_x4&, const bitcount_t shift); + +#if PCG_64BIT_SPECIALIZATIONS + template + friend uint_x4 operator<<(const uint_x4&, const bitcount_t shift); + + template + friend uint_x4 operator>>(const uint_x4&, const bitcount_t shift); +#endif + + template + friend uint_x4 operator&(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator|(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator^(const uint_x4&, const uint_x4&); + + template + friend bool operator==(const uint_x4&, const uint_x4&); + + template + friend bool 
operator!=(const uint_x4&, const uint_x4&); + + template + friend bool operator<(const uint_x4&, const uint_x4&); + + template + friend bool operator<=(const uint_x4&, const uint_x4&); + + template + friend bool operator>(const uint_x4&, const uint_x4&); + + template + friend bool operator>=(const uint_x4&, const uint_x4&); + + template + friend uint_x4 operator~(const uint_x4&); + + template + friend uint_x4 operator-(const uint_x4&); + + template + friend bitcount_t flog2(const uint_x4&); + + template + friend bitcount_t trailingzeros(const uint_x4&); + +#if PCG_64BIT_SPECIALIZATIONS + template + friend bitcount_t flog2(const uint_x4&); + + template + friend bitcount_t trailingzeros(const uint_x4&); +#endif + + uint_x4& operator*=(const uint_x4& rhs) + { + uint_x4 result = *this * rhs; + return *this = result; + } + + uint_x4& operator*=(UIntX2 rhs) + { + uint_x4 result = *this * rhs; + return *this = result; + } + + uint_x4& operator/=(const uint_x4& rhs) + { + uint_x4 result = *this / rhs; + return *this = result; + } + + uint_x4& operator%=(const uint_x4& rhs) + { + uint_x4 result = *this % rhs; + return *this = result; + } + + uint_x4& operator+=(const uint_x4& rhs) + { + uint_x4 result = *this + rhs; + return *this = result; + } + + uint_x4& operator-=(const uint_x4& rhs) + { + uint_x4 result = *this - rhs; + return *this = result; + } + + uint_x4& operator&=(const uint_x4& rhs) + { + uint_x4 result = *this & rhs; + return *this = result; + } + + uint_x4& operator|=(const uint_x4& rhs) + { + uint_x4 result = *this | rhs; + return *this = result; + } + + uint_x4& operator^=(const uint_x4& rhs) + { + uint_x4 result = *this ^ rhs; + return *this = result; + } + + uint_x4& operator>>=(bitcount_t shift) + { + uint_x4 result = *this >> shift; + return *this = result; + } + + uint_x4& operator<<=(bitcount_t shift) + { + uint_x4 result = *this << shift; + return *this = result; + } + +}; + +template +bitcount_t flog2(const uint_x4& v) +{ +#if PCG_LITTLE_ENDIAN + for (uint8_t i = 4; i !=0; /* dec in loop */) { + --i; +#else + for (uint8_t i = 0; i < 4; ++i) { +#endif + if (v.wa[i] == 0) + continue; + return flog2(v.wa[i]) + uint_x4::UINT_BITS*i; + } + abort(); +} + +template +bitcount_t trailingzeros(const uint_x4& v) +{ +#if PCG_LITTLE_ENDIAN + for (uint8_t i = 0; i < 4; ++i) { +#else + for (uint8_t i = 4; i !=0; /* dec in loop */) { + --i; +#endif + if (v.wa[i] != 0) + return trailingzeros(v.wa[i]) + uint_x4::UINT_BITS*i; + } + return uint_x4::UINT_BITS*4; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +bitcount_t flog2(const uint_x4& v) +{ + return v.d.v23 > 0 ? flog2(v.d.v23) + uint_x4::UINT_BITS*2 + : flog2(v.d.v01); +} + +template +bitcount_t trailingzeros(const uint_x4& v) +{ + return v.d.v01 == 0 ? trailingzeros(v.d.v23) + uint_x4::UINT_BITS*2 + : trailingzeros(v.d.v01); +} +#endif + +template +std::pair< uint_x4, uint_x4 > + divmod(const uint_x4& orig_dividend, + const uint_x4& divisor) +{ + // If the dividend is less than the divisor, the answer is always zero. + // This takes care of boundary cases like 0/x (which would otherwise be + // problematic because we can't take the log of zero. (The boundary case + // of division by zero is undefined.) 
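+    // (What follows is classic shift-and-subtract long division: align the
+    //  divisor with the dividend by shifting it left, subtract, accumulate
+    //  the matching power of two into the quotient, and shift back down as
+    //  the remainder shrinks.  Worked example in ordinary integers: for
+    //  100/7, logdiff works out to 3, so we start from 7 << 3 = 56 (the
+    //  shift is deliberately underestimated so the first subtraction cannot
+    //  overshoot) and the loop accumulates 8 + 4 + 2 = 14, leaving
+    //  remainder 2.)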
+    if (orig_dividend < divisor)
+        return { uint_x4<UInt,UIntX2>(UIntX2(0)), orig_dividend };
+
+    auto dividend = orig_dividend;
+
+    auto log2_divisor  = flog2(divisor);
+    auto log2_dividend = flog2(dividend);
+    // assert(log2_dividend >= log2_divisor);
+    bitcount_t logdiff = log2_dividend - log2_divisor;
+
+    constexpr uint_x4<UInt,UIntX2> ONE(UIntX2(1));
+    if (logdiff == 0)
+        return { ONE, dividend - divisor };
+
+    // Now we change the log difference to
+    //     floor(log2(dividend)) - ceil(log2(divisor))
+    // to ensure that we *underestimate* the result.
+    logdiff -= 1;
+
+    uint_x4<UInt,UIntX2> quotient(UIntX2(0));
+
+    auto qfactor = ONE << logdiff;
+    auto factor  = divisor << logdiff;
+
+    do {
+        dividend -= factor;
+        quotient += qfactor;
+        while (dividend < factor) {
+            factor  >>= 1;
+            qfactor >>= 1;
+        }
+    } while (dividend >= divisor);
+
+    return { quotient, dividend };
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator/(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).first;
+}
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator%(const uint_x4<UInt,UIntX2>& dividend,
+                               const uint_x4<UInt,UIntX2>& divisor)
+{
+    return divmod(dividend, divisor).second;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               const uint_x4<UInt,UIntX2>& b)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(b.w.v0);
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(b.w.v0);
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b.w.v1);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout);
+
+    r.d.v23 += a.d.v01 * b.d.v23 + a.d.v23 * b.d.v01;
+
+    return r;
+}
+
+
+template <typename UInt, typename UIntX2>
+uint_x4<UInt,UIntX2> operator*(const uint_x4<UInt,UIntX2>& a,
+                               UIntX2 b01)
+{
+    constexpr auto UINT_BITS = uint_x4<UInt,UIntX2>::UINT_BITS;
+    uint_x4<UInt,UIntX2> r = {0U, 0U, 0U, 0U};
+    bool carryin = false;
+    bool carryout;
+    UIntX2 a0b0 = UIntX2(a.w.v0) * UIntX2(UInt(b01));
+    r.w.v0 = UInt(a0b0);
+    r.w.v1 = UInt(a0b0 >> UINT_BITS);
+
+    UIntX2 a1b0 = UIntX2(a.w.v1) * UIntX2(UInt(b01));
+    r.w.v2 = UInt(a1b0 >> UINT_BITS);
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a1b0), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    UIntX2 a0b1 = UIntX2(a.w.v0) * UIntX2(b01 >> UINT_BITS);
+    carryin = false;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(a0b1 >> UINT_BITS), carryin, &carryout);
+    carryin = carryout;
+    r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout);
+
+    carryin = false;
+    r.w.v1 = addwithcarry(r.w.v1, UInt(a0b1), carryin, &carryout);
+    carryin = carryout;
+    r.w.v2 = addwithcarry(r.w.v2, UInt(0U), carryin,
&carryout); + carryin = carryout; + r.w.v3 = addwithcarry(r.w.v3, UInt(0U), carryin, &carryout); + + UIntX2 a1b1 = UIntX2(a.w.v1) * UIntX2(b01 >> UINT_BITS); + carryin = false; + r.w.v2 = addwithcarry(r.w.v2, UInt(a1b1), carryin, &carryout); + carryin = carryout; + r.w.v3 = addwithcarry(r.w.v3, UInt(a1b1 >> UINT_BITS), carryin, &carryout); + + r.d.v23 += a.d.v23 * b01; + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +#if defined(_MSC_VER) +#pragma intrinsic(_umul128) +#endif + +#if defined(_MSC_VER) || __SIZEOF_INT128__ +template +uint_x4 operator*(const uint_x4& a, + const uint_x4& b) +{ +#if defined(_MSC_VER) + uint64_t hi; + uint64_t lo = _umul128(a.d.v01, b.d.v01, &hi); +#else + __uint128_t r = __uint128_t(a.d.v01) * __uint128_t(b.d.v01); + uint64_t lo = uint64_t(r); + uint64_t hi = r >> 64; +#endif + hi += a.d.v23 * b.d.v01 + a.d.v01 * b.d.v23; + return {hi, lo}; +} +#endif +#endif + + +template +uint_x4 operator+(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + + bool carryin = false; + bool carryout; + r.w.v0 = addwithcarry(a.w.v0, b.w.v0, carryin, &carryout); + carryin = carryout; + r.w.v1 = addwithcarry(a.w.v1, b.w.v1, carryin, &carryout); + carryin = carryout; + r.w.v2 = addwithcarry(a.w.v2, b.w.v2, carryin, &carryout); + carryin = carryout; + r.w.v3 = addwithcarry(a.w.v3, b.w.v3, carryin, &carryout); + + return r; +} + +template +uint_x4 operator-(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + + bool carryin = false; + bool carryout; + r.w.v0 = subwithcarry(a.w.v0, b.w.v0, carryin, &carryout); + carryin = carryout; + r.w.v1 = subwithcarry(a.w.v1, b.w.v1, carryin, &carryout); + carryin = carryout; + r.w.v2 = subwithcarry(a.w.v2, b.w.v2, carryin, &carryout); + carryin = carryout; + r.w.v3 = subwithcarry(a.w.v3, b.w.v3, carryin, &carryout); + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +uint_x4 operator+(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {uint64_t(0u), uint64_t(0u)}; + + bool carryin = false; + bool carryout; + r.d.v01 = addwithcarry(a.d.v01, b.d.v01, carryin, &carryout); + carryin = carryout; + r.d.v23 = addwithcarry(a.d.v23, b.d.v23, carryin, &carryout); + + return r; +} + +template +uint_x4 operator-(const uint_x4& a, + const uint_x4& b) +{ + uint_x4 r = {uint64_t(0u), uint64_t(0u)}; + + bool carryin = false; + bool carryout; + r.d.v01 = subwithcarry(a.d.v01, b.d.v01, carryin, &carryout); + carryin = carryout; + r.d.v23 = subwithcarry(a.d.v23, b.d.v23, carryin, &carryout); + + return r; +} +#endif + +template +uint_x4 operator&(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 & b.d.v23, a.d.v01 & b.d.v01); +} + +template +uint_x4 operator|(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 | b.d.v23, a.d.v01 | b.d.v01); +} + +template +uint_x4 operator^(const uint_x4& a, + const uint_x4& b) +{ + return uint_x4(a.d.v23 ^ b.d.v23, a.d.v01 ^ b.d.v01); +} + +template +uint_x4 operator~(const uint_x4& v) +{ + return uint_x4(~v.d.v23, ~v.d.v01); +} + +template +uint_x4 operator-(const uint_x4& v) +{ + return uint_x4(0UL,0UL) - v; +} + +template +bool operator==(const uint_x4& a, const uint_x4& b) +{ + return (a.d.v01 == b.d.v01) && (a.d.v23 == b.d.v23); +} + +template +bool operator!=(const uint_x4& a, const uint_x4& b) +{ + return !operator==(a,b); +} + + +template +bool operator<(const uint_x4& a, const uint_x4& b) +{ + return (a.d.v23 < b.d.v23) + || ((a.d.v23 == b.d.v23) && (a.d.v01 < b.d.v01)); +} + +template +bool operator>(const uint_x4& a, const uint_x4& 
b) +{ + return operator<(b,a); +} + +template +bool operator<=(const uint_x4& a, const uint_x4& b) +{ + return !(operator<(b,a)); +} + +template +bool operator>=(const uint_x4& a, const uint_x4& b) +{ + return !(operator<(a,b)); +} + + + +template +uint_x4 operator<<(const uint_x4& v, + const bitcount_t shift) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + const bitcount_t bits = uint_x4::UINT_BITS; + const bitcount_t bitmask = bits - 1; + const bitcount_t shiftdiv = shift / bits; + const bitcount_t shiftmod = shift & bitmask; + + if (shiftmod) { + UInt carryover = 0; +#if PCG_LITTLE_ENDIAN + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#else + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#endif + r.wa[out] = (v.wa[in] << shiftmod) | carryover; + carryover = (v.wa[in] >> (bits - shiftmod)); + } + } else { +#if PCG_LITTLE_ENDIAN + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#else + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#endif + r.wa[out] = v.wa[in]; + } + } + + return r; +} + +template +uint_x4 operator>>(const uint_x4& v, + const bitcount_t shift) +{ + uint_x4 r = {0U, 0U, 0U, 0U}; + const bitcount_t bits = uint_x4::UINT_BITS; + const bitcount_t bitmask = bits - 1; + const bitcount_t shiftdiv = shift / bits; + const bitcount_t shiftmod = shift & bitmask; + + if (shiftmod) { + UInt carryover = 0; +#if PCG_LITTLE_ENDIAN + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#else + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#endif + r.wa[out] = (v.wa[in] >> shiftmod) | carryover; + carryover = (v.wa[in] << (bits - shiftmod)); + } + } else { +#if PCG_LITTLE_ENDIAN + for (uint8_t out = 4-shiftdiv, in = 4; out != 0; /* dec in loop */) { + --out, --in; +#else + for (uint8_t out = shiftdiv, in = 0; out < 4; ++out, ++in) { +#endif + r.wa[out] = v.wa[in]; + } + } + + return r; +} + +#if PCG_64BIT_SPECIALIZATIONS +template +uint_x4 operator<<(const uint_x4& v, + const bitcount_t shift) +{ + constexpr bitcount_t bits2 = uint_x4::UINT_BITS * 2; + + if (shift >= bits2) { + return {v.d.v01 << (shift-bits2), uint64_t(0u)}; + } else { + return {shift ? (v.d.v23 << shift) | (v.d.v01 >> (bits2-shift)) + : v.d.v23, + v.d.v01 << shift}; + } +} + +template +uint_x4 operator>>(const uint_x4& v, + const bitcount_t shift) +{ + constexpr bitcount_t bits2 = uint_x4::UINT_BITS * 2; + + if (shift >= bits2) { + return {uint64_t(0u), v.d.v23 >> (shift-bits2)}; + } else { + return {v.d.v23 >> shift, + shift ? (v.d.v01 >> shift) | (v.d.v23 << (bits2-shift)) + : v.d.v01}; + } +} +#endif + +} // namespace pcg_extras +} // namespace arrow_vendored + +#endif // PCG_UINT128_HPP_INCLUDED diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h new file mode 100644 index 0000000000000000000000000000000000000000..6d039064d6ab3f56a2215ae50d1b0286db61ddfe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h @@ -0,0 +1,83 @@ +/* Debugging assertions and traps + * Portable Snippets - https://github.com/nemequ/portable-snippets + * Created by Evan Nemerson + * + * To the extent possible under law, the authors have waived all + * copyright and related or neighboring rights to this code. 
For + * details, see the Creative Commons Zero 1.0 Universal license at + * https://creativecommons.org/publicdomain/zero/1.0/ + */ + +#if !defined(PSNIP_DEBUG_TRAP_H) +#define PSNIP_DEBUG_TRAP_H + +#if !defined(PSNIP_NDEBUG) && defined(NDEBUG) && !defined(PSNIP_DEBUG) +# define PSNIP_NDEBUG 1 +#endif + +#if defined(__has_builtin) && !defined(__ibmxl__) +# if __has_builtin(__builtin_debugtrap) +# define psnip_trap() __builtin_debugtrap() +# elif __has_builtin(__debugbreak) +# define psnip_trap() __debugbreak() +# endif +#endif +#if !defined(psnip_trap) +# if defined(_MSC_VER) || defined(__INTEL_COMPILER) +# define psnip_trap() __debugbreak() +# elif defined(__ARMCC_VERSION) +# define psnip_trap() __breakpoint(42) +# elif defined(__ibmxl__) || defined(__xlC__) +# include +# define psnip_trap() __trap(42) +# elif defined(__DMC__) && defined(_M_IX86) + static inline void psnip_trap(void) { __asm int 3h; } +# elif defined(__i386__) || defined(__x86_64__) + static inline void psnip_trap(void) { __asm__ __volatile__("int $03"); } +# elif defined(__thumb__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xde01"); } +# elif defined(__aarch64__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xd4200000"); } +# elif defined(__arm__) + static inline void psnip_trap(void) { __asm__ __volatile__(".inst 0xe7f001f0"); } +# elif defined (__alpha__) && !defined(__osf__) + static inline void psnip_trap(void) { __asm__ __volatile__("bpt"); } +# elif defined(_54_) + static inline void psnip_trap(void) { __asm__ __volatile__("ESTOP"); } +# elif defined(_55_) + static inline void psnip_trap(void) { __asm__ __volatile__(";\n .if (.MNEMONIC)\n ESTOP_1\n .else\n ESTOP_1()\n .endif\n NOP"); } +# elif defined(_64P_) + static inline void psnip_trap(void) { __asm__ __volatile__("SWBP 0"); } +# elif defined(_6x_) + static inline void psnip_trap(void) { __asm__ __volatile__("NOP\n .word 0x10000000"); } +# elif defined(__STDC_HOSTED__) && (__STDC_HOSTED__ == 0) && defined(__GNUC__) +# define psnip_trap() __builtin_trap() +# else +# include +# if defined(SIGTRAP) +# define psnip_trap() raise(SIGTRAP) +# else +# define psnip_trap() raise(SIGABRT) +# endif +# endif +#endif + +#if defined(HEDLEY_LIKELY) +# define PSNIP_DBG_LIKELY(expr) HEDLEY_LIKELY(expr) +#elif defined(__GNUC__) && (__GNUC__ >= 3) +# define PSNIP_DBG_LIKELY(expr) __builtin_expect(!!(expr), 1) +#else +# define PSNIP_DBG_LIKELY(expr) (!!(expr)) +#endif + +#if !defined(PSNIP_NDEBUG) || (PSNIP_NDEBUG == 0) +# define psnip_dbg_assert(expr) do { \ + if (!PSNIP_DBG_LIKELY(expr)) { \ + psnip_trap(); \ + } \ + } while (0) +#else +# define psnip_dbg_assert(expr) +#endif + +#endif /* !defined(PSNIP_DEBUG_TRAP_H) */ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6426ac7657192f6e0aa92bc25a828f106b4f7c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/portable-snippets/safe-math.h @@ -0,0 +1,1072 @@ +/* Overflow-safe math functions + * Portable Snippets - https://github.com/nemequ/portable-snippets + * Created by Evan Nemerson + * + * To the extent possible under law, the authors have waived all + * copyright and related or neighboring rights to this code. 
For + * details, see the Creative Commons Zero 1.0 Universal license at + * https://creativecommons.org/publicdomain/zero/1.0/ + */ + +#if !defined(PSNIP_SAFE_H) +#define PSNIP_SAFE_H + +#if !defined(PSNIP_SAFE_FORCE_PORTABLE) +# if defined(__has_builtin) +# if __has_builtin(__builtin_add_overflow) && !defined(__ibmxl__) +# define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__INTEL_COMPILER) +# define PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW +# endif +# if defined(__has_include) +# if __has_include() +# define PSNIP_SAFE_HAVE_INTSAFE_H +# endif +# elif defined(_WIN32) +# define PSNIP_SAFE_HAVE_INTSAFE_H +# endif +#endif /* !defined(PSNIP_SAFE_FORCE_PORTABLE) */ + +#if defined(__GNUC__) +# define PSNIP_SAFE_LIKELY(expr) __builtin_expect(!!(expr), 1) +# define PSNIP_SAFE_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#else +# define PSNIP_SAFE_LIKELY(expr) !!(expr) +# define PSNIP_SAFE_UNLIKELY(expr) !!(expr) +#endif /* defined(__GNUC__) */ + +#if !defined(PSNIP_SAFE_STATIC_INLINE) +# if defined(__GNUC__) +# define PSNIP_SAFE__COMPILER_ATTRIBUTES __attribute__((__unused__)) +# else +# define PSNIP_SAFE__COMPILER_ATTRIBUTES +# endif + +# if defined(HEDLEY_INLINE) +# define PSNIP_SAFE__INLINE HEDLEY_INLINE +# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define PSNIP_SAFE__INLINE inline +# elif defined(__GNUC_STDC_INLINE__) +# define PSNIP_SAFE__INLINE __inline__ +# elif defined(_MSC_VER) && _MSC_VER >= 1200 +# define PSNIP_SAFE__INLINE __inline +# else +# define PSNIP_SAFE__INLINE +# endif + +# define PSNIP_SAFE__FUNCTION PSNIP_SAFE__COMPILER_ATTRIBUTES static PSNIP_SAFE__INLINE +#endif + +// !defined(__cplusplus) added for Solaris support +#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define psnip_safe_bool _Bool +#else +# define psnip_safe_bool int +#endif + +#if !defined(PSNIP_SAFE_NO_FIXED) +/* For maximum portability include the exact-int module from + portable snippets. */ +# if \ + !defined(psnip_int64_t) || !defined(psnip_uint64_t) || \ + !defined(psnip_int32_t) || !defined(psnip_uint32_t) || \ + !defined(psnip_int16_t) || !defined(psnip_uint16_t) || \ + !defined(psnip_int8_t) || !defined(psnip_uint8_t) +# include +# if !defined(psnip_int64_t) +# define psnip_int64_t int64_t +# endif +# if !defined(psnip_uint64_t) +# define psnip_uint64_t uint64_t +# endif +# if !defined(psnip_int32_t) +# define psnip_int32_t int32_t +# endif +# if !defined(psnip_uint32_t) +# define psnip_uint32_t uint32_t +# endif +# if !defined(psnip_int16_t) +# define psnip_int16_t int16_t +# endif +# if !defined(psnip_uint16_t) +# define psnip_uint16_t uint16_t +# endif +# if !defined(psnip_int8_t) +# define psnip_int8_t int8_t +# endif +# if !defined(psnip_uint8_t) +# define psnip_uint8_t uint8_t +# endif +# endif +#endif /* !defined(PSNIP_SAFE_NO_FIXED) */ +#include +#include + +#if !defined(PSNIP_SAFE_SIZE_MAX) +# if defined(__SIZE_MAX__) +# define PSNIP_SAFE_SIZE_MAX __SIZE_MAX__ +# elif defined(PSNIP_EXACT_INT_HAVE_STDINT) +# include +# endif +#endif + +#if defined(PSNIP_SAFE_SIZE_MAX) +# define PSNIP_SAFE__SIZE_MAX_RT PSNIP_SAFE_SIZE_MAX +#else +# define PSNIP_SAFE__SIZE_MAX_RT (~((size_t) 0)) +#endif + +#if defined(PSNIP_SAFE_HAVE_INTSAFE_H) +/* In VS 10, stdint.h and intsafe.h both define (U)INTN_MIN/MAX, which + triggers warning C4005 (level 1). 
*/ +# if defined(_MSC_VER) && (_MSC_VER == 1600) +# pragma warning(push) +# pragma warning(disable:4005) +# endif +# include +# if defined(_MSC_VER) && (_MSC_VER == 1600) +# pragma warning(pop) +# endif +#endif /* defined(PSNIP_SAFE_HAVE_INTSAFE_H) */ + +/* If there is a type larger than the one we're concerned with it's + * likely much faster to simply promote the operands, perform the + * requested operation, verify that the result falls within the + * original type, then cast the result back to the original type. */ + +#if !defined(PSNIP_SAFE_NO_PROMOTIONS) + +#define PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, op_name, op) \ + PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \ + psnip_safe_larger_##name##_##op_name (T a, T b) { \ + return ((psnip_safe_##name##_larger) a) op ((psnip_safe_##name##_larger) b); \ + } + +#define PSNIP_SAFE_DEFINE_LARGER_UNARY_OP(T, name, op_name, op) \ + PSNIP_SAFE__FUNCTION psnip_safe_##name##_larger \ + psnip_safe_larger_##name##_##op_name (T value) { \ + return (op ((psnip_safe_##name##_larger) value)); \ + } + +#define PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(T, name) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %) \ + PSNIP_SAFE_DEFINE_LARGER_UNARY_OP (T, name, neg, -) + +#define PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(T, name) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, add, +) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, sub, -) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mul, *) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, div, /) \ + PSNIP_SAFE_DEFINE_LARGER_BINARY_OP(T, name, mod, %) + +#define PSNIP_SAFE_IS_LARGER(ORIG_MAX, DEST_MAX) ((DEST_MAX / ORIG_MAX) >= ORIG_MAX) + +#if defined(__GNUC__) && ((__GNUC__ >= 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__SIZEOF_INT128__) && !defined(__ibmxl__) +#define PSNIP_SAFE_HAVE_128 +typedef __int128 psnip_safe_int128_t; +typedef unsigned __int128 psnip_safe_uint128_t; +#endif /* defined(__GNUC__) */ + +#if !defined(PSNIP_SAFE_NO_FIXED) +#define PSNIP_SAFE_HAVE_INT8_LARGER +#define PSNIP_SAFE_HAVE_UINT8_LARGER +typedef psnip_int16_t psnip_safe_int8_larger; +typedef psnip_uint16_t psnip_safe_uint8_larger; + +#define PSNIP_SAFE_HAVE_INT16_LARGER +typedef psnip_int32_t psnip_safe_int16_larger; +typedef psnip_uint32_t psnip_safe_uint16_larger; + +#define PSNIP_SAFE_HAVE_INT32_LARGER +typedef psnip_int64_t psnip_safe_int32_larger; +typedef psnip_uint64_t psnip_safe_uint32_larger; + +#if defined(PSNIP_SAFE_HAVE_128) +#define PSNIP_SAFE_HAVE_INT64_LARGER +typedef psnip_safe_int128_t psnip_safe_int64_larger; +typedef psnip_safe_uint128_t psnip_safe_uint64_larger; +#endif /* defined(PSNIP_SAFE_HAVE_128) */ +#endif /* !defined(PSNIP_SAFE_NO_FIXED) */ + +#define PSNIP_SAFE_HAVE_LARGER_SCHAR +#if PSNIP_SAFE_IS_LARGER(SCHAR_MAX, SHRT_MAX) +typedef short psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, INT_MAX) +typedef int psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LONG_MAX) +typedef long psnip_safe_schar_larger; +#elif PSNIP_SAFE_IS_LARGER(SCHAR_MAX, LLONG_MAX) +typedef long long psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffLL) +typedef psnip_int32_t 
psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SCHAR_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_schar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SCHAR_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_schar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SCHAR +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UCHAR +#if PSNIP_SAFE_IS_LARGER(UCHAR_MAX, USHRT_MAX) +typedef unsigned short psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, UINT_MAX) +typedef unsigned int psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uchar_larger; +#elif PSNIP_SAFE_IS_LARGER(UCHAR_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffU) +typedef psnip_uint16_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UCHAR_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uchar_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UCHAR_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uchar_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UCHAR +#endif + +#if CHAR_MIN == 0 && defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_uchar_larger psnip_safe_char_larger; +#elif CHAR_MIN < 0 && defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) +#define PSNIP_SAFE_HAVE_LARGER_CHAR +typedef psnip_safe_schar_larger psnip_safe_char_larger; +#endif + +#define PSNIP_SAFE_HAVE_LARGER_SHRT +#if PSNIP_SAFE_IS_LARGER(SHRT_MAX, INT_MAX) +typedef int psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LONG_MAX) +typedef long psnip_safe_short_larger; +#elif PSNIP_SAFE_IS_LARGER(SHRT_MAX, LLONG_MAX) +typedef long long psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(SHRT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_short_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (SHRT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_short_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_USHRT +#if PSNIP_SAFE_IS_LARGER(USHRT_MAX, UINT_MAX) +typedef unsigned int psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_ushort_larger; +#elif PSNIP_SAFE_IS_LARGER(USHRT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(USHRT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ushort_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (USHRT_MAX <= 0xffffffffffffffffULL) +typedef 
psnip_safe_uint128_t psnip_safe_ushort_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_USHRT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_INT +#if PSNIP_SAFE_IS_LARGER(INT_MAX, LONG_MAX) +typedef long psnip_safe_int_larger; +#elif PSNIP_SAFE_IS_LARGER(INT_MAX, LLONG_MAX) +typedef long long psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(INT_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_int_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (INT_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_int_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_INT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_UINT +#if PSNIP_SAFE_IS_LARGER(UINT_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_uint_larger; +#elif PSNIP_SAFE_IS_LARGER(UINT_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(UINT_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_uint_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (UINT_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_uint_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_UINT +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LONG +#if PSNIP_SAFE_IS_LARGER(LONG_MAX, LLONG_MAX) +typedef long long psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LONG_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_long_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LONG_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_long_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_LONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_ULONG +#if PSNIP_SAFE_IS_LARGER(ULONG_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULONG_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ulong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULONG_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_ulong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_ULONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_LLONG +#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fff) +typedef psnip_int16_t psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffLL) +typedef psnip_int32_t 
psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(LLONG_MAX, 0x7fffffffffffffffLL) +typedef psnip_int64_t psnip_safe_llong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (LLONG_MAX <= 0x7fffffffffffffffLL) +typedef psnip_safe_int128_t psnip_safe_llong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_LLONG +#endif + +#define PSNIP_SAFE_HAVE_LARGER_ULLONG +#if !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(ULLONG_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_ullong_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (ULLONG_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_ullong_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_ULLONG +#endif + +#if defined(PSNIP_SAFE_SIZE_MAX) +#define PSNIP_SAFE_HAVE_LARGER_SIZE +#if PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, USHRT_MAX) +typedef unsigned short psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, UINT_MAX) +typedef unsigned int psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULONG_MAX) +typedef unsigned long psnip_safe_size_larger; +#elif PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, ULLONG_MAX) +typedef unsigned long long psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffff) +typedef psnip_uint16_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffUL) +typedef psnip_uint32_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && PSNIP_SAFE_IS_LARGER(PSNIP_SAFE_SIZE_MAX, 0xffffffffffffffffULL) +typedef psnip_uint64_t psnip_safe_size_larger; +#elif !defined(PSNIP_SAFE_NO_FIXED) && defined(PSNIP_SAFE_HAVE_128) && (PSNIP_SAFE_SIZE_MAX <= 0xffffffffffffffffULL) +typedef psnip_safe_uint128_t psnip_safe_size_larger; +#else +#undef PSNIP_SAFE_HAVE_LARGER_SIZE +#endif +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(signed char, schar) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned char, uchar) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +#if CHAR_MIN == 0 +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(char, char) +#else +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(char, char) +#endif +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SHORT) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(short, short) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_USHORT) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned short, ushort) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_INT) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(int, int) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_UINT) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned int, uint) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_LONG) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long, long) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_ULONG) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long, ulong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_LLONG) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(long long, llong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_ULLONG) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(unsigned long long, ullong) +#endif + +#if defined(PSNIP_SAFE_HAVE_LARGER_SIZE) 
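+/* For illustration: the invocation below generates the widened helpers
+ * psnip_safe_larger_size_add/sub/mul/div/mod, each computing in
+ * psnip_safe_size_larger; the promoted size_t ops defined later narrow
+ * the wide result back to size_t after a range check. */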
+PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(size_t, size) +#endif + +#if !defined(PSNIP_SAFE_NO_FIXED) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int8_t, int8) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint8_t, uint8) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int16_t, int16) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint16_t, uint16) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int32_t, int32) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint32_t, uint32) +#if defined(PSNIP_SAFE_HAVE_128) +PSNIP_SAFE_DEFINE_LARGER_SIGNED_OPS(psnip_int64_t, int64) +PSNIP_SAFE_DEFINE_LARGER_UNSIGNED_OPS(psnip_uint64_t, uint64) +#endif +#endif + +#endif /* !defined(PSNIP_SAFE_NO_PROMOTIONS) */ + +#define PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(T, name, op_name) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + return !__builtin_##op_name##_overflow(a, b, res); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(T, name, op_name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r >= min) && (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(T, name, op_name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op_name(T* res, T a, T b) { \ + const psnip_safe_##name##_larger r = psnip_safe_larger_##name##_##op_name(a, b); \ + *res = (T) r; \ + return (r <= max); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_ADD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + psnip_safe_bool r = !( ((b > 0) && (a > (max - b))) || \ + ((b < 0) && (a < (min - b))) ); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a + b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_ADD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_add (T* res, T a, T b) { \ + *res = (T) (a + b); \ + return !PSNIP_SAFE_UNLIKELY((b > 0) && (a > (max - b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_SUB(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + psnip_safe_bool r = !((b > 0 && a < (min + b)) || \ + (b < 0 && a > (max + b))); \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a - b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_SUB(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_sub (T* res, T a, T b) { \ + *res = a - b; \ + return !PSNIP_SAFE_UNLIKELY(b > a); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MUL(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + psnip_safe_bool r = 1; \ + if (a > 0) { \ + if (b > 0) { \ + if (a > (max / b)) { \ + r = 0; \ + } \ + } else { \ + if (b < (min / a)) { \ + r = 0; \ + } \ + } \ + } else { \ + if (b > 0) { \ + if (a < (min / b)) { \ + r = 0; \ + } \ + } else { \ + if ( (a != 0) && (b < (max / a))) { \ + r = 0; \ + } \ + } \ + } \ + if(PSNIP_SAFE_LIKELY(r)) \ + *res = a * b; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MUL(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mul (T* res, T a, T b) { \ + *res = (T) (a * b); \ + return !PSNIP_SAFE_UNLIKELY((a > 0) && (b > 0) && (a > (max / b))); \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_DIV(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b 
== 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a / b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_DIV(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_div (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a / b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_MOD(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else if (PSNIP_SAFE_UNLIKELY(a == min && b == -1)) { \ + *res = min; \ + return 0; \ + } else { \ + *res = (T) (a % b); \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_UNSIGNED_MOD(T, name, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_mod (T* res, T a, T b) { \ + if (PSNIP_SAFE_UNLIKELY(b == 0)) { \ + *res = 0; \ + return 0; \ + } else { \ + *res = a % b; \ + return 1; \ + } \ + } + +#define PSNIP_SAFE_DEFINE_SIGNED_NEG(T, name, min, max) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_neg (T* res, T value) { \ + psnip_safe_bool r = value != min; \ + *res = PSNIP_SAFE_LIKELY(r) ? -value : max; \ + return r; \ + } + +#define PSNIP_SAFE_DEFINE_INTSAFE(T, name, op, isf) \ + PSNIP_SAFE__FUNCTION psnip_safe_bool \ + psnip_safe_##name##_##op (T* res, T a, T b) { \ + return isf(a, b, res) == S_OK; \ + } + +#if CHAR_MIN == 0 +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, add, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, sub, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(char, char, mul, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(char, char, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(char, char, CHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(char, char, CHAR_MAX) +#else /* CHAR_MIN != 0 */ +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(char, char, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_CHAR) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, add, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, sub, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(char, char, mul, CHAR_MIN, CHAR_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(char, char, CHAR_MIN, CHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(char, char, CHAR_MIN, CHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(char, char, CHAR_MIN, CHAR_MAX) +#endif + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(signed char, schar, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SCHAR) 
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, add, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, sub, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(signed char, schar, mul, SCHAR_MIN, SCHAR_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(signed char, schar, SCHAR_MIN, SCHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(signed char, schar, SCHAR_MIN, SCHAR_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(signed char, schar, SCHAR_MIN, SCHAR_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned char, uchar, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UCHAR) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, add, UCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, sub, UCHAR_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned char, uchar, mul, UCHAR_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned char, uchar, UCHAR_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned char, uchar, UCHAR_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned char, uchar, UCHAR_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(short, short, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SHORT) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, add, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, sub, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(short, short, mul, SHRT_MIN, SHRT_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(short, short, SHRT_MIN, SHRT_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(short, short, SHRT_MIN, SHRT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(short, short, SHRT_MIN, SHRT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned short, ushort, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, add, UShortAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, sub, UShortSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned short, ushort, mul, UShortMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_USHORT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, add, USHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, sub, USHRT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned short, ushort, mul, USHRT_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned short, ushort, USHRT_MAX) +#endif 
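+/* For illustration: with PSNIP_SAFE_HAVE_LARGER_USHORT defined, the
+ * promoted add selection above expands (modulo the PSNIP_SAFE__FUNCTION
+ * attributes) to approximately
+ *
+ *   psnip_safe_bool psnip_safe_ushort_add(unsigned short* res,
+ *                                         unsigned short a,
+ *                                         unsigned short b) {
+ *       const psnip_safe_ushort_larger r = psnip_safe_larger_ushort_add(a, b);
+ *       *res = (unsigned short) r;
+ *       return (r <= USHRT_MAX);
+ *   }
+ *
+ * i.e. promote both operands, compute at the wider width, narrow, and
+ * report whether the wide result still fits in the original type. */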
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned short, ushort, USHRT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned short, ushort, USHRT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(int, int, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, add, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, sub, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(int, int, mul, INT_MIN, INT_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(int, int, INT_MIN, INT_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(int, int, INT_MIN, INT_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(int, int, INT_MIN, INT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned int, uint, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned int, uint, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, add, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, sub, UINT_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned int, uint, mul, UINT_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned int, uint, UINT_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned int, uint, UINT_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned int, uint, UINT_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long, long, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, add, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, sub, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long, long, mul, LONG_MIN, LONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long, long, LONG_MIN, LONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long, long, LONG_MIN, LONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long, long, LONG_MIN, LONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long, ulong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, add, ULongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, sub, ULongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long, ulong, mul, ULongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, add, ULONG_MAX) 
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, sub, ULONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long, ulong, mul, ULONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long, ulong, ULONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long, ulong, ULONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long, ulong, ULONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(long long, llong, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_LLONG) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, add, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, sub, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(long long, llong, mul, LLONG_MIN, LLONG_MAX) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_SUB(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MUL(long long, llong, LLONG_MIN, LLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_MOD(long long, llong, LLONG_MIN, LLONG_MAX) +PSNIP_SAFE_DEFINE_SIGNED_NEG(long long, llong, LLONG_MIN, LLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(unsigned long long, ullong, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, add, ULongLongAdd) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, sub, ULongLongSub) +PSNIP_SAFE_DEFINE_INTSAFE(unsigned long long, ullong, mul, ULongLongMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_ULLONG) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, add, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, sub, ULLONG_MAX) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(unsigned long long, ullong, mul, ULLONG_MAX) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(unsigned long long, ullong, ULLONG_MAX) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(unsigned long long, ullong, ULLONG_MAX) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(unsigned long long, ullong, ULLONG_MAX) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(size_t, size, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, add, SizeTAdd) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, sub, SizeTSub) +PSNIP_SAFE_DEFINE_INTSAFE(size_t, size, mul, SizeTMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_SIZE) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, add, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, sub, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(size_t, size, mul, PSNIP_SAFE__SIZE_MAX_RT) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) 
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(size_t, size, PSNIP_SAFE__SIZE_MAX_RT) + +#if !defined(PSNIP_SAFE_NO_FIXED) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int8_t, int8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT8) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, add, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, sub, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int8_t, int8, mul, (-0x7fLL-1), 0x7f) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int8_t, int8, (-0x7fLL-1), 0x7f) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint8_t, uint8, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT8) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, add, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, sub, 0xff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint8_t, uint8, mul, 0xff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint8_t, uint8, 0xff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint8_t, uint8, 0xff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint8_t, uint8, 0xff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int16_t, int16, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT16) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, add, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, sub, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int16_t, int16, mul, (-32767-1), 0x7fff) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int16_t, int16, (-32767-1), 0x7fff) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int16_t, int16, (-32767-1), 0x7fff) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int16_t, int16, (-32767-1), 0x7fff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint16_t, uint16, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, add, UShortAdd) 
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, sub, UShortSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint16_t, uint16, mul, UShortMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT16) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, add, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, sub, 0xffff) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint16_t, uint16, mul, 0xffff) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint16_t, uint16, 0xffff) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint16_t, uint16, 0xffff) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint16_t, uint16, 0xffff) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int32_t, int32, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT32) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, add, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, sub, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int32_t, int32, mul, (-0x7fffffffLL-1), 0x7fffffffLL) +#else +PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +#endif +PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) +PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int32_t, int32, (-0x7fffffffLL-1), 0x7fffffffLL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint32_t, uint32, mul) +#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, add, UIntAdd) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, sub, UIntSub) +PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint32_t, uint32, mul, UIntMult) +#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT32) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, add, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, sub, 0xffffffffUL) +PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint32_t, uint32, mul, 0xffffffffUL) +#else +PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint32_t, uint32, 0xffffffffUL) +#endif +PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint32_t, uint32, 0xffffffffUL) +PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint32_t, uint32, 0xffffffffUL) + +#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, add) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, sub) +PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_int64_t, int64, mul) +#elif defined(PSNIP_SAFE_HAVE_LARGER_INT64) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, add, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL) +PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, 
sub, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_PROMOTED_SIGNED_BINARY_OP(psnip_int64_t, int64, mul, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#else
+PSNIP_SAFE_DEFINE_SIGNED_ADD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_SUB(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MUL(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+#endif
+PSNIP_SAFE_DEFINE_SIGNED_DIV(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_MOD(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+PSNIP_SAFE_DEFINE_SIGNED_NEG(psnip_int64_t, int64, (-0x7fffffffffffffffLL-1), 0x7fffffffffffffffLL)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, add)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, sub)
+PSNIP_SAFE_DEFINE_BUILTIN_BINARY_OP(psnip_uint64_t, uint64, mul)
+#elif defined(PSNIP_SAFE_HAVE_INTSAFE_H) && defined(_WIN32)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, add, ULongLongAdd)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, sub, ULongLongSub)
+PSNIP_SAFE_DEFINE_INTSAFE(psnip_uint64_t, uint64, mul, ULongLongMult)
+#elif defined(PSNIP_SAFE_HAVE_LARGER_UINT64)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, add, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, sub, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_PROMOTED_UNSIGNED_BINARY_OP(psnip_uint64_t, uint64, mul, 0xffffffffffffffffULL)
+#else
+PSNIP_SAFE_DEFINE_UNSIGNED_ADD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_SUB(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MUL(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+#endif
+PSNIP_SAFE_DEFINE_UNSIGNED_DIV(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+PSNIP_SAFE_DEFINE_UNSIGNED_MOD(psnip_uint64_t, uint64, 0xffffffffffffffffULL)
+
+#endif /* !defined(PSNIP_SAFE_NO_FIXED) */
+
+#define PSNIP_SAFE_C11_GENERIC_SELECTION(res, op) \
+ _Generic((*res), \
+ char: psnip_safe_char_##op, \
+ unsigned char: psnip_safe_uchar_##op, \
+ short: psnip_safe_short_##op, \
+ unsigned short: psnip_safe_ushort_##op, \
+ int: psnip_safe_int_##op, \
+ unsigned int: psnip_safe_uint_##op, \
+ long: psnip_safe_long_##op, \
+ unsigned long: psnip_safe_ulong_##op, \
+ long long: psnip_safe_llong_##op, \
+ unsigned long long: psnip_safe_ullong_##op)
+
+#define PSNIP_SAFE_C11_GENERIC_BINARY_OP(op, res, a, b) \
+ PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, a, b)
+#define PSNIP_SAFE_C11_GENERIC_UNARY_OP(op, res, v) \
+ PSNIP_SAFE_C11_GENERIC_SELECTION(res, op)(res, v)
+
+#if defined(PSNIP_SAFE_HAVE_BUILTIN_OVERFLOW)
+#define psnip_safe_add(res, a, b) !__builtin_add_overflow(a, b, res)
+#define psnip_safe_sub(res, a, b) !__builtin_sub_overflow(a, b, res)
+#define psnip_safe_mul(res, a, b) !__builtin_mul_overflow(a, b, res)
+#define psnip_safe_div(res, a, b) !__builtin_div_overflow(a, b, res)
+#define psnip_safe_mod(res, a, b) !__builtin_mod_overflow(a, b, res)
+#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v)
+
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* There are no fixed-length or size selections because they cause an
+ * error about _Generic specifying two compatible types.
Hopefully + * this doesn't cause problems on exotic platforms, but if it does + * please let me know and I'll try to figure something out. */ + +#define psnip_safe_add(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(add, res, a, b) +#define psnip_safe_sub(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(sub, res, a, b) +#define psnip_safe_mul(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mul, res, a, b) +#define psnip_safe_div(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(div, res, a, b) +#define psnip_safe_mod(res, a, b) PSNIP_SAFE_C11_GENERIC_BINARY_OP(mod, res, a, b) +#define psnip_safe_neg(res, v) PSNIP_SAFE_C11_GENERIC_UNARY_OP (neg, res, v) +#endif + +#if !defined(PSNIP_SAFE_HAVE_BUILTINS) && (defined(PSNIP_SAFE_EMULATE_NATIVE) || defined(PSNIP_BUILTIN_EMULATE_NATIVE)) +# define __builtin_sadd_overflow(a, b, res) (!psnip_safe_int_add(res, a, b)) +# define __builtin_saddl_overflow(a, b, res) (!psnip_safe_long_add(res, a, b)) +# define __builtin_saddll_overflow(a, b, res) (!psnip_safe_llong_add(res, a, b)) +# define __builtin_uadd_overflow(a, b, res) (!psnip_safe_uint_add(res, a, b)) +# define __builtin_uaddl_overflow(a, b, res) (!psnip_safe_ulong_add(res, a, b)) +# define __builtin_uaddll_overflow(a, b, res) (!psnip_safe_ullong_add(res, a, b)) + +# define __builtin_ssub_overflow(a, b, res) (!psnip_safe_int_sub(res, a, b)) +# define __builtin_ssubl_overflow(a, b, res) (!psnip_safe_long_sub(res, a, b)) +# define __builtin_ssubll_overflow(a, b, res) (!psnip_safe_llong_sub(res, a, b)) +# define __builtin_usub_overflow(a, b, res) (!psnip_safe_uint_sub(res, a, b)) +# define __builtin_usubl_overflow(a, b, res) (!psnip_safe_ulong_sub(res, a, b)) +# define __builtin_usubll_overflow(a, b, res) (!psnip_safe_ullong_sub(res, a, b)) + +# define __builtin_smul_overflow(a, b, res) (!psnip_safe_int_mul(res, a, b)) +# define __builtin_smull_overflow(a, b, res) (!psnip_safe_long_mul(res, a, b)) +# define __builtin_smulll_overflow(a, b, res) (!psnip_safe_llong_mul(res, a, b)) +# define __builtin_umul_overflow(a, b, res) (!psnip_safe_uint_mul(res, a, b)) +# define __builtin_umull_overflow(a, b, res) (!psnip_safe_ulong_mul(res, a, b)) +# define __builtin_umulll_overflow(a, b, res) (!psnip_safe_ullong_mul(res, a, b)) +#endif + +#endif /* !defined(PSNIP_SAFE_H) */ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h new file mode 100644 index 0000000000000000000000000000000000000000..a18e8c762daaaaa0db8974232a876315ed6ca6f2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/xxhash/xxhash.h @@ -0,0 +1,6773 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (C) 2012-2021 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +/*! + * @mainpage xxHash + * + * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed + * limits. + * + * It is proposed in four flavors, in three families: + * 1. @ref XXH32_family + * - Classic 32-bit hash function. Simple, compact, and runs on almost all + * 32-bit and 64-bit systems. + * 2. @ref XXH64_family + * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most + * 64-bit systems (but _not_ 32-bit systems). + * 3. @ref XXH3_family + * - Modern 64-bit and 128-bit hash function family which features improved + * strength and performance across the board, especially on smaller data. + * It benefits greatly from SIMD and 64-bit without requiring it. + * + * Benchmarks + * --- + * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. + * The open source benchmark program is compiled with clang v10.0 using -O3 flag. + * + * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity | + * | -------------------- | ------- | ----: | ---------------: | ------------------: | + * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 | + * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 | + * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 | + * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 | + * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 | + * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 | + * | RAM sequential read | | N/A | 28.0 GB/s | N/A | + * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 | + * | City64 | | 64 | 22.0 GB/s | 76.6 | + * | T1ha2 | | 64 | 22.0 GB/s | 99.0 | + * | City128 | | 128 | 21.7 GB/s | 57.7 | + * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 | + * | XXH64() | | 64 | 19.4 GB/s | 71.0 | + * | SpookyHash | | 64 | 19.3 GB/s | 53.2 | + * | Mum | | 64 | 18.0 GB/s | 67.0 | + * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 | + * | XXH32() | | 32 | 9.7 GB/s | 71.9 | + * | City32 | | 32 | 9.1 GB/s | 66.0 | + * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 | + * | Murmur3 | | 32 | 3.9 GB/s | 56.1 | + * | SipHash* | | 64 | 3.0 GB/s | 43.2 | + * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 | + * | HighwayHash | | 64 | 1.4 GB/s | 6.0 | + * | FNV64 | | 64 | 1.2 GB/s | 62.7 | + * | Blake2* | | 256 | 1.1 GB/s | 5.1 | + * | SHA1* | | 160 | 0.8 GB/s | 5.6 | + * | MD5* | | 128 | 0.6 GB/s | 7.8 | + * @note + * - Hashes which require a specific ISA extension are noted. SSE2 is also noted, + * even though it is mandatory on x64. + * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic + * by modern standards. 
+ * - Small data velocity is a rough average of algorithm's efficiency for small
+ *   data. For more accurate information, see the wiki.
+ * - More benchmarks and strength tests are found on the wiki:
+ *   https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ *   For functions which take an input and length parameter, the following
+ *   requirements are assumed:
+ *   - The range from [`input`, `input + length`) is valid, readable memory.
+ *     - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ *   - For C++, the objects must have the *TriviallyCopyable* property, as the
+ *     functions access bytes directly as if it was an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ *   #include <string.h>
+ *   #include "xxhash.h"
+ *
+ *   // Example for a function which hashes a null terminated string with XXH32().
+ *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ *   {
+ *       // NULL pointers are only valid if the length is zero
+ *       size_t length = (string == NULL) ? 0 : strlen(string);
+ *       return XXH32(string, length, seed);
+ *   }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ *   #include <stdio.h>
+ *   #include <assert.h>
+ *   #include "xxhash.h"
+ *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ *   XXH64_hash_t hashFile(FILE* f)
+ *   {
+ *       // Allocate a state struct. Do not just use malloc() or new.
+ *       XXH3_state_t* state = XXH3_createState();
+ *       assert(state != NULL && "Out of memory!");
+ *       // Reset the state to start a new hashing session.
+ *       XXH3_64bits_reset(state);
+ *       char buffer[4096];
+ *       size_t count;
+ *       // Read the file in chunks
+ *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ *           // Run update() as many times as necessary to process the data
+ *           XXH3_64bits_update(state, buffer, count);
+ *       }
+ *       // Retrieve the finalized hash. This will not change the state.
+ *       XXH64_hash_t result = XXH3_64bits_digest(state);
+ *       // Free the state. Do not use free().
+ *       XXH3_freeState(state);
+ *       return result;
+ *   }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ *  INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ *   #define XXH_STATIC_LINKING_ONLY
+ *   #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #define XXH_IMPLEMENTATION + * #include "xxhash.h" + * @endcode + */ +# define XXH_IMPLEMENTATION +/* Do not undef XXH_IMPLEMENTATION for Doxygen */ + +/*! + * @brief Exposes the implementation and marks all functions as `inline`. + * + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ +# define XXH_INLINE_ALL +# undef XXH_INLINE_ALL +/*! + * @brief Exposes the implementation without marking functions as inline. + */ +# define XXH_PRIVATE_API +# undef XXH_PRIVATE_API +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif + + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE + + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/*! @brief Marks a global symbol. 
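The inline mode that closes out above is driven entirely by macros defined before inclusion. Before the stable API declarations continue below, a minimal consuming translation unit (illustrative sketch):

```c
/* Inline xxHash privately into this translation unit: all functions become
 * static (internally prefixed XXH_INLINE_), so no symbols are exported. */
#define XXH_INLINE_ALL
#include "xxhash.h"

#include <stdio.h>

int main(void)
{
    static const char msg[] = "inline mode demo";
    printf("%08x\n", (unsigned)XXH32(msg, sizeof(msg) - 1, 0));
    return 0;
}
```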
*/ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) +/* XXH3_128bits */ +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) +# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define 
XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+*  Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+#  ifdef XXH_EXPORT
+#   define XXH_PUBLIC_API __declspec(dllexport)
+#  elif XXH_IMPORT
+#   define XXH_PUBLIC_API __declspec(dllimport)
+#  endif
+# else
+#  define XXH_PUBLIC_API   /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF  __attribute__((const))
+# define XXH_PUREF   __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF  /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    8
+#define XXH_VERSION_RELEASE  2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Common basic types
+******************************/
+#include <stddef.h>   /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {
+    XXH_OK = 0, /*!< OK */
+    XXH_ERROR   /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+*  32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint32_t XXH32_hash_t;
+
+#else
+#   include <limits.h>
+#   if UINT_MAX == 0xFFFFFFFFUL
+      typedef unsigned int XXH32_hash_t;
+#   elif ULONG_MAX == 0xFFFFFFFFUL
+      typedef unsigned long XXH32_hash_t;
+#   else
+#     error "unsupported platform: need a 32-bit type"
+#   endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ *   and 64-bit systems, and offers true 64/128 bit hash results.
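The version encoding above packs major, minor, and release as two decimal digits each, so the runtime value decodes with plain integer division. A small sanity-check sketch (illustrative):

```c
#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    /* XXH_VERSION_NUMBER is MAJOR*10000 + MINOR*100 + RELEASE,
     * so v0.8.2 yields 802. */
    unsigned v = XXH_versionNumber();
    printf("linked against xxHash %u.%u.%u\n",
           v / (100 * 100), (v / 100) % 100, v % 100);

    /* one-shot 32-bit hash of a short buffer, seed 0 */
    printf("XXH32(\"abc\") = %08x\n", (unsigned)XXH32("abc", 3, 0));
    return 0;
}
```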
+ * + * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families + * @see @ref XXH32_impl for implementation details + * @{ + */ + +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * See @ref single_shot_example "Single Shot Example" for an example. + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +#ifndef XXH_NO_STREAM +/*! + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * @see streaming_example at the top of @ref xxhash.h for an example. + */ + +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; + +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. 
+ * @param seed The 32-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH32_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH32_state_t. + * + * @note + * Calling XXH32_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash32 value from that state. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/* + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * This the simplest and fastest format for further post-processing. + * + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. + * + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). + * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format. + */ + +/*! + * @brief Canonical (big endian) representation of @ref XXH32_hash_t. + */ +typedef struct { + unsigned char digest[4]; /*!< Hash bytes, big endian */ +} XXH32_canonical_t; + +/*! + * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t. + * + * @param dst The @ref XXH32_canonical_t pointer to be stored to. + * @param hash The @ref XXH32_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); + +/*! + * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t. + * + * @param src The @ref XXH32_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + + +/*! @cond Doxygen ignores this part */ +#ifdef __has_attribute +# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define XXH_HAS_ATTRIBUTE(x) 0 +#endif +/*! @endcond */ + +/*! 
@cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it's been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+*  64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#  include <stdint.h>
+   typedef uint64_t XXH64_hash_t;
+#else
+#  include <limits.h>
+#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+     /* LP64 ABI says uint64_t is unsigned long */
+     typedef unsigned long XXH64_hash_t;
+#  else
+     /* the following type must have a width of 64-bit */
+     typedef unsigned long long XXH64_hash_t;
+#  endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ *   and offers true 64/128 bit hash results.
+ *   It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 64-bit hash. + * + * @see + * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/*! + * @brief The opaque state struct for the XXH64 streaming API. + * + * @see XXH64_state_s for details. + */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! + * @brief Allocates an @ref XXH64_state_t. + * + * Must be freed with XXH64_freeState(). + * @return An allocated XXH64_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t. + * + * Must be allocated with XXH64_createState(). + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH64_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash64 value from that state. 
+ */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ +/******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; + +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. + * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); + +#ifndef XXH_NO_XXH3 + +/*! + * @} + * ************************************************************************ + * @defgroup XXH3_family XXH3 family + * @ingroup public + * @{ + * + * XXH3 is a more recent hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * Compared to XXH64, expect XXH3 to run approximately + * ~2x faster on large inputs and >3x faster on small ones, + * exact differences vary depending on platform. + * + * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, + * but does not require it. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, an + * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * + * XXH3 implementation is portable: + * it has a generic C90 formulation that can be compiled on any platform, + * all implementations generate exactly the same hash value on all platforms. + * Starting from v0.8.0, it's also labelled "stable", meaning that + * any future version will also generate the same hash value. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * + * When only 64 bits are needed, prefer invoking the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/*! + * @brief 64-bit unseeded variant of XXH3. 
+ * + * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms + * @see + * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants + * @see + * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief 64-bit seeded variant of XXH3 + * + * This variant generates a custom secret on the fly based on default secret + * altered using the `seed` value. + * + * While this operation is decently fast, note that it's not completely free. + * + * @note + * seed == 0 produces the same results as @ref XXH3_64bits(). + * + * @param input The data to hash + * @param length The length + * @param seed The 64-bit seed to alter the state. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); + +/*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ +#define XXH3_SECRET_SIZE_MIN 136 + +/*! + * @brief 64-bit variant of XXH3 with a custom "secret". + * + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing "XXH3_generateSecret()" instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); + + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + */ + +/*! + * @brief The state struct for the XXH3 streaming API. + * + * @see XXH3_state_s for details. + */ +typedef struct XXH3_state_s XXH3_state_t; +XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); + +/*! + * @brief Copies one @ref XXH3_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. 
+ */ +XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state); + +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * This function resets `statePtr` and generate a secret with default parameters. Call it before @ref XXH3_64bits_update(). + * Digest will be equivalent to `XXH3_64bits()`. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); + +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * This function resets `statePtr` and generate a secret from `seed`. Call it before @ref XXH3_64bits_update(). + * Digest will be equivalent to `XXH3_64bits_withSeed()`. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the state. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); + +/*! + * XXH3_64bits_reset_withSecret(): + * `secret` is referenced, it _must outlive_ the hash streaming session. + * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`, + * and the quality of produced hash values depends on secret's entropy + * (secret's content should look like a bunch of random bytes). + * When in doubt about the randomness of a candidate `secret`, + * consider employing `XXH3_generateSecret()` instead (see below). + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); + +/*! + * @brief Consumes a block of @p input to an @ref XXH3_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); + +/*! + * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t. + * + * @note + * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated XXH3 64-bit hash value from that state. + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); +#endif /* !XXH_NO_STREAM */ + +/* note : canonical representation of XXH3 is the same as XXH64 + * since they both produce XXH64_hash_t values */ + + +/*-********************************************************************** +* XXH3 128-bit variant +************************************************************************/ + +/*! + * @brief The return value from 128-bit hashes. 
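Before the 128-bit structures are described, the 64-bit streaming calls declared above compose as follows; a sketch where the chunk size and error handling are illustrative choices:

```c
#include <stddef.h>
#include "xxhash.h"

/* Stream a buffer through a seeded XXH3-64 state in fixed-size chunks.
 * The result equals XXH3_64bits_withSeed(data, len, seed). */
XXH64_hash_t hash_chunked(const unsigned char* data, size_t len, XXH64_hash_t seed)
{
    XXH64_hash_t h = 0;
    XXH3_state_t* state = XXH3_createState();
    if (state == NULL) return 0;  /* allocation failure; error policy is the caller's */
    if (XXH3_64bits_reset_withSeed(state, seed) == XXH_OK) {
        size_t pos = 0;
        while (pos < len) {
            size_t chunk = (len - pos < 4096) ? (len - pos) : 4096;
            XXH3_64bits_update(state, data + pos, chunk);
            pos += chunk;
        }
        h = XXH3_64bits_digest(state);
    }
    XXH3_freeState(state);
    return h;
}
```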
+ * + * Stored in little endian order, although the fields themselves are in native + * endianness. + */ +typedef struct { + XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */ + XXH64_hash_t high64; /*!< `value >> 64` */ +} XXH128_hash_t; + +/*! + * @brief Unseeded 128-bit variant of XXH3 + * + * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead + * for shorter inputs. + * + * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however + * it may have slightly better performance due to constant propagation of the + * defaults. + * + * @see + * XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms + * @see + * XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants + * @see + * XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len); +/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); +/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); + +/******* Streaming *******/ +#ifndef XXH_NO_STREAM +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + * + * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits(). + * Use already declared XXH3_createState() and XXH3_freeState(). + * + * All reset and streaming functions have same meaning as their 64-bit counterpart. + */ + +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * This function resets `statePtr` and generate a secret with default parameters. Call it before @ref XXH3_128bits_update(). + * Digest will be equivalent to `XXH3_128bits()`. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); + +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * This function resets `statePtr` and generate a secret from `seed`. Call it before @ref XXH3_128bits_update(). + * Digest will be equivalent to `XXH3_128bits_withSeed()`. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the state. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); +/*! @brief Custom secret 128-bit variant of XXH3. @see XXH_64bits_reset_withSecret(). */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); + +/*! + * @brief Consumes a block of @p input to an @ref XXH3_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. 
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ *   digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return: >0 if *h128_1  > *h128_2
+ *          =0 if *h128_1 == *h128_2
+ *          <0 if *h128_1  < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
+
+
+/*******   Canonical representation   *******/
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif  /* !XXH_NO_XXH3 */
+#endif  /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
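The comparator contract above slots directly into the C standard library. A short sketch (the helper name is hypothetical):

```c
#include <stdlib.h>
#include "xxhash.h"

/* Sort 128-bit hashes, then test membership; XXH128_cmp has exactly the
 * comparator signature that qsort() and bsearch() expect. */
static int contains_hash(XXH128_hash_t* hashes, size_t n, XXH128_hash_t needle)
{
    qsort(hashes, n, sizeof(XXH128_hash_t), XXH128_cmp);
    return bsearch(&needle, hashes, n, sizeof(XXH128_hash_t), XXH128_cmp) != NULL;
}
```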
+ * Never **ever** access their members directly. + */ + +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. + * @see XXH64_state_s, XXH3_state_s + */ +struct XXH32_state_s { + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */ +}; /* typedef'd to XXH32_state_t */ + + +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ +struct XXH64_state_s { + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ +}; /* typedef'd to XXH64_state_t */ + +#ifndef XXH_NO_XXH3 + +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ +# include +# define XXH_ALIGN(n) alignas(n) +#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ +/* In C++ alignas() is a keyword */ +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) +#else +# define XXH_ALIGN(n) /* disabled */ +#endif + +/* Old GCC versions only accept the attribute after the type in structures. */ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ + && defined(__GNUC__) +# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) +#else +# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type +#endif + +/*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64b_update(), XXH3_128b_update(). + */ +#define XXH3_INTERNALBUFFER_SIZE 256 + +/*! + * @internal + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ +#define XXH3_SECRET_DEFAULT_SIZE 192 + +/*! + * @internal + * @brief Structure for XXH3 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. 
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with a dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+       /*!< Used to store a custom secret generated from a seed. */
+   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+       /*!< The internal buffer. @see XXH32_state_s::mem32 */
+   XXH32_hash_t bufferedSize;
+       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+   XXH32_hash_t useSeed;
+       /*!< Reserved field. Needed for padding on 64-bit. */
+   size_t nbStripesSoFar;
+       /*!< Number of stripes processed. */
+   XXH64_hash_t totalLen;
+       /*!< Total length hashed. 64-bit even on 32-bit targets. */
+   size_t nbStripesPerBlock;
+       /*!< Number of stripes per block. */
+   size_t secretLimit;
+       /*!< Size of @ref customSecret or @ref extSecret */
+   XXH64_hash_t seed;
+       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+   XXH64_hash_t reserved64;
+       /*!< Reserved field. */
+   const unsigned char* extSecret;
+       /*!< Reference to an external secret for the _withSecret variants, NULL
+        *   for other variants. */
+   /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr)                       \
+    do {                                                     \
+        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+        tmp_xxh3_state_ptr->seed = 0;                        \
+        tmp_xxh3_state_ptr->extSecret = NULL;                \
+    } while(0)
+
+
+/*!
+ * simple alias to pre-selected XXH3_128bits variant
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* ===   Experimental API   === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
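The stack-allocation path described by XXH3_INITSTATE() above, as a compact sketch before the experimental-API details continue (assumes XXH_STATIC_LINKING_ONLY so the state definition is visible; the function name is hypothetical):

```c
#define XXH_STATIC_LINKING_ONLY  /* expose XXH3_state_t's definition */
#include "xxhash.h"

/* Hash a buffer with a stack-allocated state; no heap allocation involved. */
XXH64_hash_t hash_on_stack(const void* data, size_t len, XXH64_hash_t seed)
{
    XXH3_state_t state;      /* 64-byte alignment is handled by the struct itself */
    XXH3_INITSTATE(&state);  /* required because the first reset uses a seed */
    XXH3_64bits_reset_withSeed(&state, seed);
    XXH3_64bits_update(&state, data, len);
    return XXH3_64bits_digest(&state);
}
```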
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ *    #include <stdio.h>
+ *    #include <string.h>
+ *    #include <stdlib.h>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Hashes argv[2] using the entropy from argv[1].
+ *    int main(int argc, char* argv[])
+ *    {
+ *        char secret[XXH3_SECRET_SIZE_MIN];
+ *        if (argc != 3) { return 1; }
+ *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ *        XXH64_hash_t h = XXH3_64bits_withSecret(
+ *             argv[2], strlen(argv[2]),
+ *             secret, sizeof(secret)
+ *        );
+ *        printf("%016llx\n", (unsigned long long) h);
+ *    }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `*_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ *    #include <string>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Slow, seeds each time
+ *    class HashSlow {
+ *        XXH64_hash_t seed;
+ *    public:
+ *        HashSlow(XXH64_hash_t s) : seed{s} {}
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ *        }
+ *    };
+ *    // Fast, caches the seeded secret for future uses.
+ *    class HashFast {
+ *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    public:
+ *        HashFast(XXH64_hash_t s) {
+ *            XXH3_generateSecret_fromSeed(secret, s);
+ *        }
+ *        size_t operator()(const std::string& x) const {
+ *            return size_t{
+ *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ *            };
+ *        }
+ *    };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
+ * @param seed The seed to derive the secret from.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
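+ * It's fast, but the cost can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than the _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variants combine the best of both worlds.
+ *
+ * As an illustrative sketch (buffer and variable names are hypothetical,
+ * error handling omitted): derive the secret from the seed once, then pass
+ * both to the combined variant on every call:
+ * @code{.c}
+ *    unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    XXH3_generateSecret_fromSeed(secret, seed);
+ *    XXH64_hash_t h = XXH3_64bits_withSecretandSeed(
+ *                         data, len, secret, sizeof(secret), seed);
+ * @endcode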
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as the `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to swap each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+                              XXH_NOESCAPE const void* secret, size_t secretSize,
+                              XXH64_hash_t seed);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+                               XXH_NOESCAPE const void* secret, size_t secretSize,
+                               XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                    XXH_NOESCAPE const void* secret, size_t secretSize,
+                                    XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+                                     XXH_NOESCAPE const void* secret, size_t secretSize,
+                                     XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif  /* !XXH_NO_XXH3 */
+#endif  /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#  define XXH_IMPLEMENTATION
+#endif
+
+#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires the implementation to be visible to the compiler,
+ * hence to be included alongside the header.
+ * Previously, the implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in the /include directory.
+ *
+ * The xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
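+ *
+ * For example, a typical non-inlined build could look like this
+ * (file names are illustrative):
+ *
+ *     cc -O3 -c xxhash.c -o xxhash.o
+ *     cc -O3 -c app.c    -o app.o
+ *     cc app.o xxhash.o  -o app
+ *
+ * whereas an XXH_INLINE_ALL build simply defines that macro before
+ * including xxhash.h and needs no xxhash.o at all.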
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+#  define XXH_IMPLEM_13a8737387
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+#  define XXH_NO_LONG_LONG
+#  undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The below switch allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ *   @par
+ *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ *     eliminate the function call and treat it as an unaligned access.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ *   @par
+ *     Depends on compiler extensions and is therefore not portable.
+ *     This method is safe _if_ your compiler supports it,
+ *     and *generally* as fast or faster than `memcpy`.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ *   @par
+ *     Casts directly and dereferences. This method doesn't depend on the
+ *     compiler, but it violates the C standard as it directly dereferences an
+ *     unaligned pointer. It can generate buggy code on targets which do not
+ *     support unaligned memory accesses, but in some circumstances, it's the
+ *     only known way to get the most performance.
+ *
+ *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ *   @par
+ *     Also portable. This can generate the best code on old compilers which don't
+ *     inline small `memcpy()` calls, and it might also be faster on big-endian
+ *     systems which lack a native byteswap instruction. However, some compilers
+ *     will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ *   care, as what works on one compiler/platform/optimization level may cause
+ *   another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+#  define XXH_FORCE_MEMORY_ACCESS 0
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage of forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code.
When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed + * comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies the + * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, + * and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single shot functions just use the + * streaming API. + */ +# define XXH_SIZE_OPT 0 + +/*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips + * which are platforms known to offer good unaligned memory accesses performance. + * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ +# define XXH_FORCE_ALIGN_CHECK 0 + +/*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. + */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. This + * happens when xxHash is compiled into a global symbol. Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers + * that are *sometimes* force inline on -Og, and it is impossible to automatically + * detect this optimization level. 
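+ *
+ * Since the definition below is guarded by `#ifndef`, a build that knows
+ * better can still opt in or out explicitly, e.g. with a hypothetical
+ * `-DXXH3_INLINE_SECRET=1` on the compiler command line.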
+ */
+#  define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This generally helps performance,
+ * but depending on the exact architecture, a jmp may be preferable.
+ *
+ * This setting is only likely to make a difference for very small inputs.
+ */
+#  define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+#  define XXH_OLD_NAMES
+#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+#  define XXH_NO_STREAM
+#  undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+   /* prefer __packed__ structures (method 1) for GCC
+    * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
+    * which for some reason does unaligned loads. */
+#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+   /* default to 1 for -Os or -Oz */
+#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+#    define XXH_SIZE_OPT 1
+#  else
+#    define XXH_SIZE_OPT 0
+#  endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
+   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+#  if XXH_SIZE_OPT >= 1 || \
+      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
+#    define XXH_NO_INLINE_HINTS 1
+#  else
+#    define XXH_NO_INLINE_HINTS 0
+#  endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+     || !defined(XXH_INLINE_ALL)
+#    define XXH3_INLINE_SECRET 0
+#  else
+#    define XXH3_INLINE_SECRET 1
+#  endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+#  define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc() / free().
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
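+ *
+ * For example (illustrative only), with XXH_NO_STDLIB defined,
+ *
+ *     XXH3_state_t* s = XXH3_createState();   // always NULL here
+ *
+ * always fails, so callers must use stack-allocated states
+ * (see XXH3_INITSTATE()) instead of heap-based ones.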
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif  /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+    return memcpy(dest,src,size);
+}
+
+#include <limits.h>   /* ULLONG_MAX */
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
+#  if defined(__GNUC__) || defined(__clang__)
+#    define XXH_FORCE_INLINE static __attribute__((unused))
+#  else
+#    define XXH_FORCE_INLINE static
+#  endif
+#  define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+#  define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER)  /* Visual Studio */
+#  define XXH_FORCE_INLINE static __forceinline
+#  define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
+#  define XXH_FORCE_INLINE static inline
+#  define XXH_NO_INLINE static
+#else
+#  define XXH_FORCE_INLINE static
+#  define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+*  Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+#  ifdef DEBUGLEVEL /* backwards compat */
+#    define XXH_DEBUGLEVEL DEBUGLEVEL
+#  else
+#    define XXH_DEBUGLEVEL 0
+#  endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+#  include <assert.h>   /* note: can still be disabled with NDEBUG */
+#  define XXH_ASSERT(c)   assert(c)
+#else
+#  if defined(__INTEL_COMPILER)
+#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
+#  else
+#    define XXH_ASSERT(c)   XXH_ASSUME(c)
+#  endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+#  else
+#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+#  endif
+#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+#  define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors which use the "w" constraint, on
+ * Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+#  define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+#  define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
+
+/* *************************************
+*  Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#  include <stdint.h>
+  typedef uint8_t xxh_u8;
+#else
+  typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+#ifdef XXH_OLD_NAMES
+#  warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+#  define BYTE xxh_u8
+#  define U8   xxh_u8
+#  define U32  xxh_u32
+#endif
+
+/* ***   Memory access   *** */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_read32(const void* ptr)
+ * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit native endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readBE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit big endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
+ * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
+ * always @ref XXH_alignment::XXH_unaligned.
+ *
+ * @param ptr The pointer to read from.
+ * @param align Whether @p ptr is aligned.
+ * @pre
+ *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
+ *   aligned.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
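+ * For example (hypothetical build line), this path can be selected
+ * explicitly with `cc -O2 -DXXH_FORCE_MEMORY_ACCESS=3 -c xxhash.c`.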
+ */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianness *** */ + +/*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + + + +/* + * C23 and future versions have standard "unreachable()". 
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * #  include <stddef.h>
+ * #  ifdef unreachable
+ * #    define XXH_UNREACHABLE() unreachable()
+ * #  endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * #  include <utility>
+ * #  define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC12
+ */
+
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+#  define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+#  define XXH_UNREACHABLE() __assume(0)
+
+#else
+#  define XXH_UNREACHABLE()
+#endif
+
+#if XXH_HAS_BUILTIN(__builtin_assume)
+#  define XXH_ASSUME(c) __builtin_assume(c)
+#else
+#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ *   @p r > 0 && @p r < 32
+ * @note
+ *   @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+#  define XXH_rotl32 __builtin_rotateleft32
+#  define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+typedef enum {
+    XXH_aligned,  /*!< Aligned */
+    XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[0]
+         | ((xxh_u32)bytePtr[1] << 8)
+         | ((xxh_u32)bytePtr[2] << 16)
+         | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+    return bytePtr[3]
+         | ((xxh_u32)bytePtr[2] << 8)
+         | ((xxh_u32)bytePtr[1] << 16)
+         | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ?
XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +/*! @ingroup public */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +/*! + * @} + * @defgroup XXH32_impl XXH32 implementation + * @ingroup impl + * + * Details on the XXH32 implementation. + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +/*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. 
+ * + * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing + * the loop. NEON is only faster on the A53, and with the newer cores, it is less + * than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param hash The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 hash) +{ + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. + * @see XXH64_finalize(). + */ +static XXH_PUREF xxh_u32 +XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(hash); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(hash); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: return XXH32_avalanche(hash); + } + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. 
+ * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + xxh_u32 h32; + + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=16) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ +#ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; +} + + +/*! 
@ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + + do { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); + } else { + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! + * @ingroup XXH32_family + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ +/*! + * @} + * @ingroup impl + * @{ + */ +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. 
Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually it + * can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64*)ptr); +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64(xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ +/*! + * @} + * @defgroup XXH64_impl XXH64 implementation + * @ingroup impl + * + * Details on the XXH64 implementation. 
+ * @{ + */ +/* #define rather that static const, to be used as initializers */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ +#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif + +/*! @copydoc XXH32_round */ +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; + return acc; +} + +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; +} + +/*! @copydoc XXH32_avalanche */ +static xxh_u64 XXH64_avalanche(xxh_u64 hash) +{ + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). + */ +static XXH_PUREF xxh_u64 +XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ + if (ptr==NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + hash ^= k1; + hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + } + if (len >= 4) { + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + } + while (len > 0) { + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; + --len; + } + return XXH64_avalanche(hash); +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. 
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+    xxh_u64 h64;
+    if (input==NULL) XXH_ASSERT(len == 0);
+
+    if (len>=32) {
+        const xxh_u8* const bEnd = input + len;
+        const xxh_u8* const limit = bEnd - 31;
+        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+        xxh_u64 v2 = seed + XXH_PRIME64_2;
+        xxh_u64 v3 = seed + 0;
+        xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+        } while (input<limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
+            + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + XXH_PRIME64_5;
+    }
+
+    h64 += (xxh_u64) len;
+
+    return XXH64_finalize(h64, input, len, align);
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_state_t state;
+    XXH64_reset(&state, seed);
+    XXH64_update(&state, (const xxh_u8*)input, len);
+    return XXH64_digest(&state);
+#else
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+    }   }
+
+    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/*******   Hash Streaming   *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+    XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+    XXH_ASSERT(statePtr != NULL);
+    memset(statePtr, 0, sizeof(*statePtr));
+    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+    statePtr->v[1] = seed + XXH_PRIME64_2;
+    statePtr->v[2] = seed + 0;
+    statePtr->v[3] = seed - XXH_PRIME64_1;
+    return XXH_OK;
+}
+
+/*! 
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} +#endif /* !XXH_NO_STREAM */ + +/******* Canonical representation *******/ + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
Tested with GCC 5.5 */
+#  define XXH_RESTRICT   /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
+#  define XXH_RESTRICT   restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+   || (defined (__clang__)) \
+   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+#  define XXH_RESTRICT   __restrict
+#else
+#  define XXH_RESTRICT   /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
+  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+  || defined(__clang__)
+#    define XXH_likely(x) __builtin_expect(x, 1)
+#    define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+#    define XXH_likely(x) (x)
+#    define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+#  ifdef __has_include
+#    define XXH_HAS_INCLUDE(x) __has_include(x)
+#  else
+#    define XXH_HAS_INCLUDE(x) 0
+#  endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  if defined(__ARM_FEATURE_SVE)
+#    include <arm_sve.h>
+#  endif
+#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+   || (defined(_M_ARM) && _M_ARM >= 7) \
+   || defined(_M_ARM64) || defined(_M_ARM64EC) \
+   || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+#    define inline __inline__  /* circumvent a clang bug */
+#    include <arm_neon.h>
+#    undef inline
+#  elif defined(__AVX2__)
+#    include <immintrin.h>
+#  elif defined(__SSE2__)
+#    include <emmintrin.h>
+#  endif
+#endif
+
+#if defined(_MSC_VER)
+#  include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ *   xxh_u64 x;
+ *   x ^= (x >> 47); // good
+ *   x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ *   x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ *   // note: funnel shifts are not usually cheap.
+ *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ *   x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ *    32 bits in the shift.
+ *  - The shift result will always fit in the lower 32 bits, and therefore,
+ *    we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ *  - Usable unaligned access
+ *  - A 32-bit or 64-bit ALU
+ *      - If 32-bit, a decent ADC instruction
+ *  - A 32 or 64-bit multiply with a 64-bit result
+ *  - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * internal macro XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
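+ *
+ * As a hypothetical example (the flags below are illustrative, not an
+ * official build recipe), a build pinning the AVX2 path could pass both
+ * overrides on the command line:
+ *
+ *     cc -O2 -mavx2 -DXXH_VECTOR=XXH_AVX2 -DXXH_ACC_ALIGN=32 ...
+ *
+ * matching the 32-byte alignment that __m256i vector loads prefer.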
+ */
+#  define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+#  define XXH_SCALAR 0
+#  define XXH_SSE2   1
+#  define XXH_AVX2   2
+#  define XXH_AVX512 3
+#  define XXH_NEON   4
+#  define XXH_VSX    5
+#  define XXH_SVE    6
+#endif
+
+#ifndef XXH_VECTOR    /* can be defined on command line */
+#  if defined(__ARM_FEATURE_SVE)
+#    define XXH_VECTOR XXH_SVE
+#  elif ( \
+        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+   ) && ( \
+        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+   )
+#    define XXH_VECTOR XXH_NEON
+#  elif defined(__AVX512F__)
+#    define XXH_VECTOR XXH_AVX512
+#  elif defined(__AVX2__)
+#    define XXH_VECTOR XXH_AVX2
+#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+#    define XXH_VECTOR XXH_SSE2
+#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+     || (defined(__s390x__) && defined(__VEC__)) \
+     && defined(__GNUC__) /* TODO: IBM XL */
+#    define XXH_VECTOR XXH_VSX
+#  else
+#    define XXH_VECTOR XXH_SCALAR
+#  endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+#  ifdef _MSC_VER
+#    pragma warning(once : 4606)
+#  else
+#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+#  endif
+#  undef XXH_VECTOR
+#  define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+#  if defined(XXH_X86DISPATCH)
+#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
+#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
+#     define XXH_ACC_ALIGN 8
+#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
+#     define XXH_ACC_ALIGN 32
+#  elif XXH_VECTOR == XXH_NEON  /* neon */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_VSX   /* vsx */
+#     define XXH_ACC_ALIGN 16
+#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
+#     define XXH_ACC_ALIGN 64
+#  elif XXH_VECTOR == XXH_SVE   /* sve */
+#     define XXH_ACC_ALIGN 64
+#  endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+#  define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#  define XXH_ALIASING __attribute__((may_alias))
+#else
+#  define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ *   -O2 -mavx2 -march=haswell
+ * or
+ *   -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + +#if XXH_VECTOR == XXH_NEON + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 + * optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + +/*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only + * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it + * prohibits load-store optimizations. Therefore, a direct dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe + * unaligned load. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ +{ + return *(xxh_aliasing_uint64x2_t const *)ptr; +} +#else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) +{ + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); +} +#endif + +/*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. + * + * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with + * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` + * with `vmlal_u32`. + */ +#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); + return acc; +} +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); +} +#else +/* Portable intrinsic versions */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +} +/*! @copydoc XXH_vmlal_low_u32 + * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ +XXH_FORCE_INLINE uint64x2_t +XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) +{ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); +} +#endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those + * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU + * bandwidth. 
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
+ *  |:----------------------|:--------------------|----------:|-----------:|------:|
+ *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
+ *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
+ *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
+ *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
+ * meaning it effectively becomes a worse 4.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+#    define XXH3_NEON_LANES 6
+#  else
+#    define XXH3_NEON_LANES XXH_ACC_NB
+#  endif
+# endif
+#endif  /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+#  pragma push_macro("bool")
+#  pragma push_macro("vector")
+#  pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+#  undef bool
+#  undef vector
+#  undef pixel
+
+#  if defined(__s390x__)
+#    include <s390intrin.h>
+#  else
+#    include <altivec.h>
+#  endif
+
+/* Restore the original macro values, if applicable. */
+#  pragma pop_macro("pixel")
+#  pragma pop_macro("vector")
+#  pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+#  if defined(__BIG_ENDIAN__) \
+  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#    define XXH_VSX_BE 1
+#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+#    warning "-maltivec=be is not recommended. Please use native endianness."
+#    define XXH_VSX_BE 1
+#  else
+#    define XXH_VSX_BE 0
+#  endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+#    define XXH_vec_revb vec_revb
+#  else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+    return vec_perm(val, val, vByteSwap);
+}
+#  endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+    xxh_u64x2 ret;
+    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+    ret = XXH_vec_revb(ret);
+# endif
+    return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on the version.
+ * */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+#  define XXH_vec_mulo vec_mulo
+#  define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
+#  define XXH_vec_mulo __builtin_altivec_vmulouw
+#  define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+    xxh_u64x2 result;
+    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+    return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+    svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
+    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
+    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
+    svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
+    svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
+    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
+    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+    acc = svadd_u64_x(mask, acc, mul); \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
+#else
+#  if XXH_SIZE_OPT >= 1
+#    define XXH_PREFETCH(ptr) (void)(ptr)
+#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+#  else
+#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
+#  endif
+#endif  /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE <
XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. 
+ * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. 
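+ * Each one is a 32x32 -> 64 bit product computed with XXH_mult32to64, so
+ * every operand below leaves enough headroom that the additions cannot
+ * carry out of 64 bits.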
*/ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. */ +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len ; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
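+ *
+ * As a concrete illustration, mirroring XXH3_len_4to8_64b below, the 4-to-8
+ * byte path folds two sampled secret words into a single 64-bit "bitflip"
+ * before the input is even read:
+ *
+ *     bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
+ *     keyed   = input64 ^ bitflip;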
+ */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. 
+ * + * This is not too bad for a non-cryptographic hash function, especially with + * only 64 bit outputs. + * + * The 128-bit variant (which trades some speed for strength) is NOT affected + * by this, although it is always a good idea to use a proper seed if you care + * about strength. + */ +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ +#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is an interest. + */ + XXH_COMPILER_GUARD(seed64); +#endif + { xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) + ); + } +} + +/* For mid range keys, XXH3 uses a Mum-hash variant. */ +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * XXH_PRIME64_1; +#if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. 
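+ * The countdown loop below covers one 32-byte band per round: round i mixes
+ * 16 bytes from the front (offset 16*i) and 16 bytes mirrored from the back
+ * (offset len-16*(i+1)).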
*/ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); + acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); + } while (i-- != 0); +#else + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3_mix16B(input+48, secret+96, seed); + acc += XXH3_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3_mix16B(input+32, secret+64, seed); + acc += XXH3_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3_mix16B(input+16, secret+32, seed); + acc += XXH3_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3_mix16B(input+0, secret+0, seed); + acc += XXH3_mix16B(input+len-16, secret+16, seed); +#endif + return XXH3_avalanche(acc); + } +} + +#define XXH3_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * XXH_PRIME64_1; + xxh_u64 acc_end; + unsigned int const nbRounds = (unsigned int)len / 16; + unsigned int i; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + for (i=0; i<8; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); + } + /* last bytes */ + acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + XXH_ASSERT(nbRounds >= 8); + acc = XXH3_avalanche(acc); +#if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. + * In everywhere else, it uses scalar code. + * + * For 64->128-bit multiplies, even if the NEON was 100% optimal, it + * would still be slower than UMAAL (see XXH_mult64to128). + * + * Unfortunately, Clang doesn't handle the long multiplies properly and + * converts them to the nonexistent "vmulq_u64" intrinsic, which is then + * scalarized into an ugly mess of VMOV.32 instructions. + * + * This mess is difficult to avoid without turning autovectorization + * off completely, but they are usually relatively minor and/or not + * worth it to fix. + * + * This loop is the easiest to fix, as unlike XXH32, this pragma + * _actually works_ because it is a loop vectorization instead of an + * SLP vectorization. + */ + #pragma clang loop vectorize(disable) +#endif + for (i=8 ; i < nbRounds; i++) { + /* + * Prevents clang for unrolling the acc loop and interleaving with this one. + */ + XXH_COMPILER_GUARD(acc); + acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } + return XXH3_avalanche(acc + acc_end); + } +} + + +/* ======= Long Keys ======= */ + +#define XXH_STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + +#ifdef XXH_OLD_NAMES +# define STRIPE_LEN XXH_STRIPE_LEN +# define ACC_NB XXH_ACC_NB +#endif + +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * These macros are to generate an XXH3_accumulate() function. 
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
+void                                                        \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
+                       const xxh_u8* XXH_RESTRICT input,    \
+                       const xxh_u8* XXH_RESTRICT secret,   \
+                       size_t nbStripes)                    \
+{                                                           \
+    size_t n;                                               \
+    for (n = 0; n < nbStripes; n++ ) {                      \
+        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
+        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
+        XXH3_accumulate_512_##name(                         \
+                 acc,                                       \
+                 in,                                        \
+                 secret + n*XXH_SECRET_CONSUME_RATE);       \
+    }                                                       \
+}
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+    XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+  && (defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+    typedef int64_t xxh_i64;
+#else
+    /* the following type must have a width of 64-bit */
+    typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
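+ *
+ * As a rough scalar sketch of one accumulator lane (mirroring
+ * XXH3_scalarRound, which the scalar and NEON paths use further below):
+ *
+ *     data_val = XXH_readLE64(input  + 8*lane);
+ *     data_key = data_val ^ XXH_readLE64(secret + 8*lane);
+ *     acc[lane ^ 1] += data_val;   (preserve the raw input)
+ *     acc[lane]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);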
+ */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
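+ *
+ * Per 64-bit lane, each vector flavour of the scramble computes the
+ * equivalent of the scalar round (cf. XXH3_scalarScrambleRound):
+ *
+ *     acc[i] ^= acc[i] >> 47;
+ *     acc[i] ^= XXH_readLE64(secret + 8*i);
+ *     acc[i] *= XXH_PRIME32_1;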
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i* const xsecret = (const __m256i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xsecret = (const __m256i *) secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); + + const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); + __m256i* dest = ( __m256i*) customSecret; + +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dest); +# endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); + dest[5] 
= _mm256_add_epi64(_mm256_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ + const __m128i* const xsecret = (const __m128i *) secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + +# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; + __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); +# else + __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); +# endif + int i; + + const void* const src16 = XXH3_kSecret; + __m128i* dst16 = (__m128i*) customSecret; +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dst16); +# endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); + + for (i=0; i < nbRounds; ++i) { + dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_NEON) + +/* forward declarations for the scalar routines */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, size_t lane); + +XXH_FORCE_INLINE void +XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT secret, size_t lane); + +/*! + * @internal + * @brief The bulk processing loop for NEON and WASM SIMD128. + * + * The NEON code path is actually partially scalar when running on AArch64. This + * is to optimize the pipelining and can have up to 15% speedup depending on the + * CPU, and it also mitigates some GCC codegen issues. + * + * @see XXH3_NEON_LANES for configuring this and details about this optimization. + * + * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit + * integers instead of the other platforms which mask full 64-bit vectors, + * so the setup is more complicated than just shifting right. + * + * Additionally, there is an optimization for 4 lanes at once noted below. + * + * Since, as stated, the most optimal amount of lanes for Cortexes is 6, + * there needs to be *three* versions of the accumulate operation used + * for the remaining 2 lanes. + * + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap + * nearly perfectly. 
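+ *
+ * For example, with the default XXH3_NEON_LANES == 6 on a non-Apple AArch64
+ * target, the 8 accumulator lanes split as:
+ *
+ *     lanes 0-5 : three uint64x2_t vectors, handled by the NEON loops below
+ *     lanes 6-7 : XXH3_scalarRound(), issued on the integer pipeline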
+ */ + +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ + uint8_t const* xinput = (const uint8_t *) input; + uint8_t const* xsecret = (const uint8_t *) secret; + + size_t i; +#ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); +#endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + i = 0; + /* 4 NEON lanes at a time. */ + for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32( + vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2) + ); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for most + * widening intrinsics, have a variant that works on both high half vectors + * for free on AArch64. A similar instruction is available on SIMD128. 
+ * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); + } + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64 (xacc[i], sum); + } + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. 
*/ +#ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); +#endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ +#ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; +#else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector + * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits + * and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); +#endif + } + } +} +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ + xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + xacc[i] = acc_vec; + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; + const xxh_u8* const xsecret = (const xxh_u8*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / 
sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } +} + +XXH_FORCE_INLINE void +XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes) +{ + if (nbStripes != 0) { + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + } else if (element_count == 2) { /* sve128 */ + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { + svbool_t mask = 
svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + } + } +} + +#endif + +/* scalar variants - universal */ + +#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, only + * big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does + * not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); + return ret; +} +#else +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) +{ + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +} +#endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__arm__) || defined(__thumb2__)) \ + && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ + && XXH_SIZE_OPT <= 0 +# pragma GCC unroll 8 +#endif + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
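+ *
+ * For example, with XXH3_NEON_LANES == 6 (the usual AArch64 default; the
+ * value is tunable), lanes 0-5 are scrambled with NEON while lanes 6 and 7
+ * go through this scalar routine.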
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+                         void const* XXH_RESTRICT secret,
+                         size_t lane)
+{
+    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
+    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
+    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+    XXH_ASSERT(lane < XXH_ACC_NB);
+    {
+        xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+        xxh_u64 acc64 = xacc[lane];
+        acc64 = XXH_xorshift64(acc64, 47);
+        acc64 ^= key64;
+        acc64 *= XXH_PRIME32_1;
+        xacc[lane] = acc64;
+    }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read.
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    size_t i;
+    for (i=0; i < XXH_ACC_NB; i++) {
+        XXH3_scalarScrambleRound(acc, secret, i);
+    }
+}
+
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    /*
+     * We need a separate pointer for the hack below,
+     * which requires a non-const pointer.
+     * Any decent compiler will optimize this out otherwise.
+     */
+    const xxh_u8* kSecretPtr = XXH3_kSecret;
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+    /*
+     * UGLY HACK:
+     * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+     * placed sequentially, in order, at the top of the unrolled loop.
+     *
+     * While MOVK is great for generating constants (2 cycles for a 64-bit
+     * constant compared to 4 cycles for LDR), it fights for bandwidth with
+     * the arithmetic instructions.
+     *
+     *   I   L   S
+     *       MOVK
+     *       MOVK
+     *       MOVK
+     *       MOVK
+     *   ADD
+     *   SUB     STR
+     *           STR
+     *
+     * By forcing loads from memory (as the asm line causes the compiler to assume
+     * that XXH3_kSecretPtr has been changed), the pipelines are used more
+     * efficiently:
+     *
+     *   I   L   S
+     *       LDR
+     *   ADD LDR
+     *   SUB     STR
+     *           STR
+     *
+     * See XXH3_NEON_LANES for details on the pipeline.
+     *
+     * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+     *   without hack: 2654.4 MB/s
+     *   with hack:    3202.9 MB/s
+     */
+    XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+        int i;
+        for (i=0; i < nbRounds; i++) {
+            /*
+             * The asm hack causes the compiler to assume that kSecretPtr aliases with
+             * customSecret, and on aarch64, this prevented LDP from merging two
+             * loads together for free. Putting the loads together before the stores
+             * properly generates LDP.
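+             *
+             * Each iteration below derives one 16-byte segment of the custom
+             * secret: seed64 is added to the low 8 bytes and subtracted from
+             * the high 8 bytes, making the secret seed-dependent while keeping
+             * the sum of each segment pair unchanged.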
+ */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_accumulate XXH3_accumulate_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_accumulate XXH3_accumulate_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_accumulate XXH3_accumulate_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_accumulate XXH3_accumulate_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_accumulate XXH3_accumulate_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_SVE) +#define XXH3_accumulate_512 XXH3_accumulate_512_sve +#define XXH3_accumulate XXH3_accumulate_sve +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_accumulate XXH3_accumulate_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + +#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ +# undef XXH3_initCustomSecret +# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar +#endif + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + XXH3_accumulate_512(acc, p, 
secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); + } } +} + +XXH_FORCE_INLINE xxh_u64 +XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) +{ + return XXH3_mul128_fold64( + acc[0] ^ XXH_readLE64(secret), + acc[1] ^ XXH_readLE64(secret+8) ); +} + +static XXH64_hash_t +XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) +{ + xxh_u64 result64 = start; + size_t i = 0; + + for (i = 0; i < 4; i++) { + result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); +#if defined(__clang__) /* Clang */ \ + && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Prevent autovectorization on Clang ARMv7-a. Exact same problem as + * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. + * XXH3_64bits, len == 256, Snapdragon 835: + * without hack: 2063.7 MB/s + * with hack: 2560.7 MB/s + */ + XXH_COMPILER_GUARD(result64); +#endif + } + + return XXH3_avalanche(result64); +} + +#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ + XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, + const void* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + /* do not align on 8, so that the secret is different from the accumulator */ +#define XXH_SECRET_MERGEACCS_START 11 + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); +} + +/* + * It's important for performance to transmit secret's size (when it's static) + * so that the compiler can properly optimize the vectorized loop. + * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. + */ +XXH3_WITH_SECRET_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * It's preferable for performance that XXH3_hashLong is not inlined, + * as it results in a smaller function for small data, easier to the instruction cache. + * Note that inside this no_inline function, we do inline the internal loop, + * and provide a statically defined secret size to allow optimization of vector loop. 
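+ * (The public entry points below pass this function to XXH3_64bits_internal()
+ * as an XXH3_hashLong64_f callback; together with XXH_NO_INLINE this keeps
+ * the long-input path out of the inlined short-input dispatch.)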
+ */ +XXH_NO_INLINE XXH_PUREF XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on alteration of default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ +#if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); +#endif + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) +{ + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! 
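+ * @brief Seeded 64-bit XXH3 hash.
+ *
+ * Usage sketch (illustrative; `buffer` and `bufferSize` are placeholders):
+ * @code
+ *   XXH64_hash_t const h = XXH3_64bits_withSeed(buffer, bufferSize, 0xC0FFEEULL);
+ * @endcode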
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed) +{ + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (length <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize); +} + + +/* === XXH3 streaming === */ +#ifndef XXH_NO_STREAM +/* + * Malloc's a pointer that is always aligned to align. + * + * This must be freed with `XXH_alignedFree()`. + * + * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte + * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2 + * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON. + * + * This underalignment previously caused a rather obvious crash which went + * completely unnoticed due to XXH3_createState() not actually being tested. + * Credit to RedSpah for noticing this bug. + * + * The alignment is done manually: Functions like posix_memalign or _mm_malloc + * are avoided: To maintain portability, we would have to write a fallback + * like this anyways, and besides, testing for the existence of library + * functions without relying on external build tools is impossible. + * + * The method is simple: Overallocate, manually align, and store the offset + * to the original behind the returned pointer. + * + * Align must be a power of 2 and 8 <= align <= 128. + */ +static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align) +{ + XXH_ASSERT(align <= 128 && align >= 8); /* range check */ + XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ + XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ + { /* Overallocate to make room for manual realignment and an offset byte */ + xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); + if (base != NULL) { + /* + * Get the offset needed to align this pointer. + * + * Even if the returned pointer is aligned, there will always be + * at least one byte to store the offset to the original pointer. + */ + size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ + /* Add the offset for the now-aligned pointer */ + xxh_u8* ptr = base + offset; + + XXH_ASSERT((size_t)ptr % align == 0); + + /* Store the offset immediately before the returned pointer. */ + ptr[-1] = (xxh_u8)offset; + return ptr; + } + return NULL; + } +} +/* + * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass + * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. + */ +static void XXH_alignedFree(void* p) +{ + if (p != NULL) { + xxh_u8* ptr = (xxh_u8*)p; + /* Get the offset byte we added in XXH_malloc. */ + xxh_u8 offset = ptr[-1]; + /* Free the original malloc'd pointer */ + xxh_u8* base = ptr - offset; + XXH_free(base); + } +} +/*! @ingroup XXH3_family */ +/*! + * @brief Allocate an @ref XXH3_state_t. + * + * Must be freed with XXH3_freeState(). + * @return An allocated XXH3_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) +{ + XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state==NULL) return NULL; + XXH3_INITSTATE(state); + return state; +} + +/*! 
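+ * A complete streaming session (illustrative sketch; `data` and `dataSize`
+ * are placeholders, and chunking is application-specific):
+ * @code
+ *   XXH3_state_t* const st = XXH3_createState();
+ *   if (st != NULL
+ *       && XXH3_64bits_reset(st) == XXH_OK
+ *       && XXH3_64bits_update(st, data, dataSize) == XXH_OK) {
+ *       XXH64_hash_t const h = XXH3_64bits_digest(st);
+ *       (void)h;
+ *   }
+ *   XXH3_freeState(st);
+ * @endcode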
@ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * Must be allocated with XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void +XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) +{ + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; +} + +/*! + * @internal + * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). + * + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. 
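+ *
+ * Whenever consumption reaches the end of a block, the accumulators are
+ * scrambled and the secret offset wraps back to the start; the number of
+ * stripes already consumed in the current block is carried between calls
+ * via @p nbStripesSoFarPtr.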
+ * + * @param acc Pointer to the 8 accumulator lanes + * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block* + * @param nbStripesPerBlock Number of stripes in a block + * @param input Input pointer + * @param nbStripes Number of stripes to process + * @param secret Secret pointer + * @param secretLimit Offset of the last block in @p secret + * @param f_acc Pointer to an XXH3_accumulate implementation + * @param f_scramble Pointer to an XXH3_scrambleAcc implementation + * @return Pointer past the end of @p input after processing + */ +XXH_FORCE_INLINE const xxh_u8 * +XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, + size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, + const xxh_u8* XXH_RESTRICT input, size_t nbStripes, + const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; + /* Process full blocks */ + if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { + /* Process the initial partial block... */ + size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; + + do { + /* Accumulate and scramble */ + f_acc(acc, input, initialSecret, nbStripesThisIter); + f_scramble(acc, secret + secretLimit); + input += nbStripesThisIter * XXH_STRIPE_LEN; + nbStripes -= nbStripesThisIter; + /* Then continue the loop with the full block size */ + nbStripesThisIter = nbStripesPerBlock; + initialSecret = secret; + } while (nbStripes >= nbStripesPerBlock); + *nbStripesSoFarPtr = 0; + } + /* Process a partial block */ + if (nbStripes > 0) { + f_acc(acc, input, initialSecret, nbStripes); + input += nbStripes * XXH_STRIPE_LEN; + *nbStripesSoFarPtr += nbStripes; + } + /* Return end pointer */ + return input; +} + +#ifndef XXH3_STREAM_USE_STACK +# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */ +# define XXH3_STREAM_USE_STACK 1 +# endif +#endif +/* + * Both XXH3_64bits_update and XXH3_128bits_update use this routine. + */ +XXH_FORCE_INLINE XXH_errorcode +XXH3_update(XXH3_state_t* XXH_RESTRICT const state, + const xxh_u8* XXH_RESTRICT input, size_t len, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + XXH_ASSERT(state != NULL); + { const xxh_u8* const bEnd = input + len; + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* For some reason, gcc and MSVC seem to suffer greatly + * when operating accumulators directly into state. + * Operating into stack space seems to enable proper optimization. 
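+         * The accumulators are therefore copied into a local array below and
+         * written back into the state just before returning.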
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, nbStripes, + secret, state->secretLimit, + f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + + } + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8* lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
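+     *
+     * When fewer than XXH_STRIPE_LEN bytes remain buffered, the last stripe
+     * is reassembled from the tail of the internal buffer, so the final
+     * XXH3_accumulate_512() never reads past the bytes actually ingested.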
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + } else { /* bufferedSize < XXH_STRIPE_LEN */ + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + } + /* Last stripe */ + XXH3_accumulate_512(acc, + lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} +#endif /* !XXH_NO_STREAM */ + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). + */ + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. 
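+     * Both output halves are derived from the same 3 input bytes: the high
+     * half mixes a byte-swapped, bit-rotated copy of the combined word, and
+     * each half is keyed with a different secret-derived bitflip.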
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= PRIME_MX2; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. 
+ * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + +#if XXH_SIZE_OPT >= 1 + { + /* Smaller, but slightly slower. 
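+             * This compact loop mixes the same symmetric 32-byte pairs as the
+             * unrolled ladder in the #else branch, walking from the innermost
+             * pair (i == (len - 1) / 32) out to the outermost (i == 0).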
*/
+            unsigned int i = (unsigned int)(len - 1) / 32;
+            do {
+                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+            } while (i-- != 0);
+        }
+#else
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+                }
+                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+            }
+            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+        }
+        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                       XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    {   XXH128_hash_t acc;
+        unsigned i;
+        acc.low64 = len * XXH_PRIME64_1;
+        acc.high64 = 0;
+        /*
+         * We set `i` to offset + 32, so that the unchanged `len` can be used
+         * as the upper bound. This reaches a sweet spot where both x86 and
+         * aarch64 get simple address generation and good codegen for the loop.
+         */
+        for (i = 32; i < 160; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input  + i - 32,
+                                input  + i - 16,
+                                secret + i - 32,
+                                seed);
+        }
+        acc.low64 = XXH3_avalanche(acc.low64);
+        acc.high64 = XXH3_avalanche(acc.high64);
+        /*
+         * NB: `i <= len` will duplicate the last 32 bytes if
+         * len % 32 was zero. This is an unfortunate necessity to keep
+         * the hash result stable.
+         */
+        for (i=160; i <= len; i += 32) {
+            acc = XXH128_mix32B(acc,
+                                input + i - 32,
+                                input + i - 16,
+                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+                                seed);
+        }
+        /* last bytes */
+        acc = XXH128_mix32B(acc,
+                            input + len - 16,
+                            input + len - 32,
+                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+                            (XXH64_hash_t)0 - seed);
+
+        {   XXH128_hash_t h128;
+            h128.low64  = acc.low64 + acc.high64;
+            h128.high64 = (acc.low64    * XXH_PRIME64_1)
+                        + (acc.high64   * XXH_PRIME64_4)
+                        + ((len - seed) * XXH_PRIME64_2);
+            h128.low64  = XXH3_avalanche(h128.low64);
+            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+            return h128;
+        }
+    }
+}
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                            XXH3_f_accumulate f_acc,
+                            XXH3_f_scrambleAcc f_scramble)
+{
+    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+    /* converge into final hash */
+    XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    {   XXH128_hash_t h128;
+        h128.low64  = XXH3_mergeAccs(acc,
+                                     secret + XXH_SECRET_MERGEACCS_START,
+                                     (xxh_u64)len * XXH_PRIME64_1);
+        h128.high64 = XXH3_mergeAccs(acc,
+                                     secret + secretSize
+                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                     ~((xxh_u64)len * XXH_PRIME64_2));
+        return h128;
+    }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
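+ * Same reasoning as the 64-bit variant: keeping the long-input path out of
+ * line leaves a smaller, more instruction-cache-friendly function for the
+ * frequent short-input cases.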
+ */ +XXH_NO_INLINE XXH_PUREF XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate, XXH3_scrambleAcc); +} + +/* + * It's important for performance to pass @p secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE + * breaks -Og, this is XXH_NO_INLINE. + */ +XXH3_WITH_SECRET_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +/*! 
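+ * @brief 128-bit XXH3 with a caller-provided secret.
+ *
+ * Usage sketch (illustrative; `buffer`, `bufferSize`, and `mySecret` are
+ * placeholders, and the secret must span at least XXH3_SECRET_SIZE_MIN bytes):
+ * @code
+ *   XXH128_hash_t const h =
+ *       XXH3_128bits_withSecret(buffer, bufferSize, mySecret, sizeof(mySecret));
+ * @endcode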
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ +#ifndef XXH_NO_STREAM +/* + * All initialization and update functions are identical to 64-bit streaming variant. + * The only difference is the finalization routine. + */ + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) +{ + return XXH3_64bits_reset(statePtr); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) +{ + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSeed(statePtr, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) +{ + return XXH3_64bits_update(state, input, len); +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret;
+    if (state->totalLen > XXH3_MIDSIZE_MAX) {
+        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+        XXH3_digest_long(acc, state, secret);
+        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+        {   XXH128_hash_t h128;
+            h128.low64  = XXH3_mergeAccs(acc,
+                                         secret + XXH_SECRET_MERGEACCS_START,
+                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
+            h128.high64 = XXH3_mergeAccs(acc,
+                                         secret + state->secretLimit + XXH_STRIPE_LEN
+                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+            return h128;
+        }
+    }
+    /* len <= XXH3_MIDSIZE_MAX : short code */
+    if (state->seed)
+        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                   secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+
+/* 128-bit utility functions */
+
+#include <string.h>   /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+    /* note : XXH128_hash_t is compact, it has no padding byte */
+    return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * @return : >0 if *h128_1  > *h128_2
+ *           <0 if *h128_1  < *h128_2
+ *           =0 if *h128_1 == *h128_2  */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+    /* note : bets that, in most cases, hash values are different */
+    if (hcmp) return hcmp;
+    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*======   Canonical representation   ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) {
+        hash.high64 = XXH_swap64(hash.high64);
+        hash.low64  = XXH_swap64(hash.low64);
+    }
+    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+    XXH128_hash_t h;
+    h.high64 = XXH_readBE64(src);
+    h.low64  = XXH_readBE64(src->digest + 8);
+    return h;
+}
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+    XXH_writeLE64( dst,          XXH_readLE64(dst)          ^ h128.low64 );
+    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*!
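+ * @brief Derives a full-size secret from arbitrary seed material.
+ *
+ * Usage sketch (illustrative):
+ * @code
+ *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ *   if (XXH3_generateSecret(secret, sizeof(secret), "my seed data", 12) != XXH_OK) {
+ *       abort();
+ *   }
+ * @endcode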
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize) +{ +#if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(secretBuffer != NULL); + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); +#else + /* production mode, assert() are disabled */ + if (secretBuffer == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; +#endif + + if (customSeedSize == 0) { + customSeed = XXH3_kSecret; + customSeedSize = XXH_SECRET_DEFAULT_SIZE; + } +#if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(customSeed != NULL); +#else + if (customSeed == NULL) return XXH_ERROR; +#endif + + /* Fill secretBuffer with a copy of customSeed - repeat as needed */ + { size_t pos = 0; + while (pos < secretSize) { + size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize); + memcpy((char*)secretBuffer + pos, customSeed, toCopy); + pos += toCopy; + } } + + { size_t const nbSeg16 = secretSize / 16; + size_t n; + XXH128_canonical_t scrambler; + XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); + for (n=0; n