diff --git a/.gitattributes b/.gitattributes index 2d202d418aa2a619cf9a0c7acc16499bec273cb3..32089612b3a001426427536dac63755bcd5f3363 100644 --- a/.gitattributes +++ b/.gitattributes @@ -76,3 +76,5 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs d llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h new file mode 100644 index 0000000000000000000000000000000000000000..562b7c1808ec12b933a76586b7bd316a9014be5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/config.h" // IWYU pragma: export + +#include "arrow/filesystem/filesystem.h" // IWYU pragma: export +#ifdef ARROW_AZURE +#include "arrow/filesystem/azurefs.h" // IWYU pragma: export +#endif +#ifdef ARROW_GCS +#include "arrow/filesystem/gcsfs.h" // IWYU pragma: export +#endif +#include "arrow/filesystem/hdfs.h" // IWYU pragma: export +#include "arrow/filesystem/localfs.h" // IWYU pragma: export +#include "arrow/filesystem/mockfs.h" // IWYU pragma: export +#ifdef ARROW_S3 +#include "arrow/filesystem/s3fs.h" // IWYU pragma: export +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h new file mode 100644 index 0000000000000000000000000000000000000000..350014954f056055f8e29ed2d1660ef717e1e8c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h @@ -0,0 +1,358 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Azure::Core::Credentials { +class TokenCredential; +} + +namespace Azure::Storage { +class StorageSharedKeyCredential; +} + +namespace Azure::Storage::Blobs { +class BlobServiceClient; +} + +namespace Azure::Storage::Files::DataLake { +class DataLakeFileSystemClient; +class DataLakeServiceClient; +} // namespace Azure::Storage::Files::DataLake + +namespace arrow::fs { + +class TestAzureFileSystem; +class TestAzureOptions; + +/// Options for the AzureFileSystem implementation. +/// +/// By default, authentication is handled by the Azure SDK's credential chain +/// which may read from multiple environment variables, such as: +/// - `AZURE_TENANT_ID` +/// - `AZURE_CLIENT_ID` +/// - `AZURE_CLIENT_SECRET` +/// - `AZURE_AUTHORITY_HOST` +/// - `AZURE_CLIENT_CERTIFICATE_PATH` +/// - `AZURE_FEDERATED_TOKEN_FILE` +/// +/// Functions are provided for explicit configuration of credentials if that is preferred. +struct ARROW_EXPORT AzureOptions { + friend class TestAzureOptions; + + /// \brief The name of the Azure Storage Account being accessed. + /// + /// All service URLs will be constructed using this storage account name. + /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate + /// this account. + std::string account_name; + + /// \brief hostname[:port] of the Azure Blob Storage Service. + /// + /// If the hostname is a relative domain name (one that starts with a '.'), then storage + /// account URLs will be constructed by prepending the account name to the hostname. + /// If the hostname is a fully qualified domain name, then the hostname will be used + /// as-is and the account name will follow the hostname in the URL path. 
+ /// + /// Default: ".blob.core.windows.net" + std::string blob_storage_authority = ".blob.core.windows.net"; + + /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service. + /// + /// If the hostname is a relative domain name (one that starts with a '.'), then storage + /// account URLs will be constructed by prepending the account name to the hostname. + /// If the hostname is a fully qualified domain name, then the hostname will be used + /// as-is and the account name will follow the hostname in the URL path. + /// + /// Default: ".dfs.core.windows.net" + std::string dfs_storage_authority = ".dfs.core.windows.net"; + + /// \brief Azure Blob Storage connection transport. + /// + /// Default: "https" + std::string blob_storage_scheme = "https"; + + /// \brief Azure Data Lake Storage Gen 2 connection transport. + /// + /// Default: "https" + std::string dfs_storage_scheme = "https"; + + // TODO(GH-38598): Add support for more auth methods. + // std::string connection_string; + // std::string sas_token; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + private: + enum class CredentialKind { + kDefault, + kAnonymous, + kStorageSharedKey, + kClientSecret, + kManagedIdentity, + kWorkloadIdentity, + } credential_kind_ = CredentialKind::kDefault; + + std::shared_ptr + storage_shared_key_credential_; + mutable std::shared_ptr token_credential_; + + public: + AzureOptions(); + ~AzureOptions(); + + private: + void ExtractFromUriSchemeAndHierPart(const Uri& uri, std::string* out_path); + Status ExtractFromUriQuery(const Uri& uri); + + public: + /// \brief Construct a new AzureOptions from an URI. + /// + /// Supported formats: + /// + /// 1. abfs[s]://[:\@]\.blob.core.windows.net + /// [/\[/\]] + /// 2. abfs[s]://\[:\]@\.dfs.core.windows.net + /// [/path] + /// 3. abfs[s]://[\]@]\[\<:port\>] + /// [/\[/path]] + /// 4. 
abfs[s]://[\]@]\[/path] + /// + /// 1. and 2. are compatible with the Azure Data Lake Storage Gen2 URIs: + /// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri + /// + /// 3. is for Azure Blob Storage compatible service including Azurite. + /// + /// 4. is a shorter version of 1. and 2. + /// + /// Note that there is no difference between abfs and abfss. HTTPS is + /// used with abfs by default. You can force to use HTTP by specifying + /// "enable_tls=false" query. + /// + /// Supported query parameters: + /// + /// * blob_storage_authority: Set AzureOptions::blob_storage_authority + /// * dfs_storage_authority: Set AzureOptions::dfs_storage_authority + /// * enable_tls: If it's "false" or "0", HTTP not HTTPS is used. + /// * credential_kind: One of "default", "anonymous", + /// "workload_identity". If "default" is specified, it's just + /// ignored. If "anonymous" is specified, + /// AzureOptions::ConfigureAnonymousCredential() is called. If + /// "workload_identity" is specified, + /// AzureOptions::ConfigureWorkloadIdentityCredential() is called. + /// * tenant_id: You must specify "client_id" and "client_secret" + /// too. AzureOptions::ConfigureClientSecretCredential() is called. + /// * client_id: If you don't specify "tenant_id" and + /// "client_secret", + /// AzureOptions::ConfigureManagedIdentityCredential() is + /// called. If you specify "tenant_id" and "client_secret" too, + /// AzureOptions::ConfigureClientSecretCredential() is called. + /// * client_secret: You must specify "tenant_id" and "client_id" + /// too. AzureOptions::ConfigureClientSecretCredential() is called. 
+ static Result FromUri(const Uri& uri, std::string* out_path); + static Result FromUri(const std::string& uri, std::string* out_path); + + Status ConfigureDefaultCredential(); + Status ConfigureAnonymousCredential(); + Status ConfigureAccountKeyCredential(const std::string& account_key); + Status ConfigureClientSecretCredential(const std::string& tenant_id, + const std::string& client_id, + const std::string& client_secret); + Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string()); + Status ConfigureWorkloadIdentityCredential(); + + bool Equals(const AzureOptions& other) const; + + std::string AccountBlobUrl(const std::string& account_name) const; + std::string AccountDfsUrl(const std::string& account_name) const; + + Result> + MakeBlobServiceClient() const; + + Result> + MakeDataLakeServiceClient() const; +}; + +/// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and +/// Azure Data Lake Storage Gen2 (ADLS Gen2) [2]. +/// +/// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that +/// support high throughput analytic workloads, built on Azure Blob Storage. All the data +/// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account. +/// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop +/// compatibility. ADLS Gen1 exists as a separate object that will retired on 2024-02-29 +/// and new ADLS accounts use Gen2 instead. +/// +/// ADLS Gen2 and Blob APIs can operate on the same data, but there are +/// some limitations [3]. The ones that are relevant to this +/// implementation are listed here: +/// +/// - You can't use Blob APIs, and ADLS APIs to write to the same instance of a file. If +/// you write to a file by using ADLS APIs then that file's blocks won't be visible +/// to calls to the GetBlockList Blob API. The only exception is when you're +/// overwriting. 
+/// - When you use the ListBlobs operation without specifying a delimiter, the results +/// include both directories and blobs. If you choose to use a delimiter, use only a +/// forward slash (/) -- the only supported delimiter. +/// - If you use the DeleteBlob API to delete a directory, that directory is deleted only +/// if it's empty. This means that you can't use the Blob API delete directories +/// recursively. +/// +/// [1]: https://azure.microsoft.com/en-us/products/storage/blobs +/// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage +/// [3]: +/// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues +class ARROW_EXPORT AzureFileSystem : public FileSystem { + private: + class Impl; + std::unique_ptr impl_; + + explicit AzureFileSystem(std::unique_ptr&& impl); + + friend class TestAzureFileSystem; + void ForceCachedHierarchicalNamespaceSupport(int hns_support); + + public: + ~AzureFileSystem() override = default; + + static Result> Make( + const AzureOptions& options, const io::IOContext& = io::default_io_context()); + + std::string type_name() const override { return "abfs"; } + + /// Return the original Azure options when constructing the filesystem + const AzureOptions& options() const; + + bool Equals(const FileSystem& other) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + /// \brief Delete a directory and its contents recursively. + /// + /// Atomicity is guaranteed only on Hierarchical Namespace Storage accounts. 
+ Status DeleteDir(const std::string& path) override; + + /// \brief Non-atomically deletes the contents of a directory. + /// + /// This function can return a bad Status after only partially deleting the + /// contents of the directory. + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + + /// \brief Deletion of all the containers in the storage account (not + /// implemented for safety reasons). + /// + /// \return Status::NotImplemented + Status DeleteRootDirContents() override; + + /// \brief Deletes a file. + /// + /// Supported on both flat namespace and Hierarchical Namespace storage + /// accounts. A check is made to guarantee the parent directory doesn't + /// disappear after the blob is deleted and while this operation is running, + /// no other client can delete the parent directory due to the use of leases. + /// + /// This means applications can safely retry this operation without coordination to + /// guarantee only one client/process is trying to delete the same file. + Status DeleteFile(const std::string& path) override; + + /// \brief Move/rename a file or directory. + /// + /// There are no files immediately at the root directory, so paths like + /// "/segment" always refer to a container of the storage account and are + /// treated as directories. + /// + /// If `dest` exists but the operation fails for some reason, `Move` + /// guarantees `dest` is not lost. + /// + /// Conditions for a successful move: + /// + /// 1. `src` must exist. + /// 2. `dest` can't contain a strict path prefix of `src`. More generally, + /// a directory can't be made a subdirectory of itself. + /// 3. If `dest` already exists and it's a file, `src` must also be a file. + /// `dest` is then replaced by `src`. + /// 4. All components of `dest` must exist, except for the last. + /// 5. If `dest` already exists and it's a directory, `src` must also be a + /// directory and `dest` must be empty. 
`dest` is then replaced by `src` + /// and its contents. + /// + /// Leases are used to guarantee the pre-condition checks and the rename + /// operation are atomic: other clients can't invalidate the pre-condition in + /// the time between the checks and the actual rename operation. + /// + /// This is possible because Move() is only support on storage accounts with + /// Hierarchical Namespace Support enabled. + /// + /// ## Limitations + /// + /// - Moves are not supported on storage accounts without + /// Hierarchical Namespace support enabled + /// - Moves across different containers are not supported + /// - Moving a path of the form `/container` is not supported as it would + /// require moving all the files in a container to another container. + /// The only exception is a `Move("/container_a", "/container_b")` where + /// both containers are empty or `container_b` doesn't even exist. + /// The atomicity of the emptiness checks followed by the renaming operation + /// is guaranteed by the use of leases. 
+ Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + + Result> OpenInputStream(const FileInfo& info) override; + + Result> OpenInputFile( + const std::string& path) override; + + Result> OpenInputFile( + const FileInfo& info) override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; +}; + +} // namespace arrow::fs diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h new file mode 100644 index 0000000000000000000000000000000000000000..272e42256a38801171cd7158449fba793f407ae8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h @@ -0,0 +1,697 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/filesystem/type_fwd.h" +#include "arrow/io/interfaces.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compare.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow { +namespace fs { + +using arrow::util::Uri; + +// A system clock time point expressed as a 64-bit (or more) number of +// nanoseconds since the epoch. +using TimePoint = + std::chrono::time_point; + +ARROW_EXPORT std::string ToString(FileType); + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, FileType); + +static const int64_t kNoSize = -1; +static const TimePoint kNoTime = TimePoint(TimePoint::duration(-1)); + +/// \brief FileSystem entry info +struct ARROW_EXPORT FileInfo : public util::EqualityComparable { + FileInfo() = default; + FileInfo(FileInfo&&) = default; + FileInfo& operator=(FileInfo&&) = default; + FileInfo(const FileInfo&) = default; + FileInfo& operator=(const FileInfo&) = default; + + explicit FileInfo(std::string path, FileType type = FileType::Unknown) + : path_(std::move(path)), type_(type) {} + + /// The file type + FileType type() const { return type_; } + void set_type(FileType type) { type_ = type; } + + /// The full file path in the filesystem + const std::string& path() const { return path_; } + void set_path(std::string path) { path_ = std::move(path); } + + /// The file base name (component after the last directory separator) + std::string base_name() const; + + // The directory base name (component before the file base name). + std::string dir_name() const; + + /// The size in bytes, if available + /// + /// Only regular files are guaranteed to have a size. 
+ int64_t size() const { return size_; } + void set_size(int64_t size) { size_ = size; } + + /// The file extension (excluding the dot) + std::string extension() const; + + /// The time of last modification, if available + TimePoint mtime() const { return mtime_; } + void set_mtime(TimePoint mtime) { mtime_ = mtime; } + + bool IsFile() const { return type_ == FileType::File; } + bool IsDirectory() const { return type_ == FileType::Directory; } + + bool Equals(const FileInfo& other) const { + return type() == other.type() && path() == other.path() && size() == other.size() && + mtime() == other.mtime(); + } + + std::string ToString() const; + + /// Function object implementing less-than comparison and hashing by + /// path, to support sorting infos, using them as keys, and other + /// interactions with the STL. + struct ByPath { + bool operator()(const FileInfo& l, const FileInfo& r) const { + return l.path() < r.path(); + } + + size_t operator()(const FileInfo& i) const { + return std::hash{}(i.path()); + } + }; + + protected: + std::string path_; + FileType type_ = FileType::Unknown; + int64_t size_ = kNoSize; + TimePoint mtime_ = kNoTime; +}; + +ARROW_EXPORT std::ostream& operator<<(std::ostream& os, const FileInfo&); + +/// \brief File selector for filesystem APIs +struct ARROW_EXPORT FileSelector { + /// The directory in which to select files. + /// If the path exists but doesn't point to a directory, this should be an error. + std::string base_dir; + /// The behavior if `base_dir` isn't found in the filesystem. If false, + /// an error is returned. If true, an empty selection is returned. + bool allow_not_found; + /// Whether to recurse into subdirectories. + bool recursive; + /// The maximum number of subdirectories to recurse into. 
+ int32_t max_recursion; + + FileSelector() : allow_not_found(false), recursive(false), max_recursion(INT32_MAX) {} +}; + +/// \brief FileSystem, path pair +struct ARROW_EXPORT FileLocator { + std::shared_ptr filesystem; + std::string path; +}; + +using FileInfoVector = std::vector; +using FileInfoGenerator = std::function()>; + +} // namespace fs + +template <> +struct IterationTraits { + static fs::FileInfoVector End() { return {}; } + static bool IsEnd(const fs::FileInfoVector& val) { return val.empty(); } +}; + +namespace fs { + +/// \brief Abstract file system API +class ARROW_EXPORT FileSystem + /// \cond false + : public std::enable_shared_from_this +/// \endcond +{ // NOLINT + public: + virtual ~FileSystem(); + + virtual std::string type_name() const = 0; + + /// EXPERIMENTAL: The IOContext associated with this filesystem. + const io::IOContext& io_context() const { return io_context_; } + + /// Normalize path for the given filesystem + /// + /// The default implementation of this method is a no-op, but subclasses + /// may allow normalizing irregular path forms (such as Windows local paths). + virtual Result NormalizePath(std::string path); + + /// \brief Ensure a URI (or path) is compatible with the given filesystem and return the + /// path + /// + /// \param uri_string A URI representing a resource in the given filesystem. + /// + /// This method will check to ensure the given filesystem is compatible with the + /// URI. This can be useful when the user provides both a URI and a filesystem or + /// when a user provides multiple URIs that should be compatible with the same + /// filesystem. + /// + /// uri_string can be an absolute path instead of a URI. In that case it will ensure + /// the filesystem (if supplied) is the local filesystem (or some custom filesystem that + /// is capable of reading local paths) and will normalize the path's file separators. + /// + /// Note, this method only checks to ensure the URI scheme is valid. 
It will not detect + /// inconsistencies like a mismatching region or endpoint override. + /// + /// \return The path inside the filesystem that is indicated by the URI. + virtual Result PathFromUri(const std::string& uri_string) const; + + virtual bool Equals(const FileSystem& other) const = 0; + + virtual bool Equals(const std::shared_ptr& other) const { + return Equals(*other); + } + + /// Get info for the given target. + /// + /// Any symlink is automatically dereferenced, recursively. + /// A nonexistent or unreachable file returns an Ok status and + /// has a FileType of value NotFound. An error status indicates + /// a truly exceptional condition (low-level I/O error, etc.). + virtual Result GetFileInfo(const std::string& path) = 0; + /// Same, for many targets at once. + virtual Result GetFileInfo(const std::vector& paths); + /// Same, according to a selector. + /// + /// The selector's base directory will not be part of the results, even if + /// it exists. + /// If it doesn't exist, see `FileSelector::allow_not_found`. + virtual Result GetFileInfo(const FileSelector& select) = 0; + + /// Async version of GetFileInfo + virtual Future GetFileInfoAsync(const std::vector& paths); + + /// Streaming async version of GetFileInfo + /// + /// The returned generator is not async-reentrant, i.e. you need to wait for + /// the returned future to complete before calling the generator again. + virtual FileInfoGenerator GetFileInfoGenerator(const FileSelector& select); + + /// Create a directory and subdirectories. + /// + /// This function succeeds if the directory already exists. + virtual Status CreateDir(const std::string& path, bool recursive) = 0; + Status CreateDir(const std::string& path) { return CreateDir(path, true); } + + /// Delete a directory and its contents, recursively. + virtual Status DeleteDir(const std::string& path) = 0; + + /// Delete a directory's contents, recursively. + /// + /// Like DeleteDir, but doesn't delete the directory itself. 
+ /// Passing an empty path ("" or "/") is disallowed, see DeleteRootDirContents. + virtual Status DeleteDirContents(const std::string& path, bool missing_dir_ok) = 0; + Status DeleteDirContents(const std::string& path) { + return DeleteDirContents(path, false); + } + + /// Async version of DeleteDirContents. + virtual Future<> DeleteDirContentsAsync(const std::string& path, bool missing_dir_ok); + + /// Async version of DeleteDirContents. + /// + /// This overload allows missing directories. + Future<> DeleteDirContentsAsync(const std::string& path); + + /// EXPERIMENTAL: Delete the root directory's contents, recursively. + /// + /// Implementations may decide to raise an error if this operation is + /// too dangerous. + // NOTE: may decide to remove this if it's deemed not useful + virtual Status DeleteRootDirContents() = 0; + + /// Delete a file. + virtual Status DeleteFile(const std::string& path) = 0; + /// Delete many files. + /// + /// The default implementation issues individual delete operations in sequence. + virtual Status DeleteFiles(const std::vector& paths); + + /// Move / rename a file or directory. + /// + /// If the destination exists: + /// - if it is a non-empty directory, an error is returned + /// - otherwise, if it has the same type as the source, it is replaced + /// - otherwise, behavior is unspecified (implementation-dependent). + virtual Status Move(const std::string& src, const std::string& dest) = 0; + + /// Copy a file. + /// + /// If the destination exists and is a directory, an error is returned. + /// Otherwise, it is replaced. + virtual Status CopyFile(const std::string& src, const std::string& dest) = 0; + + /// Open an input stream for sequential reading. + virtual Result> OpenInputStream( + const std::string& path) = 0; + + /// Open an input stream for sequential reading. 
+ /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputStream(const FileInfo& info); + + /// Open an input file for random access reading. + virtual Result> OpenInputFile( + const std::string& path) = 0; + + /// Open an input file for random access reading. + /// + /// This override assumes the given FileInfo validly represents the file's + /// characteristics, and may optimize access depending on them (for example + /// avoid querying the file size or its existence). + virtual Result> OpenInputFile( + const FileInfo& info); + + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const std::string& path); + + /// Async version of OpenInputStream + virtual Future> OpenInputStreamAsync( + const FileInfo& info); + + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const std::string& path); + + /// Async version of OpenInputFile + virtual Future> OpenInputFileAsync( + const FileInfo& info); + + /// Open an output stream for sequential writing. + /// + /// If the target already exists, existing data is truncated. + virtual Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) = 0; + Result> OpenOutputStream(const std::string& path); + + /// Open an output stream for appending. + /// + /// If the target doesn't exist, a new empty file is created. + /// + /// Note: some filesystem implementations do not support efficient appending + /// to an existing file, in which case this method will return NotImplemented. + /// Consider writing to multiple files (using e.g. the dataset layer) instead. 
+ virtual Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) = 0; + Result> OpenAppendStream(const std::string& path); + + protected: + explicit FileSystem(io::IOContext io_context = io::default_io_context()) + : io_context_(std::move(io_context)) {} + + io::IOContext io_context_; + // Whether metadata operations (such as GetFileInfo or OpenInputStream) + // are cheap enough that the default async variants don't bother with + // a thread pool. + bool default_async_is_sync_ = true; +}; + +using FileSystemFactory = std::function>( + const Uri& uri, const io::IOContext& io_context, std::string* out_path)>; + +/// \brief A FileSystem implementation that delegates to another +/// implementation after prepending a fixed base path. +/// +/// This is useful to expose a logical view of a subtree of a filesystem, +/// for example a directory in a LocalFileSystem. +/// This works on abstract paths, i.e. paths using forward slashes and +/// and a single root "/". Windows paths are not guaranteed to work. +/// This makes no security guarantee. For example, symlinks may allow to +/// "escape" the subtree and access other parts of the underlying filesystem. +class ARROW_EXPORT SubTreeFileSystem : public FileSystem { + public: + // This constructor may abort if base_path is invalid. 
+ explicit SubTreeFileSystem(const std::string& base_path, + std::shared_ptr base_fs); + ~SubTreeFileSystem() override; + + std::string type_name() const override { return "subtree"; } + std::string base_path() const { return base_path_; } + std::shared_ptr base_fs() const { return base_fs_; } + + Result NormalizePath(std::string path) override; + Result PathFromUri(const std::string& uri_string) const override; + + bool Equals(const FileSystem& other) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + + Future> OpenInputStreamAsync( + const std::string& path) override; + Future> OpenInputStreamAsync( + const FileInfo& info) override; + Future> OpenInputFileAsync( + const std::string& path) override; + Future> OpenInputFileAsync( + const FileInfo& info) override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + 
const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + SubTreeFileSystem() = default; + + const std::string base_path_; + std::shared_ptr base_fs_; + + Result PrependBase(const std::string& s) const; + Result PrependBaseNonEmpty(const std::string& s) const; + Result StripBase(const std::string& s) const; + Status FixInfo(FileInfo* info) const; + + static Result NormalizeBasePath( + std::string base_path, const std::shared_ptr& base_fs); +}; + +/// \brief A FileSystem implementation that delegates to another +/// implementation but inserts latencies at various points. +class ARROW_EXPORT SlowFileSystem : public FileSystem { + public: + SlowFileSystem(std::shared_ptr base_fs, + std::shared_ptr latencies); + SlowFileSystem(std::shared_ptr base_fs, double average_latency); + SlowFileSystem(std::shared_ptr base_fs, double average_latency, + int32_t seed); + + std::string type_name() const override { return "slow"; } + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; 
+ Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + std::shared_ptr base_fs_; + std::shared_ptr latencies_; +}; + +/// \brief Ensure all registered filesystem implementations are finalized. +/// +/// Individual finalizers may wait for concurrent calls to finish so as to avoid +/// race conditions. After this function has been called, all filesystem APIs +/// will fail with an error. +/// +/// The user is responsible for synchronization of calls to this function. +void EnsureFinalized(); + +/// \defgroup filesystem-factories Functions for creating FileSystem instances +/// +/// @{ + +/// \brief Create a new FileSystem by URI +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[out] out_path (optional) Path inside the filesystem. +/// \return out_fs FileSystem instance. +ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3", +/// "gs" and "gcs". +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// \param[in] uri a URI-based path, ex: file:///some/local/path +/// \param[in] io_context an IOContext which will be associated with the filesystem +/// \param[out] out_path (optional) Path inside the filesystem. +/// \return out_fs FileSystem instance. 
+ARROW_EXPORT +Result> FileSystemFromUri(const std::string& uri, + const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. +ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, std::string* out_path = NULLPTR); + +/// \brief Create a new FileSystem by URI with a custom IO context +/// +/// Support for other schemes can be added using RegisterFileSystemFactory. +/// +/// Same as FileSystemFromUri, but in addition also recognize non-URIs +/// and treat them as local filesystem paths. Only absolute local filesystem +/// paths are allowed. +ARROW_EXPORT +Result> FileSystemFromUriOrPath( + const std::string& uri, const io::IOContext& io_context, + std::string* out_path = NULLPTR); + +/// @} + +/// \defgroup filesystem-factory-registration Helpers for FileSystem registration +/// +/// @{ + +/// \brief Register a FileSystem factory +/// +/// Support for custom URI schemes can be added by registering a factory +/// for the corresponding FileSystem. +/// +/// \param[in] scheme a Uri scheme which the factory will handle. +/// If a factory has already been registered for a scheme, +/// the new factory will be ignored. +/// \param[in] factory a function which can produce a FileSystem for Uris which match +/// scheme. +/// \param[in] finalizer a function which must be called to finalize the factory before +/// the process exits, or nullptr if no finalization is necessary. +/// \return raises KeyError if a name collision occurs. 
+ARROW_EXPORT Status RegisterFileSystemFactory(std::string scheme, + FileSystemFactory factory, + std::function finalizer = {}); + +/// \brief Register FileSystem factories from a shared library +/// +/// FileSystem implementations may be housed in separate shared libraries and only +/// registered when the shared library is explicitly loaded. FileSystemRegistrar is +/// provided to simplify definition of such libraries: each instance at namespace scope +/// in the library will register a factory for a scheme. Any library which uses +/// FileSystemRegistrars and which must be dynamically loaded should be loaded using +/// LoadFileSystemFactories(), which will additionally merge registries are if necessary +/// (static linkage to arrow can produce isolated registries). +ARROW_EXPORT Status LoadFileSystemFactories(const char* libpath); + +struct ARROW_EXPORT FileSystemRegistrar { + /// \brief Register a FileSystem factory at load time + /// + /// Support for custom URI schemes can be added by registering a factory for the + /// corresponding FileSystem. An instance of this helper can be defined at namespace + /// scope to cause the factory to be registered at load time. + /// + /// Global constructors will finish execution before main() starts if the registrar is + /// linked into the same binary as main(), or before dlopen()/LoadLibrary() returns if + /// the library in which the registrar is defined is dynamically loaded. 
+ /// + /// \code + /// FileSystemRegistrar kSlowFileSystemModule{ + /// "slowfile", + /// [](const Uri& uri, const io::IOContext& io_context, std::string* out_path) + /// ->Result> { + /// auto local_uri = "file" + uri.ToString().substr(uri.scheme().size()); + /// ARROW_ASSIGN_OR_RAISE(auto base_fs, + /// FileSystemFromUri(local_uri, io_context, out_path)); + /// double average_latency = 1; + /// int32_t seed = 0xDEADBEEF; + /// ARROW_ASSIGN_OR_RAISE(auto params, uri.query_item()); + /// for (const auto& [key, value] : params) { + /// if (key == "average_latency") { + /// average_latency = std::stod(value); + /// } + /// if (key == "seed") { + /// seed = std::stoi(value, nullptr, /*base=*/16); + /// } + /// } + /// return std::make_shared(base_fs, average_latency, seed); + /// })); + /// \endcode + /// + /// \param[in] scheme a Uri scheme which the factory will handle. + /// If a factory has already been registered for a scheme, the + /// new factory will be ignored. + /// \param[in] factory a function which can produce a FileSystem for Uris which match + /// scheme. + /// \param[in] finalizer a function which must be called to finalize the factory before + /// the process exits, or nullptr if no finalization is necessary. + FileSystemRegistrar(std::string scheme, FileSystemFactory factory, + std::function finalizer = {}); +}; + +/// @} + +namespace internal { +ARROW_EXPORT void* GetFileSystemRegistry(); +} // namespace internal + +/// \brief Copy files, including from one FileSystem to another +/// +/// If a source and destination are resident in the same FileSystem FileSystem::CopyFile +/// will be used, otherwise the file will be opened as a stream in both FileSystems and +/// chunks copied from the source to the destination. No directories will be created. 
+ARROW_EXPORT +Status CopyFiles(const std::vector& sources, + const std::vector& destinations, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +/// \brief Copy selected files, including from one FileSystem to another +/// +/// Directories will be created under the destination base directory as needed. +ARROW_EXPORT +Status CopyFiles(const std::shared_ptr& source_fs, + const FileSelector& source_sel, + const std::shared_ptr& destination_fs, + const std::string& destination_base_dir, + const io::IOContext& io_context = io::default_io_context(), + int64_t chunk_size = 1024 * 1024, bool use_threads = true); + +struct FileSystemGlobalOptions { + /// Path to a single PEM file holding all TLS CA certificates + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_file_path; + + /// Path to a directory holding TLS CA certificates in individual PEM files + /// named along the OpenSSL "hashed" format. + /// + /// If empty, the underlying TLS library's defaults will be used. + std::string tls_ca_dir_path; +}; + +/// EXPERIMENTAL: optional global initialization routine +/// +/// This is for environments (such as manylinux) where the path +/// to TLS CA certificates needs to be configured at runtime. +ARROW_EXPORT +Status Initialize(const FileSystemGlobalOptions& options); + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h new file mode 100644 index 0000000000000000000000000000000000000000..d610c72237a5a6afdfa20a905bf7d2d1203b0b0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/filesystem/filesystem.h" + +namespace arrow::fs { +extern "C" { + +// ARROW_FORCE_EXPORT ensures this function's visibility is +// _declspec(dllexport)/[[gnu::visibility("default")]] even when +// this header is #included by a non-arrow source, as in a third +// party filesystem implementation. +ARROW_FORCE_EXPORT void* arrow_filesystem_get_registry() { + // In the case where libarrow is linked statically both to the executable and to a + // dynamically loaded filesystem implementation library, the library contains a + // duplicate definition of the registry into which the library's instances of + // FileSystemRegistrar insert their factories. This function is made accessible to + // dlsym/GetProcAddress to enable detection of such duplicate registries and merging + // into the registry accessible to the executable. 
+ return internal::GetFileSystemRegistry(); +} +} +} // namespace arrow::fs diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h new file mode 100644 index 0000000000000000000000000000000000000000..f1fbc95bf957c850b9738561c07d09d258b367ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h @@ -0,0 +1,246 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/uri.h" + +namespace arrow { +namespace fs { +namespace internal { + +// Opaque wrapper for GCS's library credentials to avoid exposing in Arrow headers. +struct GcsCredentialsHolder; + +} // namespace internal + +class GcsFileSystem; + +/// \brief Container for GCS Credentials and information necessary to recreate them. 
+class ARROW_EXPORT GcsCredentials { + public: + bool Equals(const GcsCredentials& other) const; + bool anonymous() const { return anonymous_; } + const std::string& access_token() const { return access_token_; } + TimePoint expiration() const { return expiration_; } + const std::string& target_service_account() const { return target_service_account_; } + const std::string& json_credentials() const { return json_credentials_; } + const std::shared_ptr& holder() const { + return holder_; + } + + private: + GcsCredentials() = default; + bool anonymous_ = false; + std::string access_token_; + TimePoint expiration_; + std::string target_service_account_; + std::string json_credentials_; + std::shared_ptr holder_; + friend class GcsFileSystem; + friend struct GcsOptions; +}; + +/// Options for the GcsFileSystem implementation. +struct ARROW_EXPORT GcsOptions { + /// \brief Equivalent to GcsOptions::Defaults(). + GcsOptions(); + GcsCredentials credentials; + + std::string endpoint_override; + std::string scheme; + /// \brief Location to use for creating buckets. + std::string default_bucket_location; + + /// \brief If set used to control total time allowed for retrying underlying + /// errors. + /// + /// The default policy is to retry for up to 15 minutes. + std::optional retry_limit_seconds; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// \brief The project to use for creating buckets. + /// + /// If not set, the library uses the GOOGLE_CLOUD_PROJECT environment + /// variable. Most I/O operations do not need a project id, only applications + /// that create new buckets need a project id. + std::optional project_id; + + bool Equals(const GcsOptions& other) const; + + /// \brief Initialize with Google Default Credentials + /// + /// Create options configured to use [Application Default Credentials][aip/4110]. 
The + /// details of this mechanism are too involved to describe here, but suffice is to say + /// that applications can override any defaults using an environment variable + /// (`GOOGLE_APPLICATION_CREDENTIALS`), and that the defaults work with most Google + /// Cloud Platform deployment environments (GCE, GKE, Cloud Run, etc.), and that have + /// the same behavior as the `gcloud` CLI tool on your workstation. + /// + /// \see https://cloud.google.com/docs/authentication + /// + /// [aip/4110]: https://google.aip.dev/auth/4110 + static GcsOptions Defaults(); + + /// \brief Initialize with anonymous credentials + static GcsOptions Anonymous(); + + /// \brief Initialize with access token + /// + /// These credentials are useful when using an out-of-band mechanism to fetch access + /// tokens. Note that access tokens are time limited, you will need to manually refresh + /// the tokens created by the out-of-band mechanism. + static GcsOptions FromAccessToken(const std::string& access_token, + TimePoint expiration); + + /// \brief Initialize with service account impersonation + /// + /// Service account impersonation allows one principal (a user or service account) to + /// impersonate a service account. It requires that the calling principal has the + /// necessary permissions *on* the service account. + static GcsOptions FromImpersonatedServiceAccount( + const GcsCredentials& base_credentials, const std::string& target_service_account); + + /// Creates service account credentials from a JSON object in string form. + /// + /// The @p json_object is expected to be in the format described by [aip/4112]. Such an + /// object contains the identity of a service account, as well as a private key that can + /// be used to sign tokens, showing the caller was holding the private key. + /// + /// In GCP one can create several "keys" for each service account, and these keys are + /// downloaded as a JSON "key file". 
The contents of such a file are in the format + /// required by this function. Remember that key files and their contents should be + /// treated as any other secret with security implications, think of them as passwords + /// (because they are!), don't store them or output them where unauthorized persons may + /// read them. + /// + /// Most applications should probably use default credentials, maybe pointing them to a + /// file with these contents. Using this function may be useful when the json object is + /// obtained from a Cloud Secret Manager or a similar service. + /// + /// [aip/4112]: https://google.aip.dev/auth/4112 + static GcsOptions FromServiceAccountCredentials(const std::string& json_object); + + /// Initialize from URIs such as "gs://bucket/object". + static Result FromUri(const arrow::util::Uri& uri, std::string* out_path); + static Result FromUri(const std::string& uri, std::string* out_path); +}; + +/// \brief GCS-backed FileSystem implementation. +/// +/// GCS (Google Cloud Storage - https://cloud.google.com/storage) is a scalable object +/// storage system for any amount of data. The main abstractions in GCS are buckets and +/// objects. A bucket is a namespace for objects, buckets can store any number of objects, +/// tens of millions and even billions is not uncommon. Each object contains a single +/// blob of data, up to 5TiB in size. Buckets are typically configured to keep a single +/// version of each object, but versioning can be enabled. Versioning is important because +/// objects are immutable, once created one cannot append data to the object or modify the +/// object data in any way. +/// +/// GCS buckets are in a global namespace, if a Google Cloud customer creates a bucket +/// named `foo` no other customer can create a bucket with the same name. Note that a +/// principal (a user or service account) may only list the buckets they are entitled to, +/// and then only within a project. It is not possible to list "all" the buckets. 
+/// +/// Within each bucket objects are in flat namespace. GCS does not have folders or +/// directories. However, following some conventions it is possible to emulate +/// directories. To this end, this class: +/// +/// - All buckets are treated as directories at the "root" +/// - Creating a root directory results in a new bucket being created, this may be slower +/// than most GCS operations. +/// - The class creates marker objects for a directory, using a metadata attribute to +/// annotate the file. +/// - GCS can list all the objects with a given prefix, this is used to emulate listing +/// of directories. +/// - In object lists GCS can summarize all the objects with a common prefix as a single +/// entry, this is used to emulate non-recursive lists. Note that GCS list time is +/// proportional to the number of objects in the prefix. Listing recursively takes +/// almost the same time as non-recursive lists. +/// +class ARROW_EXPORT GcsFileSystem : public FileSystem { + public: + ~GcsFileSystem() override = default; + + std::string type_name() const override; + const GcsOptions& options() const; + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + Result GetFileInfo(const std::string& path) override; + Result GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + + Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override; + + /// This is not implemented in GcsFileSystem, as it would be too dangerous. 
+ Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputStream(const FileInfo& info) override; + + Result> OpenInputFile( + const std::string& path) override; + Result> OpenInputFile( + const FileInfo& info) override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + ARROW_DEPRECATED( + "Deprecated. " + "OpenAppendStream is unsupported on the GCS FileSystem.") + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a GcsFileSystem instance from the given options. + // TODO(ARROW-16884): make this return Result for consistency + static std::shared_ptr Make( + const GcsOptions& options, const io::IOContext& = io::default_io_context()); + + private: + explicit GcsFileSystem(const GcsOptions& options, const io::IOContext& io_context); + + class Impl; + std::shared_ptr impl_; +}; + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h new file mode 100644 index 0000000000000000000000000000000000000000..25604a39e3aceb26b2e7da5dc72e97a0cbd635d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/io/hdfs.h" +#include "arrow/util/uri.h" + +namespace arrow::fs { + +/// Options for the HDFS implementation. +struct ARROW_EXPORT HdfsOptions { + HdfsOptions() = default; + ~HdfsOptions() = default; + + /// Hdfs configuration options, contains host, port, driver + io::HdfsConnectionConfig connection_config; + + /// Used by Hdfs OpenWritable Interface. + int32_t buffer_size = 0; + int16_t replication = 3; + int64_t default_block_size = 0; + + void ConfigureEndPoint(std::string host, int port); + void ConfigureReplication(int16_t replication); + void ConfigureUser(std::string user_name); + void ConfigureBufferSize(int32_t buffer_size); + void ConfigureBlockSize(int64_t default_block_size); + void ConfigureKerberosTicketCachePath(std::string path); + void ConfigureExtraConf(std::string key, std::string val); + + bool Equals(const HdfsOptions& other) const; + + static Result FromUri(const ::arrow::util::Uri& uri); + static Result FromUri(const std::string& uri); +}; + +/// HDFS-backed FileSystem implementation. +/// +/// implementation notes: +/// - This is a wrapper of arrow/io/hdfs, so we can use FileSystem API to handle hdfs. 
+class ARROW_EXPORT HadoopFileSystem : public FileSystem { + public: + ~HadoopFileSystem() override; + + std::string type_name() const override { return "hdfs"; } + HdfsOptions options() const; + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a HdfsFileSystem instance from the given options. 
+ static Result> Make( + const HdfsOptions& options, const io::IOContext& = io::default_io_context()); + + protected: + HadoopFileSystem(const HdfsOptions& options, const io::IOContext&); + + class Impl; + std::unique_ptr impl_; +}; + +} // namespace arrow::fs diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h new file mode 100644 index 0000000000000000000000000000000000000000..45a3da317f6637f32fd4b5aba805176632ef5755 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" + +namespace arrow { +namespace internal { + +class Uri; + +} + +namespace fs { + +/// Options for the LocalFileSystem implementation. +struct ARROW_EXPORT LocalFileSystemOptions { + static constexpr int32_t kDefaultDirectoryReadahead = 16; + static constexpr int32_t kDefaultFileInfoBatchSize = 1000; + + /// Whether OpenInputStream and OpenInputFile return a mmap'ed file, + /// or a regular one. 
+ bool use_mmap = false; + + /// Options related to `GetFileInfoGenerator` interface. + + /// EXPERIMENTAL: The maximum number of directories processed in parallel + /// by `GetFileInfoGenerator`. + int32_t directory_readahead = kDefaultDirectoryReadahead; + + /// EXPERIMENTAL: The maximum number of entries aggregated into each + /// FileInfoVector chunk by `GetFileInfoGenerator`. + /// + /// Since each FileInfo entry needs a separate `stat` system call, a + /// directory with a very large number of files may take a lot of time to + /// process entirely. By generating a FileInfoVector after this chunk + /// size is reached, we ensure FileInfo entries can start being consumed + /// from the FileInfoGenerator with less initial latency. + int32_t file_info_batch_size = kDefaultFileInfoBatchSize; + + /// \brief Initialize with defaults + static LocalFileSystemOptions Defaults(); + + bool Equals(const LocalFileSystemOptions& other) const; + + static Result FromUri(const ::arrow::util::Uri& uri, + std::string* out_path); +}; + +/// \brief A FileSystem implementation accessing files on the local machine. +/// +/// This class handles only `/`-separated paths. If desired, conversion +/// from Windows backslash-separated paths should be done by the caller. +/// Details such as symlinks are abstracted away (symlinks are always +/// followed, except when deleting an entry). 
+class ARROW_EXPORT LocalFileSystem : public FileSystem { + public: + explicit LocalFileSystem(const io::IOContext& = io::default_io_context()); + explicit LocalFileSystem(const LocalFileSystemOptions&, + const io::IOContext& = io::default_io_context()); + ~LocalFileSystem() override; + + std::string type_name() const override { return "local"; } + + Result NormalizePath(std::string path) override; + Result PathFromUri(const std::string& uri_string) const override; + + bool Equals(const FileSystem& other) const override; + + LocalFileSystemOptions options() const { return options_; } + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + protected: + LocalFileSystemOptions options_; +}; + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h 
b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h new file mode 100644 index 0000000000000000000000000000000000000000..5626560e08363f20c5479a1b5f540d6aed1a2d04 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow::fs::internal { + +struct MockDirInfo { + std::string full_path; + TimePoint mtime; + + bool operator==(const MockDirInfo& other) const { + return mtime == other.mtime && full_path == other.full_path; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockDirInfo&); +}; + +struct MockFileInfo { + std::string full_path; + TimePoint mtime; + std::string_view data; + + bool operator==(const MockFileInfo& other) const { + return mtime == other.mtime && full_path == other.full_path && data == other.data; + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockFileInfo&); +}; + +/// A mock FileSystem implementation that holds its contents in memory. +/// +/// Useful for validating the FileSystem API, writing conformance suite, +/// and bootstrapping FileSystem-based APIs. 
+class ARROW_EXPORT MockFileSystem : public FileSystem { + public: + explicit MockFileSystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~MockFileSystem() override; + + std::string type_name() const override { return "mock"; } + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + // Contents-dumping helpers to ease testing. + // Output is lexicographically-ordered by full path. + std::vector AllDirs(); + std::vector AllFiles(); + + // Create a File with a content from a string. + Status CreateFile(const std::string& path, std::string_view content, + bool recursive = true); + + // Create a MockFileSystem out of (empty) FileInfo. The content of every + // file is empty and of size 0. All directories will be created recursively. 
+ static Result> Make(TimePoint current_time, + const std::vector& infos); + + class Impl; + + protected: + std::unique_ptr impl_; +}; + +class ARROW_EXPORT MockAsyncFileSystem : public MockFileSystem { + public: + explicit MockAsyncFileSystem(TimePoint current_time, + const io::IOContext& io_context = io::default_io_context()) + : MockFileSystem(current_time, io_context) { + default_async_is_sync_ = false; + } + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; +}; + +} // namespace arrow::fs::internal diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h new file mode 100644 index 0000000000000000000000000000000000000000..d49d9d2efa7f6aa92e568f8305c15dc06c86c806 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/type_fwd.h" + +namespace arrow { +namespace fs { +namespace internal { + +constexpr char kSep = '/'; + +// Computations on abstract paths (not local paths with system-dependent behaviour). +// Abstract paths are typically used in URIs. + +// Split an abstract path into its individual components. +ARROW_EXPORT +std::vector SplitAbstractPath(const std::string& path, char sep = kSep); + +// Slice the individual components of an abstract path and combine them +// +// If offset or length are negative then an empty string is returned +// If offset is >= the number of components then an empty string is returned +// If offset + length is >= the number of components then length is truncated +ARROW_EXPORT +std::string SliceAbstractPath(const std::string& path, int offset, int length, + char sep = kSep); + +// Return the extension of the file +ARROW_EXPORT std::string GetAbstractPathExtension(const std::string& s); + +// Return the depth (number of components) of an abstract path +// +// Trailing slashes do not count towards depth +// Leading slashes do not count towards depth +// +// The root path ("/") has depth 0 +ARROW_EXPORT int GetAbstractPathDepth(std::string_view path); + +// Return the parent directory and basename of an abstract path. Both values may be +// empty. +ARROW_EXPORT +std::pair GetAbstractPathParent(const std::string& s); + +// Validate an abstract path. +ARROW_EXPORT +Status ValidateAbstractPath(std::string_view path); + +// Validate the components of an abstract path. +ARROW_EXPORT +Status ValidateAbstractPathParts(const std::vector& parts); + +// Append a non-empty stem to an abstract path. +ARROW_EXPORT +std::string ConcatAbstractPath(std::string_view base, std::string_view stem); + +// Make path relative to base, if it starts with base. Otherwise error out. 
+ARROW_EXPORT +Result MakeAbstractPathRelative(const std::string& base, + const std::string& path); + +ARROW_EXPORT +std::string EnsureLeadingSlash(std::string_view s); + +ARROW_EXPORT +std::string_view RemoveLeadingSlash(std::string_view s); + +ARROW_EXPORT +std::string EnsureTrailingSlash(std::string_view s); + +/// \brief remove the forward slash (if any) from the given path +/// \param s the input path +/// \param preserve_root if true, allow a path of just "/" to remain unchanged +ARROW_EXPORT +std::string_view RemoveTrailingSlash(std::string_view s, bool preserve_root = false); + +ARROW_EXPORT +Status AssertNoTrailingSlash(std::string_view s); + +inline bool HasTrailingSlash(std::string_view s) { + return !s.empty() && s.back() == kSep; +} + +inline bool HasLeadingSlash(std::string_view s) { + return !s.empty() && s.front() == kSep; +} + +ARROW_EXPORT +bool IsAncestorOf(std::string_view ancestor, std::string_view descendant); + +ARROW_EXPORT +std::optional RemoveAncestor(std::string_view ancestor, + std::string_view descendant); + +/// Return a vector of ancestors between a base path and a descendant. +/// For example, +/// +/// AncestorsFromBasePath("a/b", "a/b/c/d/e") -> ["a/b/c", "a/b/c/d"] +ARROW_EXPORT +std::vector AncestorsFromBasePath(std::string_view base_path, + std::string_view descendant); + +/// Given a vector of paths of directories which must be created, produce a the minimal +/// subset for passing to CreateDir(recursive=true) by removing redundant parent +/// directories +ARROW_EXPORT +std::vector MinimalCreateDirSet(std::vector dirs); + +// Join the components of an abstract path. 
+template +std::string JoinAbstractPath(StringIt it, StringIt end, char sep = kSep) { + std::string path; + for (; it != end; ++it) { + if (it->empty()) continue; + + if (!path.empty()) { + path += sep; + } + path += *it; + } + return path; +} + +template +std::string JoinAbstractPath(const StringRange& range, char sep = kSep) { + return JoinAbstractPath(range.begin(), range.end(), sep); +} + +/// Convert slashes to backslashes, on all platforms. Mostly useful for testing. +ARROW_EXPORT +std::string ToBackslashes(std::string_view s); + +/// Ensure a local path is abstract, by converting backslashes to regular slashes +/// on Windows. Return the path unchanged on other systems. +ARROW_EXPORT +std::string ToSlashes(std::string_view s); + +ARROW_EXPORT +bool IsEmptyPath(std::string_view s); + +ARROW_EXPORT +bool IsLikelyUri(std::string_view s); + +class ARROW_EXPORT Globber { + public: + ~Globber(); + explicit Globber(std::string pattern); + bool Matches(const std::string& path); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e270a6e1c469abdc8905b6f00da6510bbb585258 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include + +#include "arrow/filesystem/s3fs.h" +#include "arrow/status.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace fs { + +// A minio test server, managed as a child process + +class MinioTestServer { + public: + MinioTestServer(); + ~MinioTestServer(); + + Status Start(); + + Status Stop(); + + std::string connect_string() const; + + std::string access_key() const; + + std::string secret_key() const; + + private: + struct Impl; + std::unique_ptr impl_; +}; + +// A Minio "environment" that spawns Minio processes in advances, such as +// to hide process launch latencies during testing. + +class MinioTestEnvironment : public ::testing::Environment { + public: + MinioTestEnvironment(); + ~MinioTestEnvironment(); + + void SetUp() override; + + Result> GetOneServer(); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +// A global test "environment", to ensure that the S3 API is initialized before +// running unit tests. + +class S3Environment : public ::testing::Environment { + public: + // We set this environment variable to speed up tests by ensuring + // DefaultAWSCredentialsProviderChain does not query (inaccessible) + // EC2 metadata endpoint. + // This must be done before spawning any Minio child process to avoid any race + // condition accessing environment variables. 
+ S3Environment() : ec2_metadata_disabled_guard_("AWS_EC2_METADATA_DISABLED", "true") {} + + void SetUp() override { + // Change this to increase logging during tests + S3GlobalOptions options; + options.log_level = S3LogLevel::Fatal; + ASSERT_OK(InitializeS3(options)); + } + + void TearDown() override { ASSERT_OK(FinalizeS3()); } + + private: + EnvVarGuard ec2_metadata_disabled_guard_; +}; + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h new file mode 100644 index 0000000000000000000000000000000000000000..82d08bc5ea89a2f84888d1a9017bff6e6f2f57bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h @@ -0,0 +1,401 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/util/macros.h" +#include "arrow/util/uri.h" + +namespace Aws { +namespace Auth { + +class AWSCredentialsProvider; +class STSAssumeRoleCredentialsProvider; + +} // namespace Auth +namespace STS { +class STSClient; +} +} // namespace Aws + +namespace arrow { +namespace fs { + +/// Options for using a proxy for S3 +struct ARROW_EXPORT S3ProxyOptions { + std::string scheme; + std::string host; + int port = -1; + std::string username; + std::string password; + + /// Initialize from URI such as http://username:password@host:port + /// or http://host:port + static Result FromUri(const std::string& uri); + static Result FromUri(const ::arrow::util::Uri& uri); + + bool Equals(const S3ProxyOptions& other) const; +}; + +enum class S3CredentialsKind : int8_t { + /// Anonymous access (no credentials used) + Anonymous, + /// Use default AWS credentials, configured through environment variables + Default, + /// Use explicitly-provided access key pair + Explicit, + /// Assume role through a role ARN + Role, + /// Use web identity token to assume role, configured through environment variables + WebIdentity +}; + +/// Pure virtual class for describing custom S3 retry strategies +class ARROW_EXPORT S3RetryStrategy { + public: + virtual ~S3RetryStrategy() = default; + + /// Simple struct where each field corresponds to a field in Aws::Client::AWSError + struct AWSErrorDetail { + /// Corresponds to AWSError::GetErrorType() + int error_type; + /// Corresponds to AWSError::GetMessage() + std::string message; + /// Corresponds to AWSError::GetExceptionName() + std::string exception_name; + /// Corresponds to AWSError::ShouldRetry() + bool should_retry; + }; + /// Returns true if the S3 request resulting in the provided error should be retried. 
+ virtual bool ShouldRetry(const AWSErrorDetail& error, int64_t attempted_retries) = 0; + /// Returns the time in milliseconds the S3 client should sleep for until retrying. + virtual int64_t CalculateDelayBeforeNextRetry(const AWSErrorDetail& error, + int64_t attempted_retries) = 0; + /// Returns a stock AWS Default retry strategy. + static std::shared_ptr GetAwsDefaultRetryStrategy( + int64_t max_attempts); + /// Returns a stock AWS Standard retry strategy. + static std::shared_ptr GetAwsStandardRetryStrategy( + int64_t max_attempts); +}; + +/// Options for the S3FileSystem implementation. +struct ARROW_EXPORT S3Options { + /// \brief AWS region to connect to. + /// + /// If unset, the AWS SDK will choose a default value. The exact algorithm + /// depends on the SDK version. Before 1.8, the default is hardcoded + /// to "us-east-1". Since 1.8, several heuristics are used to determine + /// the region (environment variables, configuration profile, EC2 metadata + /// server). + std::string region; + + /// \brief Socket connection timeout, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 1 second). + double connect_timeout = -1; + + /// \brief Socket read timeout on Windows and macOS, in seconds + /// + /// If negative, the AWS SDK default value is used (typically 3 seconds). + /// This option is ignored on non-Windows, non-macOS systems. + double request_timeout = -1; + + /// If non-empty, override region with a connect string such as "localhost:9000" + // XXX perhaps instead take a URL like "http://localhost:9000"? + std::string endpoint_override; + /// S3 connection transport, default "https" + std::string scheme = "https"; + + /// ARN of role to assume + std::string role_arn; + /// Optional identifier for an assumed role session. 
+ std::string session_name; + /// Optional external identifier to pass to STS when assuming a role + std::string external_id; + /// Frequency (in seconds) to refresh temporary credentials from assumed role + int load_frequency = 900; + + /// If connection is through a proxy, set options here + S3ProxyOptions proxy_options; + + /// AWS credentials provider + std::shared_ptr credentials_provider; + + /// Type of credentials being used. Set along with credentials_provider. + S3CredentialsKind credentials_kind = S3CredentialsKind::Default; + + /// Whether to use virtual addressing of buckets + /// + /// If true, then virtual addressing is always enabled. + /// If false, then virtual addressing is only enabled if `endpoint_override` is empty. + /// + /// This can be used for non-AWS backends that only support virtual hosted-style access. + bool force_virtual_addressing = false; + + /// Whether OutputStream writes will be issued in the background, without blocking. + bool background_writes = true; + + /// Whether to allow creation of buckets + /// + /// When S3FileSystem creates new buckets, it does not pass any non-default settings. + /// In AWS S3, the bucket and all objects will be not publicly visible, and there + /// will be no bucket policies and no resource tags. To have more control over how + /// buckets are created, use a different API to create them. + bool allow_bucket_creation = false; + + /// Whether to allow deletion of buckets + bool allow_bucket_deletion = false; + + /// \brief Default metadata for OpenOutputStream. + /// + /// This will be ignored if non-empty metadata is passed to OpenOutputStream. + std::shared_ptr default_metadata; + + /// Optional retry strategy to determine which error types should be retried, and the + /// delay between retries. + std::shared_ptr retry_strategy; + + S3Options(); + + /// Configure with the default AWS credentials provider chain. + void ConfigureDefaultCredentials(); + + /// Configure with anonymous credentials. 
This will only let you access public buckets. + void ConfigureAnonymousCredentials(); + + /// Configure with explicit access and secret key. + void ConfigureAccessKey(const std::string& access_key, const std::string& secret_key, + const std::string& session_token = ""); + + /// Configure with credentials from an assumed role. + void ConfigureAssumeRoleCredentials( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// Configure with credentials from role assumed using a web identity token + void ConfigureAssumeRoleWithWebIdentityCredentials(); + + std::string GetAccessKey() const; + std::string GetSecretKey() const; + std::string GetSessionToken() const; + + bool Equals(const S3Options& other) const; + + /// \brief Initialize with default credentials provider chain + /// + /// This is recommended if you use the standard AWS environment variables + /// and/or configuration file. + static S3Options Defaults(); + + /// \brief Initialize with anonymous credentials. + /// + /// This will only let you access public buckets. + static S3Options Anonymous(); + + /// \brief Initialize with explicit access and secret key. + /// + /// Optionally, a session token may also be provided for temporary credentials + /// (from STS). + static S3Options FromAccessKey(const std::string& access_key, + const std::string& secret_key, + const std::string& session_token = ""); + + /// \brief Initialize from an assumed role. + static S3Options FromAssumeRole( + const std::string& role_arn, const std::string& session_name = "", + const std::string& external_id = "", int load_frequency = 900, + const std::shared_ptr& stsClient = NULLPTR); + + /// \brief Initialize from an assumed role with web-identity. + /// Uses the AWS SDK which uses environment variables to + /// generate temporary credentials. 
+ static S3Options FromAssumeRoleWithWebIdentity(); + + static Result FromUri(const ::arrow::util::Uri& uri, + std::string* out_path = NULLPTR); + static Result FromUri(const std::string& uri, + std::string* out_path = NULLPTR); +}; + +/// S3-backed FileSystem implementation. +/// +/// Some implementation notes: +/// - buckets are special and the operations available on them may be limited +/// or more expensive than desired. +class ARROW_EXPORT S3FileSystem : public FileSystem { + public: + ~S3FileSystem() override; + + std::string type_name() const override { return "s3"; } + + /// Return the original S3 options when constructing the filesystem + S3Options options() const; + /// Return the actual region this filesystem connects to + std::string region() const; + + bool Equals(const FileSystem& other) const override; + Result PathFromUri(const std::string& uri_string) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::DeleteDirContentsAsync; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo(const FileSelector& select) override; + + FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Future<> DeleteDirContentsAsync(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + /// Create a sequential input stream for reading from a S3 object. 
+ /// + /// NOTE: Reads from the stream will be synchronous and unbuffered. + /// You way want to wrap the stream in a BufferedInputStream or use + /// a custom readahead strategy to avoid idle waits. + Result> OpenInputStream( + const std::string& path) override; + /// Create a sequential input stream for reading from a S3 object. + /// + /// This override avoids a HEAD request by assuming the FileInfo + /// contains correct information. + Result> OpenInputStream(const FileInfo& info) override; + + /// Create a random access file for reading from a S3 object. + /// + /// See OpenInputStream for performance notes. + Result> OpenInputFile( + const std::string& path) override; + /// Create a random access file for reading from a S3 object. + /// + /// This override avoids a HEAD request by assuming the FileInfo + /// contains correct information. + Result> OpenInputFile( + const FileInfo& info) override; + + /// Create a sequential output stream for writing to a S3 object. + /// + /// NOTE: Writes to the stream will be buffered. Depending on + /// S3Options.background_writes, they can be synchronous or not. + /// It is recommended to enable background_writes unless you prefer + /// implementing your own background execution strategy. + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + /// Create a S3FileSystem instance from the given options. 
+ static Result> Make( + const S3Options& options, const io::IOContext& = io::default_io_context()); + + protected: + explicit S3FileSystem(const S3Options& options, const io::IOContext&); + + class Impl; + std::shared_ptr impl_; +}; + +enum class S3LogLevel : int8_t { Off, Fatal, Error, Warn, Info, Debug, Trace }; + +struct ARROW_EXPORT S3GlobalOptions { + S3LogLevel log_level; + /// The number of threads to configure when creating AWS' I/O event loop + /// + /// Defaults to 1 as recommended by AWS' doc when the # of connections is + /// expected to be, at most, in the hundreds + /// + /// For more details see Aws::Crt::Io::EventLoopGroup + int num_event_loop_threads = 1; + + /// \brief Initialize with default options + /// + /// For log_level, this method first tries to extract a suitable value from the + /// environment variable ARROW_S3_LOG_LEVEL. + static S3GlobalOptions Defaults(); +}; + +/// \brief Initialize the S3 APIs with the specified set of options. +/// +/// It is required to call this function at least once before using S3FileSystem. +/// +/// Once this function is called you MUST call FinalizeS3 before the end of the +/// application in order to avoid a segmentation fault at shutdown. +ARROW_EXPORT +Status InitializeS3(const S3GlobalOptions& options); + +/// \brief Ensure the S3 APIs are initialized, but only if not already done. +/// +/// If necessary, this will call InitializeS3() with some default options. +ARROW_EXPORT +Status EnsureS3Initialized(); + +/// Whether S3 was initialized, and not finalized. +ARROW_EXPORT +bool IsS3Initialized(); + +/// Whether S3 was finalized. +ARROW_EXPORT +bool IsS3Finalized(); + +/// \brief Shutdown the S3 APIs. +/// +/// This can wait for some S3 concurrent calls to finish so as to avoid +/// race conditions. +/// After this function has been called, all S3 calls will fail with an error. 
+/// +/// Calls to InitializeS3() and FinalizeS3() should be serialized by the +/// application (this also applies to EnsureS3Initialized() and +/// EnsureS3Finalized()). +ARROW_EXPORT +Status FinalizeS3(); + +/// \brief Ensure the S3 APIs are shutdown, but only if not already done. +/// +/// If necessary, this will call FinalizeS3(). +ARROW_EXPORT +Status EnsureS3Finalized(); + +ARROW_EXPORT +Result ResolveS3BucketRegion(const std::string& bucket); + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e70c787aa85c4fb0e05c45888c9a1ddac3be4999 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h @@ -0,0 +1,256 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/filesystem/mockfs.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/counting_semaphore.h" + +namespace arrow { +namespace fs { + +static constexpr double kTimeSlack = 2.0; // In seconds + +static inline FileInfo File(std::string path) { + return FileInfo(std::move(path), FileType::File); +} + +static inline FileInfo Dir(std::string path) { + return FileInfo(std::move(path), FileType::Directory); +} + +// A subclass of MockFileSystem that blocks operations until an unlock method is +// called. +// +// This is intended for testing fine-grained ordering of filesystem operations. +// +// N.B. Only OpenOutputStream supports gating at the moment but this is simply because +// it is all that has been needed so far. Feel free to add support for more methods +// as required. +class ARROW_TESTING_EXPORT GatedMockFilesystem : public internal::MockFileSystem { + public: + GatedMockFilesystem(TimePoint current_time, + const io::IOContext& = io::default_io_context()); + ~GatedMockFilesystem() override; + + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata = {}) override; + + // Wait until at least num_waiters are waiting on OpenOutputStream + Status WaitForOpenOutputStream(uint32_t num_waiters); + // Unlock `num_waiters` individual calls to OpenOutputStream + Status UnlockOpenOutputStream(uint32_t num_waiters); + + private: + util::CountingSemaphore open_output_sem_; +}; + +ARROW_TESTING_EXPORT +void CreateFile(FileSystem* fs, const std::string& path, const std::string& data); + +// Sort a vector of FileInfo by lexicographic path order +ARROW_TESTING_EXPORT +void SortInfos(FileInfoVector* infos); + +// Create a copy of a FileInfo vector sorted by lexicographic path order +ARROW_TESTING_EXPORT +FileInfoVector SortedInfos(const FileInfoVector& infos); + +ARROW_TESTING_EXPORT +void 
CollectFileInfoGenerator(FileInfoGenerator gen, FileInfoVector* out_infos); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type, + int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, + TimePoint mtime, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, int64_t size); + +ARROW_TESTING_EXPORT +void AssertFileContents(FileSystem* fs, const std::string& path, + const std::string& expected_data); + +template +void AssertDurationBetween(Duration d, double min_secs, double max_secs) { + auto seconds = std::chrono::duration_cast>(d); + ASSERT_GE(seconds.count(), min_secs); + ASSERT_LE(seconds.count(), max_secs); +} + +// Generic tests for FileSystem implementations. +// To use this class, subclass both from it and ::testing::Test, +// implement GetEmptyFileSystem(), and use GENERIC_FS_TEST_FUNCTIONS() +// to define the various tests. 
+class ARROW_TESTING_EXPORT GenericFileSystemTest { + public: + virtual ~GenericFileSystemTest(); + + void TestEmpty(); + void TestNormalizePath(); + void TestCreateDir(); + void TestDeleteDir(); + void TestDeleteDirContents(); + void TestDeleteRootDirContents(); + void TestDeleteFile(); + void TestDeleteFiles(); + void TestMoveFile(); + void TestMoveDir(); + void TestCopyFile(); + void TestGetFileInfo(); + void TestGetFileInfoVector(); + void TestGetFileInfoSelector(); + void TestGetFileInfoSelectorWithRecursion(); + void TestGetFileInfoAsync(); + void TestGetFileInfoGenerator(); + void TestOpenOutputStream(); + void TestOpenAppendStream(); + void TestOpenInputStream(); + void TestOpenInputStreamWithFileInfo(); + void TestOpenInputStreamAsync(); + void TestOpenInputFile(); + void TestOpenInputFileWithFileInfo(); + void TestOpenInputFileAsync(); + void TestSpecialChars(); + + protected: + // This function should return the filesystem under test. + virtual std::shared_ptr GetEmptyFileSystem() = 0; + + // Override the following functions to specify deviations from expected + // filesystem semantics. 
+ // - Whether the filesystem may "implicitly" create intermediate directories + virtual bool have_implicit_directories() const { return false; } + // - Whether the filesystem may allow writing a file "over" a directory + virtual bool allow_write_file_over_dir() const { return false; } + // - Whether the filesystem allows reading a directory + virtual bool allow_read_dir_as_file() const { return false; } + // - Whether the filesystem allows moving a file + virtual bool allow_move_file() const { return true; } + // - Whether the filesystem allows moving a directory + virtual bool allow_move_dir() const { return true; } + // - Whether the filesystem allows moving a directory "over" a non-empty destination + virtual bool allow_move_dir_over_non_empty_dir() const { return false; } + // - Whether the filesystem allows appending to a file + virtual bool allow_append_to_file() const { return true; } + // - Whether the filesystem allows appending to a nonexistent file + virtual bool allow_append_to_new_file() const { return true; } + // - Whether the filesystem supports directory modification times + virtual bool have_directory_mtimes() const { return true; } + // - Whether some directory tree deletion tests may fail randomly + virtual bool have_flaky_directory_tree_deletion() const { return false; } + // - Whether the filesystem stores some metadata alongside files + virtual bool have_file_metadata() const { return false; } + // - Whether the filesystem has a false positive memory leak with generator + virtual bool have_false_positive_memory_leak_with_generator() const { return false; } + + void TestEmpty(FileSystem* fs); + void TestNormalizePath(FileSystem* fs); + void TestCreateDir(FileSystem* fs); + void TestDeleteDir(FileSystem* fs); + void TestDeleteDirContents(FileSystem* fs); + void TestDeleteRootDirContents(FileSystem* fs); + void TestDeleteFile(FileSystem* fs); + void TestDeleteFiles(FileSystem* fs); + void TestMoveFile(FileSystem* fs); + void 
TestMoveDir(FileSystem* fs); + void TestCopyFile(FileSystem* fs); + void TestGetFileInfo(FileSystem* fs); + void TestGetFileInfoVector(FileSystem* fs); + void TestGetFileInfoSelector(FileSystem* fs); + void TestGetFileInfoSelectorWithRecursion(FileSystem* fs); + void TestGetFileInfoAsync(FileSystem* fs); + void TestGetFileInfoGenerator(FileSystem* fs); + void TestOpenOutputStream(FileSystem* fs); + void TestOpenAppendStream(FileSystem* fs); + void TestOpenInputStream(FileSystem* fs); + void TestOpenInputStreamWithFileInfo(FileSystem* fs); + void TestOpenInputStreamAsync(FileSystem* fs); + void TestOpenInputFile(FileSystem* fs); + void TestOpenInputFileWithFileInfo(FileSystem* fs); + void TestOpenInputFileAsync(FileSystem* fs); + void TestSpecialChars(FileSystem* fs); +}; + +#define GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NAME) \ + TEST_MACRO(TEST_CLASS, NAME) { this->Test##NAME(); } + +#define GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_MACRO, TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, Empty) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NormalizePath) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CreateDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteRootDirContents) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFiles) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveDir) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CopyFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoVector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelector) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelectorWithRecursion) \ + 
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoGenerator) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenOutputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenAppendStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStream) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFile) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileWithFileInfo) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileAsync) \ + GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, SpecialChars) + +#define GENERIC_FS_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_F, TEST_CLASS) + +#define GENERIC_FS_TYPED_TEST_FUNCTIONS(TEST_CLASS) \ + GENERIC_FS_TEST_FUNCTIONS_MACROS(TYPED_TEST, TEST_CLASS) + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..92c70799be16c73804353a1f3bcae8b5a3674057 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace fs { + +/// \brief FileSystem entry type +enum class FileType : int8_t { + /// Entry is not found + NotFound, + /// Entry exists but its type is unknown + /// + /// This can designate a special file such as a Unix socket or character + /// device, or Windows NUL / CON / ... + Unknown, + /// Entry is a regular file + File, + /// Entry is a directory + Directory +}; + +struct FileInfo; + +struct FileSelector; + +class FileSystem; +class AzureFileSystem; +class GcsFileSystem; +class LocalFileSystem; +class S3FileSystem; +class SlowFileSystem; +class SubTreeFileSystem; + +} // namespace fs +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e34fc858d07f60ac31b73d1e84b5dc1cf4189b3f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/thread_pool.h" + +namespace arrow { + +/// An executor which synchronously runs the task as part of the SpawnReal call. +class MockExecutor : public internal::Executor { + public: + int GetCapacity() override { return 0; } + + Status SpawnReal(internal::TaskHints hints, internal::FnOnce task, StopToken, + StopCallback&&) override { + spawn_count++; + std::move(task)(); + return Status::OK(); + } + + int spawn_count = 0; +}; + +/// An executor which does not actually run the task. Can be used to simulate situations +/// where the executor schedules a task in a long queue and doesn't get around to running +/// it for a while +class DelayedExecutor : public internal::Executor { + public: + int GetCapacity() override { return 0; } + + Status SpawnReal(internal::TaskHints hints, internal::FnOnce task, StopToken, + StopCallback&&) override { + captured_tasks.push_back(std::move(task)); + return Status::OK(); + } + + std::vector> captured_tasks; +}; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h new file mode 100644 index 0000000000000000000000000000000000000000..85b4c1f1f0138289d5717f7fcf6ade486ae044f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h @@ -0,0 +1,570 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/testing/gtest_compat.h" +#include "arrow/testing/visibility.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/macros.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/type_fwd.h" + +// NOTE: failing must be inline in the macros below, to get correct file / line number +// reporting on test failures. 
+ +// NOTE: using a for loop for this macro allows extra failure messages to be +// appended with operator<< +#define ASSERT_RAISES(ENUM, expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); \ + !_st.Is##ENUM();) \ + FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \ + ENUM) ", but got " \ + << _st.ToString() + +#define ASSERT_RAISES_WITH_MESSAGE(ENUM, message, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + if (!_st.Is##ENUM()) { \ + FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \ + ENUM) ", but got " \ + << _st.ToString(); \ + } \ + ASSERT_EQ((message), _st.ToStringWithoutContextLines()); \ + } while (false) + +#define EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_TRUE(_st.Is##ENUM()) << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " \ + << ARROW_STRINGIFY(ENUM) ", but got " << _st.ToString(); \ + EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \ + } while (false) + +#define EXPECT_RAISES_WITH_CODE_AND_MESSAGE_THAT(code, matcher, expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_EQ(_st.CodeAsString(), Status::CodeAsString(code)); \ + EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \ + } while (false) + +#define ASSERT_OK(expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); !_st.ok();) \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString() + +#define ASSERT_OK_NO_THROW(expr) ASSERT_NO_THROW(ASSERT_OK(expr)) + +#define ARROW_EXPECT_OK(expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + EXPECT_TRUE(_st.ok()) << "'" ARROW_STRINGIFY(expr) "' failed with " \ + << _st.ToString(); \ + } while (false) + 
+#define ASSERT_NOT_OK(expr) \ + for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); _st.ok();) \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' did not failed" << _st.ToString() + +#define ABORT_NOT_OK(expr) \ + do { \ + auto _res = (expr); \ + ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \ + if (ARROW_PREDICT_FALSE(!_st.ok())) { \ + _st.Abort(); \ + } \ + } while (false); + +#define ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, status_name, lhs, rexpr) \ + auto&& status_name = (rexpr); \ + handle_error(status_name.status()); \ + lhs = std::move(status_name).ValueOrDie(); + +#define ASSERT_OK_AND_ASSIGN(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL( \ + ASSERT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), lhs, rexpr); + +#define ASSIGN_OR_ABORT(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL(ABORT_NOT_OK, \ + ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +#define EXPECT_OK_AND_ASSIGN(lhs, rexpr) \ + ASSIGN_OR_HANDLE_ERROR_IMPL(ARROW_EXPECT_OK, \ + ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +#define ASSERT_OK_AND_EQ(expected, expr) \ + do { \ + ASSERT_OK_AND_ASSIGN(auto _actual, (expr)); \ + ASSERT_EQ(expected, _actual); \ + } while (0) + +// A generalized version of GTest's SCOPED_TRACE that takes arbitrary arguments. +// ARROW_SCOPED_TRACE("some variable = ", some_variable, ...) + +#define ARROW_SCOPED_TRACE(...) 
SCOPED_TRACE(::arrow::util::StringBuilder(__VA_ARGS__)) + +namespace arrow { + +// ---------------------------------------------------------------------- +// Useful testing::Types declarations + +inline void PrintTo(StatusCode code, std::ostream* os) { + *os << Status::CodeAsString(code); +} + +using NumericArrowTypes = + ::testing::Types; + +using RealArrowTypes = ::testing::Types; + +using IntegralArrowTypes = ::testing::Types; + +using PhysicalIntegralArrowTypes = + ::testing::Types; + +using PrimitiveArrowTypes = + ::testing::Types; + +using TemporalArrowTypes = + ::testing::Types; + +using DecimalArrowTypes = ::testing::Types; + +using BaseBinaryArrowTypes = + ::testing::Types; + +using BaseBinaryOrBinaryViewLikeArrowTypes = + ::testing::Types; + +using BinaryArrowTypes = ::testing::Types; + +using StringArrowTypes = ::testing::Types; + +using StringOrStringViewArrowTypes = + ::testing::Types; + +using ListArrowTypes = ::testing::Types; + +using UnionArrowTypes = ::testing::Types; + +class Array; +class ChunkedArray; +class RecordBatch; +class Table; +struct Datum; + +#define ASSERT_ARRAYS_EQUAL(lhs, rhs) AssertArraysEqual((lhs), (rhs)) +#define ASSERT_BATCHES_EQUAL(lhs, rhs) AssertBatchesEqual((lhs), (rhs)) +#define ASSERT_BATCHES_APPROX_EQUAL(lhs, rhs) AssertBatchesApproxEqual((lhs), (rhs)) +#define ASSERT_TABLES_EQUAL(lhs, rhs) AssertTablesEqual((lhs), (rhs)) + +// Default EqualOptions for testing +static inline EqualOptions TestingEqualOptions() { + return EqualOptions{}.nans_equal(true).signed_zeros_equal(false); +} + +// If verbose is true, then the arrays will be pretty printed +ARROW_TESTING_EXPORT void AssertArraysEqual( + const Array& expected, const Array& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertArraysApproxEqual( + const Array& expected, const Array& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +// Returns true when values are 
both null +ARROW_TESTING_EXPORT void AssertScalarsEqual( + const Scalar& expected, const Scalar& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertScalarsApproxEqual( + const Scalar& expected, const Scalar& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBatchesEqual( + const RecordBatch& expected, const RecordBatch& actual, bool check_metadata = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBatchesApproxEqual( + const RecordBatch& expected, const RecordBatch& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedEqual( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedEqual( + const ChunkedArray& actual, const ArrayVector& expected, + const EqualOptions& options = TestingEqualOptions()); +// Like ChunkedEqual, but permits different chunk layout +ARROW_TESTING_EXPORT void AssertChunkedEquivalent( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertChunkedApproxEquivalent( + const ChunkedArray& expected, const ChunkedArray& actual, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, + const std::vector& expected); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, + std::string_view expected); +ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, const Buffer& expected); + +ARROW_TESTING_EXPORT void AssertTypeEqual(const DataType& lhs, const DataType& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertTypeEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); 
+ARROW_TESTING_EXPORT void AssertFieldEqual(const Field& lhs, const Field& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaEqual(const Schema& lhs, const Schema& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); + +ARROW_TESTING_EXPORT void AssertTypeNotEqual(const DataType& lhs, const DataType& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertTypeNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldNotEqual(const Field& lhs, const Field& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertFieldNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const Schema& lhs, const Schema& rhs, + bool check_metadata = false); +ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const std::shared_ptr& lhs, + const std::shared_ptr& rhs, + bool check_metadata = false); + +ARROW_TESTING_EXPORT Result> PrintArrayDiff( + const ChunkedArray& expected, const ChunkedArray& actual); + +ARROW_TESTING_EXPORT void AssertTablesEqual( + const Table& expected, const Table& actual, bool same_chunk_layout = true, + bool flatten = false, const EqualOptions& options = TestingEqualOptions()); + +ARROW_TESTING_EXPORT void AssertDatumsEqual( + const Datum& expected, const Datum& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); +ARROW_TESTING_EXPORT void AssertDatumsApproxEqual( + const Datum& expected, const Datum& actual, bool verbose = false, + const EqualOptions& options = TestingEqualOptions()); + +template +void AssertNumericDataEqual(const C_TYPE* raw_data, + const 
std::vector& expected_values) { + for (auto expected : expected_values) { + ASSERT_EQ(expected, *raw_data); + ++raw_data; + } +} + +ARROW_TESTING_EXPORT void CompareBatch( + const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true, + const EqualOptions& options = TestingEqualOptions()); + +ARROW_TESTING_EXPORT void ApproxCompareBatch( + const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true, + const EqualOptions& options = TestingEqualOptions()); + +// Check if the padding of the buffers of the array is zero. +// Also cause valgrind warnings if the padding bytes are uninitialized. +ARROW_TESTING_EXPORT void AssertZeroPadded(const Array& array); + +// Check if the valid buffer bytes are initialized +// and cause valgrind warnings otherwise. +ARROW_TESTING_EXPORT void TestInitialized(const ArrayData& array); +ARROW_TESTING_EXPORT void TestInitialized(const Array& array); + +#define DECL_T() typedef typename TestFixture::T T; + +#define DECL_TYPE() typedef typename TestFixture::Type Type; + +// ArrayFromJSON: construct an Array from a simple JSON representation + +ARROW_TESTING_EXPORT +std::shared_ptr ArrayFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_TESTING_EXPORT +std::shared_ptr DictArrayFromJSON(const std::shared_ptr& type, + std::string_view indices_json, + std::string_view dictionary_json); + +ARROW_TESTING_EXPORT +std::shared_ptr RecordBatchFromJSON(const std::shared_ptr&, + std::string_view); + +ARROW_TESTING_EXPORT +std::shared_ptr ChunkedArrayFromJSON(const std::shared_ptr&, + const std::vector& json); + +ARROW_TESTING_EXPORT +std::shared_ptr ScalarFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_TESTING_EXPORT +std::shared_ptr DictScalarFromJSON(const std::shared_ptr&, + std::string_view index_json, + std::string_view dictionary_json); + +ARROW_TESTING_EXPORT +std::shared_ptr TableFromJSON(const std::shared_ptr&, + const std::vector& json); + +ARROW_TESTING_EXPORT 
+std::shared_ptr TensorFromJSON(const std::shared_ptr& type, + std::string_view data, std::string_view shape, + std::string_view strides = "[]", + std::string_view dim_names = "[]"); + +ARROW_TESTING_EXPORT +std::shared_ptr TensorFromJSON(const std::shared_ptr& type, + std::string_view data, + const std::vector& shape, + const std::vector& strides = {}, + const std::vector& dim_names = {}); + +ARROW_TESTING_EXPORT +Result> RunEndEncodeTableColumns( + const Table& table, const std::vector& column_indices); + +// Given an array, return a new identical array except for one validity bit +// set to a new value. +// This is useful to force the underlying "value" of null entries to otherwise +// invalid data and check that errors don't get reported. +ARROW_TESTING_EXPORT +std::shared_ptr TweakValidityBit(const std::shared_ptr& array, + int64_t index, bool validity); + +ARROW_TESTING_EXPORT +void SleepFor(double seconds); + +// Sleeps for a very small amount of time. The thread will be yielded +// at least once ensuring that context switches could happen. It is intended +// to be used for stress testing parallel code and shouldn't be assumed to do any +// reliable timing. +ARROW_TESTING_EXPORT +void SleepABit(); + +// Wait until predicate is true or timeout in seconds expires. +ARROW_TESTING_EXPORT +void BusyWait(double seconds, std::function predicate); + +// \see SleepABit +ARROW_TESTING_EXPORT +Future<> SleepABitAsync(); + +ARROW_TESTING_EXPORT bool FileIsClosed(int fd); + +template +std::vector IteratorToVector(Iterator iterator) { + EXPECT_OK_AND_ASSIGN(auto out, iterator.ToVector()); + return out; +} + +ARROW_TESTING_EXPORT +bool LocaleExists(const char* locale); + +#ifndef _WIN32 +ARROW_TESTING_EXPORT +void AssertChildExit(int child_pid, int expected_exit_status = 0); +#endif + +// A RAII-style object that switches to a new locale, and switches back +// to the old locale when going out of scope. 
Doesn't do anything if the +// new locale doesn't exist on the local machine. +// ATTENTION: may crash with an assertion failure on Windows debug builds. +// See ARROW-6108, also https://gerrit.libreoffice.org/#/c/54110/ +class ARROW_TESTING_EXPORT LocaleGuard { + public: + explicit LocaleGuard(const char* new_locale); + ~LocaleGuard(); + + protected: + class Impl; + std::unique_ptr impl_; +}; + +class ARROW_TESTING_EXPORT EnvVarGuard { + public: + EnvVarGuard(const std::string& name, const std::string& value); + ~EnvVarGuard(); + + protected: + const std::string name_; + std::string old_value_; + bool was_set_; +}; + +namespace internal { +class SignalHandler; +} + +class ARROW_TESTING_EXPORT SignalHandlerGuard { + public: + typedef void (*Callback)(int); + + SignalHandlerGuard(int signum, Callback cb); + SignalHandlerGuard(int signum, const internal::SignalHandler& handler); + ~SignalHandlerGuard(); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +#ifndef ARROW_LARGE_MEMORY_TESTS +#define LARGE_MEMORY_TEST(name) DISABLED_##name +#else +#define LARGE_MEMORY_TEST(name) name +#endif + +inline void PrintTo(const Status& st, std::ostream* os) { *os << st.ToString(); } + +template +void PrintTo(const Result& result, std::ostream* os) { + if (result.ok()) { + ::testing::internal::UniversalPrint(result.ValueOrDie(), os); + } else { + *os << result.status(); + } +} + +// A data type with only move constructors (no copy, no default). 
+struct MoveOnlyDataType { + explicit MoveOnlyDataType(int x) : data(new int(x)) {} + + MoveOnlyDataType(const MoveOnlyDataType& other) = delete; + MoveOnlyDataType& operator=(const MoveOnlyDataType& other) = delete; + + MoveOnlyDataType(MoveOnlyDataType&& other) { MoveFrom(&other); } + MoveOnlyDataType& operator=(MoveOnlyDataType&& other) { + MoveFrom(&other); + return *this; + } + + MoveOnlyDataType& operator=(int x) { + if (data != nullptr) { + delete data; + } + data = new int(x); + return *this; + } + + ~MoveOnlyDataType() { Destroy(); } + + void Destroy() { + if (data != nullptr) { + delete data; + data = nullptr; + moves = -1; + } + } + + void MoveFrom(MoveOnlyDataType* other) { + Destroy(); + data = other->data; + other->data = nullptr; + moves = other->moves + 1; + } + + int ToInt() const { return data == nullptr ? -42 : *data; } + + bool operator==(const MoveOnlyDataType& other) const { + return data != nullptr && other.data != nullptr && *data == *other.data; + } + bool operator<(const MoveOnlyDataType& other) const { + return data == nullptr || (other.data != nullptr && *data < *other.data); + } + + bool operator==(int other) const { return data != nullptr && *data == other; } + friend bool operator==(int left, const MoveOnlyDataType& right) { + return right == left; + } + + int* data = nullptr; + int moves = 0; +}; + +// A task that blocks until unlocked. Useful for timing tests. +class ARROW_TESTING_EXPORT GatingTask { + public: + explicit GatingTask(double timeout_seconds = 10); + /// \brief During destruction we wait for all pending tasks to finish + ~GatingTask(); + + /// \brief Creates a new waiting task (presumably to spawn on a thread). It will return + /// invalid if the timeout arrived before the unlock. The task will not complete until + /// unlocked or timed out + /// + /// Note: The GatingTask must outlive any Task instances + std::function Task(); + /// \brief Creates a new waiting task as a future. 
The future will not complete + /// until unlocked. + Future<> AsyncTask(); + /// \brief Waits until at least count tasks are running. + Status WaitForRunning(int count); + /// \brief Unlocks all waiting tasks. Returns an invalid status if any waiting task has + /// timed out + Status Unlock(); + + static std::shared_ptr Make(double timeout_seconds = 10); + + private: + class Impl; + std::shared_ptr impl_; +}; + +/// \brief create an exact copy of the data where each buffer has a max alignment of 1 +/// +/// This method does not recurse into the dictionary or children +ARROW_TESTING_EXPORT std::shared_ptr UnalignBuffers(const ArrayData& array); +/// \brief create an exact copy of the array where each buffer has a max alignment of 1 +/// +/// This method does not recurse into the dictionary or children +ARROW_TESTING_EXPORT std::shared_ptr UnalignBuffers(const Array& array); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h new file mode 100644 index 0000000000000000000000000000000000000000..01e3ced2d1f61b8eb3719208c13a5dc4e111e771 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/launder.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +class AlignedStorage { + public: + static constexpr bool can_memcpy = std::is_trivial::value; + + constexpr T* get() noexcept { + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + constexpr const T* get() const noexcept { + // Use fully qualified name to avoid ambiguities with MSVC (ARROW-14800) + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + void destroy() noexcept { + if (!std::is_trivially_destructible::value) { + get()->~T(); + } + } + + template + void construct(A&&... 
args) noexcept { + new (&data_) T(std::forward(args)...); + } + + template + void assign(V&& v) noexcept { + *get() = std::forward(v); + } + + void move_construct(AlignedStorage* other) noexcept { + new (&data_) T(std::move(*other->get())); + } + + void move_assign(AlignedStorage* other) noexcept { *get() = std::move(*other->get()); } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + } + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + src[i].destroy(); + } + } + + static void move_construct_several(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several(src, dest, n, n); + } + + static void move_construct_several_and_destroy_source( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several_and_destroy_source(src, dest, n, n); + } + + static void destroy_several(AlignedStorage* p, size_t n) noexcept { + if 
(!std::is_trivially_destructible::value) { + for (size_t i = 0; i < n; ++i) { + p[i].destroy(); + } + } + } + + private: +#if !defined(__clang__) && defined(__GNUC__) && defined(__i386__) + // Workaround for GCC bug on i386: + // alignof(int64 | float64) can give different results depending on the + // compilation context, leading to internal ABI mismatch manifesting + // in incorrect propagation of Result between + // compilation units. + // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88115) + static constexpr size_t alignment() { + if (std::is_integral_v && sizeof(T) == 8) { + return 4; + } else if (std::is_floating_point_v && sizeof(T) == 8) { + return 4; + } + return alignof(T); + } + + typename std::aligned_storage::type data_; +#else + typename std::aligned_storage::type data_; +#endif +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..f9bcd534567c6c231192cc174a717997583dfb3c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h @@ -0,0 +1,2058 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/async_generator_fwd.h" +#include "arrow/util/async_util.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/io_util.h" +#include "arrow/util/iterator.h" +#include "arrow/util/mutex.h" +#include "arrow/util/queue.h" +#include "arrow/util/thread_pool.h" + +namespace arrow { + +// The methods in this file create, modify, and utilize AsyncGenerator which is an +// iterator of futures. This allows an asynchronous source (like file input) to be run +// through a pipeline in the same way that iterators can be used to create pipelined +// workflows. +// +// In order to support pipeline parallelism we introduce the concept of asynchronous +// reentrancy. This is different than synchronous reentrancy. With synchronous code a +// function is reentrant if the function can be called again while a previous call to that +// function is still running. Unless otherwise specified none of these generators are +// synchronously reentrant. Care should be taken to avoid calling them in such a way (and +// the utilities Visit/Collect/Await take care to do this). +// +// Asynchronous reentrancy on the other hand means the function is called again before the +// future returned by the function is marked finished (but after the call to get the +// future returns). Some of these generators are async-reentrant while others (e.g. +// those that depend on ordered processing like decompression) are not. Read the MakeXYZ +// function comments to determine which generators support async reentrancy. +// +// Note: Generators that are not asynchronously reentrant can still support readahead +// (\see MakeSerialReadaheadGenerator). +// +// Readahead operators, and some other operators, may introduce queueing. 
Any operators +// that introduce buffering should detail the amount of buffering they introduce in their +// MakeXYZ function comments. +// +// A generator should always be fully consumed before it is destroyed. +// A generator should not mark a future complete with an error status or a terminal value +// until all outstanding futures have completed. Generators that spawn multiple +// concurrent futures may need to hold onto an error while other concurrent futures wrap +// up. +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of AsyncGenerator, + /// an empty function indicates the end of iteration. + static AsyncGenerator End() { return AsyncGenerator(); } + + static bool IsEnd(const AsyncGenerator& val) { return !val; } +}; + +template +Future AsyncGeneratorEnd() { + return Future::MakeFinished(IterationTraits::End()); +} + +/// returning a future that completes when all have been visited +template +Future<> VisitAsyncGenerator(AsyncGenerator generator, Visitor visitor) { + struct LoopBody { + struct Callback { + Result> operator()(const T& next) { + if (IsIterationEnd(next)) { + return Break(); + } else { + auto visited = visitor(next); + if (visited.ok()) { + return Continue(); + } else { + return visited; + } + } + } + + Visitor visitor; + }; + + Future> operator()() { + Callback callback{visitor}; + auto next = generator(); + return next.Then(std::move(callback)); + } + + AsyncGenerator generator; + Visitor visitor; + }; + + return Loop(LoopBody{std::move(generator), std::move(visitor)}); +} + +/// \brief Wait for an async generator to complete, discarding results. 
+template +Future<> DiscardAllFromAsyncGenerator(AsyncGenerator generator) { + std::function visitor = [](const T&) { return Status::OK(); }; + return VisitAsyncGenerator(generator, visitor); +} + +/// \brief Collect the results of an async generator into a vector +template +Future> CollectAsyncGenerator(AsyncGenerator generator) { + auto vec = std::make_shared>(); + auto loop_body = [generator = std::move(generator), + vec = std::move(vec)]() -> Future>> { + auto next = generator(); + return next.Then([vec](const T& result) -> Result>> { + if (IsIterationEnd(result)) { + return Break(*vec); + } else { + vec->push_back(result); + return Continue(); + } + }); + }; + return Loop(std::move(loop_body)); +} + +/// \see MakeMappedGenerator +template +class MappingGenerator { + public: + MappingGenerator(AsyncGenerator source, std::function(const T&)> map) + : state_(std::make_shared(std::move(source), std::move(map))) {} + + Future operator()() { + auto future = Future::Make(); + bool should_trigger; + { + auto guard = state_->mutex.Lock(); + if (state_->finished) { + return AsyncGeneratorEnd(); + } + should_trigger = state_->waiting_jobs.empty(); + state_->waiting_jobs.push_back(future); + } + if (should_trigger) { + state_->source().AddCallback(Callback{state_}); + } + return future; + } + + private: + struct State { + State(AsyncGenerator source, std::function(const T&)> map) + : source(std::move(source)), + map(std::move(map)), + waiting_jobs(), + mutex(), + finished(false) {} + + void Purge() { + // This might be called by an original callback (if the source iterator fails or + // ends) or by a mapped callback (if the map function fails or ends prematurely). + // Either way it should only be called once and after finished is set so there is no + // need to guard access to `waiting_jobs`. 
+ while (!waiting_jobs.empty()) { + waiting_jobs.front().MarkFinished(IterationTraits::End()); + waiting_jobs.pop_front(); + } + } + + AsyncGenerator source; + std::function(const T&)> map; + std::deque> waiting_jobs; + util::Mutex mutex; + bool finished; + }; + + struct Callback; + + struct MappedCallback { + void operator()(const Result& maybe_next) { + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + if (end) { + { + auto guard = state->mutex.Lock(); + should_purge = !state->finished; + state->finished = true; + } + } + sink.MarkFinished(maybe_next); + if (should_purge) { + state->Purge(); + } + } + std::shared_ptr state; + Future sink; + }; + + struct Callback { + void operator()(const Result& maybe_next) { + Future sink; + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + bool should_trigger; + { + auto guard = state->mutex.Lock(); + // A MappedCallback may have purged or be purging the queue; + // we shouldn't do anything here. + if (state->finished) return; + if (end) { + should_purge = !state->finished; + state->finished = true; + } + sink = state->waiting_jobs.front(); + state->waiting_jobs.pop_front(); + should_trigger = !end && !state->waiting_jobs.empty(); + } + if (should_purge) { + state->Purge(); + } + if (should_trigger) { + state->source().AddCallback(Callback{state}); + } + if (maybe_next.ok()) { + const T& val = maybe_next.ValueUnsafe(); + if (IsIterationEnd(val)) { + sink.MarkFinished(IterationTraits::End()); + } else { + Future mapped_fut = state->map(val); + mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)}); + } + } else { + sink.MarkFinished(maybe_next.status()); + } + } + + std::shared_ptr state; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that will apply the map function to each element of +/// source. The map function is not called on the end token. 
+/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeMappedGenerator(AsyncGenerator source_generator, MapFn map) { + auto map_callback = [map = std::move(map)](const T& val) mutable -> Future { + return ToFuture(map(val)); + }; + return MappingGenerator(std::move(source_generator), std::move(map_callback)); +} + +/// \brief Create a generator that will apply the map function to +/// each element of source. The map function is not called on the end +/// token. The result of the map function should be another +/// generator; all these generators will then be flattened to produce +/// a single stream of items. +/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeFlatMappedGenerator(AsyncGenerator source_generator, MapFn map) { + return MakeConcatenatedGenerator( + MakeMappedGenerator(std::move(source_generator), std::move(map))); +} + +/// \see MakeSequencingGenerator +template +class SequencingGenerator { + public: + SequencingGenerator(AsyncGenerator source, ComesAfter compare, IsNext is_next, + T initial_value) + : state_(std::make_shared(std::move(source), std::move(compare), + std::move(is_next), std::move(initial_value))) {} + + Future operator()() { + { + auto guard = state_->mutex.Lock(); + // We can send a result immediately if the top of the queue is either an + // error or the next item + if (!state_->queue.empty() && + (!state_->queue.top().ok() || + state_->is_next(state_->previous_value, *state_->queue.top()))) { + auto result = 
std::move(state_->queue.top()); + if (result.ok()) { + state_->previous_value = *result; + } + state_->queue.pop(); + return Future::MakeFinished(result); + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + // The next item is not in the queue so we will need to wait + auto new_waiting_fut = Future::Make(); + state_->waiting_future = new_waiting_fut; + guard.Unlock(); + state_->source().AddCallback(Callback{state_}); + return new_waiting_fut; + } + } + + private: + struct WrappedComesAfter { + bool operator()(const Result& left, const Result& right) { + if (!left.ok() || !right.ok()) { + // Should never happen + return false; + } + return compare(*left, *right); + } + ComesAfter compare; + }; + + struct State { + State(AsyncGenerator source, ComesAfter compare, IsNext is_next, T initial_value) + : source(std::move(source)), + is_next(std::move(is_next)), + previous_value(std::move(initial_value)), + waiting_future(), + queue(WrappedComesAfter{compare}), + finished(false), + mutex() {} + + AsyncGenerator source; + IsNext is_next; + T previous_value; + Future waiting_future; + std::priority_queue, std::vector>, WrappedComesAfter> queue; + bool finished; + util::Mutex mutex; + }; + + class Callback { + public: + explicit Callback(std::shared_ptr state) : state_(std::move(state)) {} + + void operator()(const Result result) { + Future to_deliver; + bool finished; + { + auto guard = state_->mutex.Lock(); + bool ready_to_deliver = false; + if (!result.ok()) { + // Clear any cached results + while (!state_->queue.empty()) { + state_->queue.pop(); + } + ready_to_deliver = true; + state_->finished = true; + } else if (IsIterationEnd(result.ValueUnsafe())) { + ready_to_deliver = state_->queue.empty(); + state_->finished = true; + } else { + ready_to_deliver = state_->is_next(state_->previous_value, *result); + } + + if (ready_to_deliver && state_->waiting_future.is_valid()) { + to_deliver = state_->waiting_future; + if (result.ok()) { + state_->previous_value = 
*result; + } + } else { + state_->queue.push(result); + } + // Capture state_->finished so we can access it outside the mutex + finished = state_->finished; + } + // Must deliver result outside of the mutex + if (to_deliver.is_valid()) { + to_deliver.MarkFinished(result); + } else { + // Otherwise, if we didn't get the next item (or a terminal item), we + // need to keep looking + if (!finished) { + state_->source().AddCallback(Callback{state_}); + } + } + } + + private: + const std::shared_ptr state_; + }; + + const std::shared_ptr state_; +}; + +/// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter +/// and IsNext determine the sequence order. +/// +/// ComesAfter should be a BinaryPredicate that only returns true if a comes after b +/// +/// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if +/// `b` follows immediately after `a`. It should return true given `initial_value` and +/// `b` if `b` is the first item in the sequence. +/// +/// This operator will queue unboundedly while waiting for the next item. It is intended +/// for jittery sources that might scatter an ordered sequence. It is NOT intended to +/// sort. Using it to try and sort could result in excessive RAM usage. This generator +/// will queue up to N blocks where N is the max "out of order"ness of the source. +/// +/// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3 +/// blocks beyond where it belongs. +/// +/// This generator is not async-reentrant but it consists only of a simple log(n) +/// insertion into a priority queue. 
+template +AsyncGenerator MakeSequencingGenerator(AsyncGenerator source_generator, + ComesAfter compare, IsNext is_next, + T initial_value) { + return SequencingGenerator( + std::move(source_generator), std::move(compare), std::move(is_next), + std::move(initial_value)); +} + +/// \see MakeTransformedGenerator +template +class TransformingGenerator { + // The transforming generator state will be referenced as an async generator but will + // also be referenced via callback to various futures. If the async generator owner + // moves it around we need the state to be consistent for future callbacks. + struct TransformingGeneratorState + : std::enable_shared_from_this { + TransformingGeneratorState(AsyncGenerator generator, Transformer transformer) + : generator_(std::move(generator)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Future operator()() { + while (true) { + auto maybe_next_result = Pump(); + if (!maybe_next_result.ok()) { + return Future::MakeFinished(maybe_next_result.status()); + } + auto maybe_next = std::move(maybe_next_result).ValueUnsafe(); + if (maybe_next.has_value()) { + return Future::MakeFinished(*std::move(maybe_next)); + } + + auto next_fut = generator_(); + // If finished already, process results immediately inside the loop to avoid + // stack overflow + if (next_fut.is_finished()) { + auto next_result = next_fut.result(); + if (next_result.ok()) { + last_value_ = *next_result; + } else { + return Future::MakeFinished(next_result.status()); + } + // Otherwise, if not finished immediately, add callback to process results + } else { + auto self = this->shared_from_this(); + return next_fut.Then([self](const T& next_result) { + self->last_value_ = next_result; + return (*self)(); + }); + } + } + } + + // See comment on TransformingIterator::Pump + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + ARROW_ASSIGN_OR_RAISE(TransformFlow next, transformer_(*last_value_)); + if (next.ReadyForNext()) { + 
if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + AsyncGenerator generator_; + Transformer transformer_; + std::optional last_value_; + bool finished_; + }; + + public: + explicit TransformingGenerator(AsyncGenerator generator, + Transformer transformer) + : state_(std::make_shared(std::move(generator), + std::move(transformer))) {} + + Future operator()() { return (*state_)(); } + + protected: + std::shared_ptr state_; +}; + +/// \brief Transform an async generator using a transformer function returning a new +/// AsyncGenerator +/// +/// The transform function here behaves exactly the same as the transform function in +/// MakeTransformedIterator and you can safely use the same transform function to +/// transform both synchronous and asynchronous streams. +/// +/// This generator is not async-reentrant +/// +/// This generator may queue up to 1 instance of T but will not delay +template +AsyncGenerator MakeTransformedGenerator(AsyncGenerator generator, + Transformer transformer) { + return TransformingGenerator(generator, transformer); +} + +/// \see MakeSerialReadaheadGenerator +template +class SerialReadaheadGenerator { + public: + SerialReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future operator()() { + if (state_->first_) { + // Lazy generator, need to wait for the first ask to prime the pump + state_->first_ = false; + auto next = state_->source_(); + return next.Then(Callback{state_}, ErrCallback{state_}); + } + + // This generator is not async-reentrant. 
We won't be called until the last + // future finished so we know there is something in the queue + auto finished = state_->finished_.load(); + if (finished && state_->readahead_queue_.IsEmpty()) { + return AsyncGeneratorEnd(); + } + + std::shared_ptr> next; + if (!state_->readahead_queue_.Read(next)) { + return Status::UnknownError("Could not read from readahead_queue"); + } + + auto last_available = state_->spaces_available_.fetch_add(1); + if (last_available == 0 && !finished) { + // Reader idled out, we need to restart it + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return *next; + } + + private: + struct State { + State(AsyncGenerator source, int max_readahead) + : first_(true), + source_(std::move(source)), + finished_(false), + // There is one extra "space" for the in-flight request + spaces_available_(max_readahead + 1), + // The SPSC queue has size-1 "usable" slots so we need to overallocate 1 + readahead_queue_(max_readahead + 1) {} + + Status Pump(const std::shared_ptr& self) { + // Can't do readahead_queue.write(source().Then(...)) because then the + // callback might run immediately and add itself to the queue before this gets added + // to the queue messing up the order. + auto next_slot = std::make_shared>(); + auto written = readahead_queue_.Write(next_slot); + if (!written) { + return Status::UnknownError("Could not write to readahead_queue"); + } + // If this Pump is being called from a callback it is possible for the source to + // poll and read from the queue between the Write and this spot where we fill the + // value in. However, it is not possible for the future to read this value we are + // writing. That is because this callback (the callback for future X) must be + // finished before future X is marked complete and this source is not pulled + // reentrantly so it will not poll for future X+1 until this callback has completed. 
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self}); + return Status::OK(); + } + + // Only accessed by the consumer end + bool first_; + // Accessed by both threads + AsyncGenerator source_; + std::atomic finished_; + // The queue has a size but it is not atomic. We keep track of how many spaces are + // left in the queue here so we know if we've just written the last value and we need + // to stop reading ahead or if we've just read from a full queue and we need to + // restart reading ahead + std::atomic spaces_available_; + // Needs to be a queue of shared_ptr and not Future because we set the value of the + // future after we add it to the queue + util::SpscQueue>> readahead_queue_; + }; + + struct Callback { + Result operator()(const T& next) { + if (IsIterationEnd(next)) { + state_->finished_.store(true); + return next; + } + auto last_available = state_->spaces_available_.fetch_sub(1); + if (last_available > 1) { + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return next; + } + + std::shared_ptr state_; + }; + + struct ErrCallback { + Result operator()(const Status& st) { + state_->finished_.store(true); + return st; + } + + std::shared_ptr state_; + }; + + std::shared_ptr state_; +}; + +/// \see MakeFromFuture +template +class FutureFirstGenerator { + public: + explicit FutureFirstGenerator(Future> future) + : state_(std::make_shared(std::move(future))) {} + + Future operator()() { + if (state_->source_) { + return state_->source_(); + } else { + auto state = state_; + return state_->future_.Then([state](const AsyncGenerator& source) { + state->source_ = source; + return state->source_(); + }); + } + } + + private: + struct State { + explicit State(Future> future) : future_(future), source_() {} + + Future> future_; + AsyncGenerator source_; + }; + + std::shared_ptr state_; +}; + +/// \brief Transform a Future> into an AsyncGenerator +/// that waits for the future to complete as part of the first item. 
+/// +/// This generator is not async-reentrant (even if the generator yielded by future is) +/// +/// This generator does not queue +template +AsyncGenerator MakeFromFuture(Future> future) { + return FutureFirstGenerator(std::move(future)); +} + +/// \brief Create a generator that will pull from the source into a queue. Unlike +/// MakeReadaheadGenerator this will not pull reentrantly from the source. +/// +/// The source generator does not need to be async-reentrant +/// +/// This generator is not async-reentrant (even if the source is) +/// +/// This generator may queue up to max_readahead additional instances of T +template +AsyncGenerator MakeSerialReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return SerialReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Create a generator that immediately pulls from the source +/// +/// Typical generators do not pull from their source until they themselves +/// are pulled. This generator does not follow that convention and will call +/// generator() once before it returns. The returned generator will otherwise +/// mirror the source. +/// +/// This generator forwards async-reentrant pressure to the source +/// This generator buffers one item (the first result) until it is delivered. 
+template +AsyncGenerator MakeAutoStartingGenerator(AsyncGenerator generator) { + struct AutostartGenerator { + Future operator()() { + if (first_future->is_valid()) { + Future result = *first_future; + *first_future = Future(); + return result; + } + return source(); + } + + std::shared_ptr> first_future; + AsyncGenerator source; + }; + + std::shared_ptr> first_future = std::make_shared>(generator()); + return AutostartGenerator{std::move(first_future), std::move(generator)}; +} + +/// \see MakeReadaheadGenerator +template +class ReadaheadGenerator { + public: + ReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future AddMarkFinishedContinuation(Future fut) { + auto state = state_; + return fut.Then( + [state](const T& result) -> Future { + state->MarkFinishedIfDone(result); + if (state->finished.load()) { + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + } else { + state->num_running.fetch_sub(1); + } + return result; + }, + [state](const Status& err) -> Future { + // If there is an error we need to make sure all running + // tasks finish before we return the error. 
+ state->finished.store(true); + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + return state->final_future.Then([err]() -> Result { return err; }); + }); + } + + Future operator()() { + if (state_->readahead_queue.empty()) { + // This is the first request, let's pump the underlying queue + state_->num_running.store(state_->max_readahead); + for (int i = 0; i < state_->max_readahead; i++) { + auto next = state_->source_generator(); + auto next_after_check = AddMarkFinishedContinuation(std::move(next)); + state_->readahead_queue.push(std::move(next_after_check)); + } + } + // Pop one and add one + auto result = state_->readahead_queue.front(); + state_->readahead_queue.pop(); + if (state_->finished.load()) { + state_->readahead_queue.push(AsyncGeneratorEnd()); + } else { + state_->num_running.fetch_add(1); + auto back_of_queue = state_->source_generator(); + auto back_of_queue_after_check = + AddMarkFinishedContinuation(std::move(back_of_queue)); + state_->readahead_queue.push(std::move(back_of_queue_after_check)); + } + return result; + } + + private: + struct State { + State(AsyncGenerator source_generator, int max_readahead) + : source_generator(std::move(source_generator)), max_readahead(max_readahead) {} + + void MarkFinishedIfDone(const T& next_result) { + if (IsIterationEnd(next_result)) { + finished.store(true); + } + } + + AsyncGenerator source_generator; + int max_readahead; + Future<> final_future = Future<>::Make(); + std::atomic num_running{0}; + std::atomic finished{false}; + std::queue> readahead_queue; + }; + + std::shared_ptr state_; +}; + +/// \brief A generator where the producer pushes items on a queue. +/// +/// No back-pressure is applied, so this generator is mostly useful when +/// producing the values is neither CPU- nor memory-expensive (e.g. fetching +/// filesystem metadata). +/// +/// This generator is not async-reentrant. 
+template +class PushGenerator { + struct State { + State() {} + + util::Mutex mutex; + std::deque> result_q; + std::optional> consumer_fut; + bool finished = false; + }; + + public: + /// Producer API for PushGenerator + class Producer { + public: + explicit Producer(const std::shared_ptr& state) : weak_state_(state) {} + + /// \brief Push a value on the queue + /// + /// True is returned if the value was pushed, false if the generator is + /// already closed or destroyed. If the latter, it is recommended to stop + /// producing any further values. + bool Push(Result result) { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Closed early + return false; + } + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(std::move(result)); + } else { + state->result_q.push_back(std::move(result)); + } + return true; + } + + /// \brief Tell the consumer we have finished producing + /// + /// It is allowed to call this and later call Push() again ("early close"). + /// In this case, calls to Push() after the queue is closed are silently + /// ignored. This can help implementing non-trivial cancellation cases. + /// + /// True is returned on success, false if the generator is already closed + /// or destroyed. 
+ bool Close() { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Already closed + return false; + } + state->finished = true; + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(IterationTraits::End()); + } + return true; + } + + /// Return whether the generator was closed or destroyed. + bool is_closed() const { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return true; + } + auto lock = state->mutex.Lock(); + return state->finished; + } + + private: + const std::weak_ptr weak_state_; + }; + + PushGenerator() : state_(std::make_shared()) {} + + /// Read an item from the queue + Future operator()() const { + auto lock = state_->mutex.Lock(); + assert(!state_->consumer_fut.has_value()); // Non-reentrant + if (!state_->result_q.empty()) { + auto fut = Future::MakeFinished(std::move(state_->result_q.front())); + state_->result_q.pop_front(); + return fut; + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + auto fut = Future::Make(); + state_->consumer_fut = fut; + return fut; + } + + /// \brief Return producer-side interface + /// + /// The returned object must be used by the producer to push values on the queue. + /// Only a single Producer object should be instantiated. + Producer producer() { return Producer{state_}; } + + private: + const std::shared_ptr state_; +}; + +/// \brief Create a generator that pulls reentrantly from a source +/// This generator will pull reentrantly from a source, ensuring that max_readahead +/// requests are active at any given time. +/// +/// The source generator must be async-reentrant +/// +/// This generator itself is async-reentrant. 
+/// +/// This generator may queue up to max_readahead instances of T +template +AsyncGenerator MakeReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return ReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Creates a generator that will yield finished futures from a vector +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeVectorGenerator(std::vector vec) { + struct State { + explicit State(std::vector vec_) : vec(std::move(vec_)), vec_idx(0) {} + + std::vector vec; + std::atomic vec_idx; + }; + + auto state = std::make_shared(std::move(vec)); + return [state]() { + auto idx = state->vec_idx.fetch_add(1); + if (idx >= state->vec.size()) { + // Eagerly return memory + state->vec.clear(); + return AsyncGeneratorEnd(); + } + return Future::MakeFinished(state->vec[idx]); + }; +} + +/// \see MakeMergedGenerator +template +class MergedGenerator { + // Note, the implementation of this class is quite complex at the moment (PRs to + // simplify are always welcome) + // + // Terminology is borrowed from rxjs. This is a pull based implementation of the + // mergeAll operator. The "outer subscription" refers to the async + // generator that the caller provided when creating this. The outer subscription + // yields generators. + // + // Each of these generators is then subscribed to (up to max_subscriptions) and these + // are referred to as "inner subscriptions". + // + // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For + // each inner subscription we will cache up to 1 value. This means we may have more + // values than we have been asked for. In our example, if a caller asks for one record + // batch we will start scanning `max_subscriptions` different files. For each file we + // will only queue up to 1 batch (so a separate readahead is needed on the file if batch + // readahead is desired). 
+ // + // If the caller is slow we may accumulate ready-to-deliver items. These are stored + // in `delivered_jobs`. + // + // If the caller is very quick we may accumulate requests. These are stored in + // `waiting_jobs`. + // + // It may be helpful to consider an example, in the scanner the outer subscription + // is some kind of asynchronous directory listing. The inner subscription is + // then a scan on a file yielded by the directory listing. + // + // An "outstanding" request is when we have polled either the inner or outer + // subscription but that future hasn't completed yet. + // + // There are three possible "events" that can happen. + // * A caller could request the next future + // * An outer callback occurs when the next subscription is ready (e.g. the directory + // listing has produced a new file) + // * An inner callback occurs when one of the inner subscriptions emits a value (e.g. + // a file scan emits a record batch) + // + // Any time an event happens the logic is broken into two phases. First, we grab the + // lock and modify the shared state. While doing this we figure out what callbacks we + // will need to execute. Then, we give up the lock and execute these callbacks. It is + // important to execute these callbacks without the lock to avoid deadlock. 
+ public: + explicit MergedGenerator(AsyncGenerator> source, + int max_subscriptions) + : state_(std::make_shared(std::move(source), max_subscriptions)) {} + + Future operator()() { + // A caller has requested a future + Future waiting_future; + std::shared_ptr delivered_job; + bool mark_generator_complete = false; + { + auto guard = state_->mutex.Lock(); + if (!state_->delivered_jobs.empty()) { + // If we have a job sitting around we can deliver it + delivered_job = std::move(state_->delivered_jobs.front()); + state_->delivered_jobs.pop_front(); + if (state_->IsCompleteUnlocked(guard)) { + // It's possible this waiting job was the only thing left to handle and + // we have now completed the generator. + mark_generator_complete = true; + } else { + // Since we had a job sitting around we also had an inner subscription + // that had paused. We are going to restart this inner subscription and + // so there will be a new outstanding request. + state_->outstanding_requests++; + } + } else if (state_->broken || + (!state_->first && state_->num_running_subscriptions == 0)) { + // If we are broken or exhausted then prepare a terminal item but + // we won't complete it until we've finished. + Result end_res = IterationEnd(); + if (!state_->final_error.ok()) { + end_res = state_->final_error; + state_->final_error = Status::OK(); + } + return state_->all_finished.Then([end_res]() -> Result { return end_res; }); + } else { + // Otherwise we just queue the request and it will be completed when one of the + // ongoing inner subscriptions delivers a result + waiting_future = Future::Make(); + state_->waiting_jobs.push_back(std::make_shared>(waiting_future)); + } + if (state_->first) { + // On the first request we are going to try and immediately fill our queue + // of subscriptions. We assume we are going to be able to start them all. 
+ state_->outstanding_requests += + static_cast(state_->active_subscriptions.size()); + state_->num_running_subscriptions += + static_cast(state_->active_subscriptions.size()); + } + } + // If we grabbed a finished item from the delivered_jobs queue then we may need + // to mark the generator finished or issue a request for a new item to fill in + // the spot we just vacated. Notice that we issue that request to the same + // subscription that delivered it (deliverer). + if (delivered_job) { + if (mark_generator_complete) { + state_->all_finished.MarkFinished(); + } else { + delivered_job->deliverer().AddCallback( + InnerCallback(state_, delivered_job->index)); + } + return std::move(delivered_job->value); + } + // On the first call we try and fill up our subscriptions. It's possible the outer + // generator only has a few items and we can't fill up to what we were hoping. In + // that case we have to bail early. + if (state_->first) { + state_->first = false; + mark_generator_complete = false; + for (int i = 0; i < static_cast(state_->active_subscriptions.size()); i++) { + state_->PullSource().AddCallback( + OuterCallback{state_, static_cast(i)}); + // If we have to bail early then we need to update the shared state again so + // we need to reacquire the lock. + auto guard = state_->mutex.Lock(); + if (state_->source_exhausted) { + int excess_requests = + static_cast(state_->active_subscriptions.size()) - i - 1; + state_->outstanding_requests -= excess_requests; + state_->num_running_subscriptions -= excess_requests; + if (excess_requests > 0) { + // It's possible that we are completing the generator by reducing the number + // of outstanding requests (e.g. 
this happens when the outer subscription and + // all inner subscriptions are synchronous) + mark_generator_complete = state_->IsCompleteUnlocked(guard); + } + break; + } + } + if (mark_generator_complete) { + state_->MarkFinishedAndPurge(); + } + } + return waiting_future; + } + + private: + struct DeliveredJob { + explicit DeliveredJob(AsyncGenerator deliverer_, Result value_, + std::size_t index_) + : deliverer(deliverer_), value(std::move(value_)), index(index_) {} + + // The generator that delivered this result, we will request another item + // from this generator once the result is delivered + AsyncGenerator deliverer; + // The result we received from the generator + Result value; + // The index of the generator (in active_subscriptions) that delivered this + // result. This is used if we need to replace a finished generator. + std::size_t index; + }; + + struct State { + State(AsyncGenerator> source, int max_subscriptions) + : source(std::move(source)), + active_subscriptions(max_subscriptions), + delivered_jobs(), + waiting_jobs(), + mutex(), + first(true), + broken(false), + source_exhausted(false), + outstanding_requests(0), + num_running_subscriptions(0), + final_error(Status::OK()) {} + + Future> PullSource() { + // Need to guard access to source() so we don't pull sync-reentrantly which + // is never valid. + auto lock = mutex.Lock(); + return source(); + } + + void SignalErrorUnlocked(const util::Mutex::Guard& guard) { + broken = true; + // Empty any results that have arrived but not asked for. 
+ while (!delivered_jobs.empty()) { + delivered_jobs.pop_front(); + } + } + + // This function is called outside the mutex but it will only ever be + // called once + void MarkFinishedAndPurge() { + all_finished.MarkFinished(); + while (!waiting_jobs.empty()) { + waiting_jobs.front()->MarkFinished(IterationEnd()); + waiting_jobs.pop_front(); + } + } + + // This is called outside the mutex but it is only ever called + // once and Future<>::AddCallback is thread-safe + void MarkFinalError(const Status& err, Future maybe_sink) { + if (maybe_sink.is_valid()) { + // Someone is waiting for this error so lets mark it complete when + // all the work is done + all_finished.AddCallback([maybe_sink, err](const Status& status) mutable { + maybe_sink.MarkFinished(err); + }); + } else { + // No one is waiting for this error right now so it will be delivered + // next. + final_error = err; + } + } + + bool IsCompleteUnlocked(const util::Mutex::Guard& guard) { + return outstanding_requests == 0 && + (broken || (source_exhausted && num_running_subscriptions == 0 && + delivered_jobs.empty())); + } + + bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) { + --outstanding_requests; + return IsCompleteUnlocked(guard); + } + + // The outer generator. Each item we pull from this will be its own generator + // and become an inner subscription + AsyncGenerator> source; + // active_subscriptions and delivered_jobs will be bounded by max_subscriptions + std::vector> active_subscriptions; + // Results delivered by the inner subscriptions that weren't yet asked for by the + // caller + std::deque> delivered_jobs; + // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the + // backpressure + std::deque>> waiting_jobs; + // A future that will be marked complete when the terminal item has arrived and all + // outstanding futures have completed. It is used to hold off emission of an error + // until all outstanding work is done. 
+ Future<> all_finished = Future<>::Make(); + util::Mutex mutex; + // A flag cleared when the caller firsts asks for a future. Used to start polling. + bool first; + // A flag set when an error arrives, prevents us from issuing new requests. + bool broken; + // A flag set when the outer subscription has been exhausted. Prevents us from + // pulling it further (even though it would be generally harmless) and lets us know we + // are finishing up. + bool source_exhausted; + // The number of futures that we have requested from either the outer or inner + // subscriptions that have not yet completed. We cannot mark all_finished until this + // reaches 0. This will never be greater than max_subscriptions + int outstanding_requests; + // The number of running subscriptions. We ramp this up to `max_subscriptions` as + // soon as the first item is requested and then it stays at that level (each exhausted + // inner subscription is replaced by a new inner subscription) until the outer + // subscription is exhausted at which point this descends to 0 (and source_exhausted) + // is then set to true. + int num_running_subscriptions; + // If an error arrives, and the caller hasn't asked for that item, we store the error + // here. It is analagous to delivered_jobs but for errors instead of finished + // results. + Status final_error; + }; + + struct InnerCallback { + InnerCallback(std::shared_ptr state, std::size_t index, bool recursive = false) + : state(std::move(state)), index(index), recursive(recursive) {} + + void operator()(const Result& maybe_next_ref) { + // An item has been delivered by one of the inner subscriptions + Future next_fut; + const Result* maybe_next = &maybe_next_ref; + + // When an item is delivered (and the caller has asked for it) we grab the + // next item from the inner subscription. To avoid this behavior leading to an + // infinite loop (this can happen if the caller's callback asks for the next item) + // we use a while loop. 
+ while (true) { + Future sink; + bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next); + bool pull_next_sub = false; + bool was_broken = false; + bool should_mark_gen_complete = false; + bool should_mark_final_error = false; + { + auto guard = state->mutex.Lock(); + if (state->broken) { + // We've errored out previously so ignore the result. If anyone was waiting + // for this they will get IterationEnd when we purge + was_broken = true; + } else { + if (!sub_finished) { + // There is a result to deliver. Either we can deliver it now or we will + // queue it up + if (state->waiting_jobs.empty()) { + state->delivered_jobs.push_back(std::make_shared( + state->active_subscriptions[index], *maybe_next, index)); + } else { + sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + + // If this is the first error then we transition the state to a broken state + if (!maybe_next->ok()) { + should_mark_final_error = true; + state->SignalErrorUnlocked(guard); + } + } + + // If we finished this inner subscription then we need to grab a new inner + // subscription to take its spot. If we can't (because we're broken or + // exhausted) then we aren't going to be starting any new futures and so + // the number of running subscriptions drops. + pull_next_sub = sub_finished && !state->source_exhausted && !was_broken; + if (sub_finished && !pull_next_sub) { + state->num_running_subscriptions--; + } + // There are three situations we won't pull again. If an error occurred or we + // are already finished or if no one was waiting for our result and so we queued + // it up. We will decrement outstanding_requests and possibly mark the + // generator completed. 
+ if (state->broken || (!sink.is_valid() && !sub_finished) || + (sub_finished && state->source_exhausted)) { + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } + } + + // Now we have given up the lock and we can take all the actions we decided we + // need to take. + if (should_mark_final_error) { + state->MarkFinalError(maybe_next->status(), std::move(sink)); + } + + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + + // An error occurred elsewhere so there is no need to mark any future + // finished (will happen during the purge) or pull from anything + if (was_broken) { + return; + } + + if (pull_next_sub) { + if (recursive) { + was_empty = true; + return; + } + // We pulled an end token so we need to start a new subscription + // in our spot + state->PullSource().AddCallback(OuterCallback{state, index}); + } else if (sink.is_valid()) { + // We pulled a valid result and there was someone waiting for it + // so lets fetch the next result from our subscription + sink.MarkFinished(*maybe_next); + next_fut = state->active_subscriptions[index](); + if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) { + return; + } + // Already completed. Avoid very deep recursion by looping + // here instead of relying on the callback. + maybe_next = &next_fut.result(); + continue; + } + // else: We pulled a valid result but no one was waiting for it so + // we can just stop. 
+ return; + } + } + std::shared_ptr state; + std::size_t index; + bool recursive; + bool was_empty = false; + }; + + struct OuterCallback { + void operator()(const Result>& initial_maybe_next) { + Result> maybe_next = initial_maybe_next; + while (true) { + // We have been given a new inner subscription + bool should_continue = false; + bool should_mark_gen_complete = false; + bool should_deliver_error = false; + bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next); + Future error_sink; + { + auto guard = state->mutex.Lock(); + if (!maybe_next.ok() || source_exhausted || state->broken) { + // If here then we will not pull any more from the outer source + if (!state->broken && !maybe_next.ok()) { + state->SignalErrorUnlocked(guard); + // If here then we are the first error so we need to deliver it + should_deliver_error = true; + if (!state->waiting_jobs.empty()) { + error_sink = std::move(*state->waiting_jobs.front()); + state->waiting_jobs.pop_front(); + } + } + if (source_exhausted) { + state->source_exhausted = true; + state->num_running_subscriptions--; + } + if (state->MarkTaskFinishedUnlocked(guard)) { + should_mark_gen_complete = true; + } + } else { + state->active_subscriptions[index] = *maybe_next; + should_continue = true; + } + } + if (should_deliver_error) { + state->MarkFinalError(maybe_next.status(), std::move(error_sink)); + } + if (should_mark_gen_complete) { + state->MarkFinishedAndPurge(); + } + if (should_continue) { + // There is a possibility that a large sequence of immediately available inner + // callbacks could lead to a stack overflow. To avoid this we need to + // synchronously loop through inner/outer callbacks until we either find an + // unfinished future or we find an actual item to deliver. 
+ Future next_item = (*maybe_next)(); + if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) { + // By setting recursive to true we signal to the inner callback that, if it is + // empty, instead of adding a new outer callback, it should just immediately + // return, flagging was_empty so that we know we need to check the next + // subscription. + InnerCallback immediate_inner(state, index, /*recursive=*/true); + immediate_inner(next_item.result()); + if (immediate_inner.was_empty) { + Future> next_source = state->PullSource(); + if (next_source.TryAddCallback([this] { + return OuterCallback{state, index}; + })) { + // We hit an unfinished future so we can stop looping + return; + } + // The current subscription was immediately and synchronously empty + // and we were able to synchronously pull the next subscription so we + // can keep looping. + maybe_next = next_source.result(); + continue; + } + } + } + return; + } + } + std::shared_ptr state; + std::size_t index; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that takes in a stream of generators and pulls from up to +/// max_subscriptions at a time +/// +/// Note: This may deliver items out of sequence. For example, items from the third +/// AsyncGenerator generated by the source may be emitted before some items from the first +/// AsyncGenerator generated by the source. +/// +/// This generator will pull from source async-reentrantly unless max_subscriptions is 1 +/// This generator will not pull from the individual subscriptions reentrantly. Add +/// readahead to the individual subscriptions if that is desired. 
+/// This generator is async-reentrant +/// +/// This generator may queue up to max_subscriptions instances of T +template +AsyncGenerator MakeMergedGenerator(AsyncGenerator> source, + int max_subscriptions) { + return MergedGenerator(std::move(source), max_subscriptions); +} + +template +Result> MakeSequencedMergedGenerator( + AsyncGenerator> source, int max_subscriptions) { + if (max_subscriptions < 0) { + return Status::Invalid("max_subscriptions must be a positive integer"); + } + if (max_subscriptions == 1) { + return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1"); + } + AsyncGenerator> autostarting_source = MakeMappedGenerator( + std::move(source), + [](const AsyncGenerator& sub) { return MakeAutoStartingGenerator(sub); }); + AsyncGenerator> sub_readahead = + MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1); + return MakeConcatenatedGenerator(std::move(sub_readahead)); +} + +/// \brief Create a generator that takes in a stream of generators and pulls from each +/// one in sequence. +/// +/// This generator is async-reentrant but will never pull from source reentrantly and +/// will never pull from any subscription reentrantly. 
+/// +/// This generator may queue 1 instance of T +/// +/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that +/// forwards async-reentrant requests instead of buffering them (which is what +/// MergedGenerator does) +template +AsyncGenerator MakeConcatenatedGenerator(AsyncGenerator> source) { + return MergedGenerator(std::move(source), 1); +} + +template +struct Enumerated { + T value; + int index; + bool last; +}; + +template +struct IterationTraits> { + static Enumerated End() { return Enumerated{IterationEnd(), -1, false}; } + static bool IsEnd(const Enumerated& val) { return val.index < 0; } +}; + +/// \see MakeEnumeratedGenerator +template +class EnumeratingGenerator { + public: + EnumeratingGenerator(AsyncGenerator source, T initial_value) + : state_(std::make_shared(std::move(source), std::move(initial_value))) {} + + Future> operator()() { + if (state_->finished) { + return AsyncGeneratorEnd>(); + } else { + auto state = state_; + return state->source().Then([state](const T& next) { + auto finished = IsIterationEnd(next); + auto prev = Enumerated{state->prev_value, state->prev_index, finished}; + state->prev_value = next; + state->prev_index++; + state->finished = finished; + return prev; + }); + } + } + + private: + struct State { + State(AsyncGenerator source, T initial_value) + : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) { + finished = IsIterationEnd(prev_value); + } + + AsyncGenerator source; + T prev_value; + int prev_index; + bool finished; + }; + + std::shared_ptr state_; +}; + +/// Wrap items from a source generator with positional information +/// +/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be +/// processed in a "first-available" fashion and later resequenced which can reduce the +/// impact of sources with erratic performance (e.g. a filesystem where some items may +/// take longer to read than others). 
+/// +/// TODO(ARROW-12371) Would require this generator be async-reentrant +/// +/// \see MakeSequencingGenerator for an example of putting items back in order +/// +/// This generator is not async-reentrant +/// +/// This generator buffers one item (so it knows which item is the last item) +template +AsyncGenerator> MakeEnumeratedGenerator(AsyncGenerator source) { + return FutureFirstGenerator>( + source().Then([source](const T& initial_value) -> AsyncGenerator> { + return EnumeratingGenerator(std::move(source), initial_value); + })); +} + +/// \see MakeTransferredGenerator +template +class TransferringGenerator { + public: + explicit TransferringGenerator(AsyncGenerator source, internal::Executor* executor) + : source_(std::move(source)), executor_(executor) {} + + Future operator()() { return executor_->Transfer(source_()); } + + private: + AsyncGenerator source_; + internal::Executor* executor_; +}; + +/// \brief Transfer a future to an underlying executor. +/// +/// Continuations run on the returned future will be run on the given executor +/// if they cannot be run synchronously. +/// +/// This is often needed to move computation off I/O threads or other external +/// completion sources and back on to the CPU executor so the I/O thread can +/// stay busy and focused on I/O +/// +/// Keep in mind that continuations called on an already completed future will +/// always be run synchronously and so no transfer will happen in that case. 
+/// +/// This generator is async reentrant if the source is +/// +/// This generator will not queue +template +AsyncGenerator MakeTransferredGenerator(AsyncGenerator source, + internal::Executor* executor) { + return TransferringGenerator(std::move(source), executor); +} + +/// \see MakeBackgroundGenerator +template +class BackgroundGenerator { + public: + explicit BackgroundGenerator(Iterator it, internal::Executor* io_executor, int max_q, + int q_restart) + : state_(std::make_shared(io_executor, std::move(it), max_q, q_restart)), + cleanup_(std::make_shared(state_.get())) {} + + Future operator()() { + auto guard = state_->mutex.Lock(); + Future waiting_future; + if (state_->queue.empty()) { + if (state_->finished) { + return AsyncGeneratorEnd(); + } else { + waiting_future = Future::Make(); + state_->waiting_future = waiting_future; + } + } else { + auto next = Future::MakeFinished(std::move(state_->queue.front())); + state_->queue.pop(); + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(next)); + } + return next; + } + // This should only trigger the very first time this method is called + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(waiting_future)); + } + return waiting_future; + } + + protected: + static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits::max()}; + + struct State { + State(internal::Executor* io_executor, Iterator it, int max_q, int q_restart) + : io_executor(io_executor), + max_q(max_q), + q_restart(q_restart), + it(std::move(it)), + reading(false), + finished(false), + should_shutdown(false) {} + + void ClearQueue() { + while (!queue.empty()) { + queue.pop(); + } + } + + bool TaskIsRunning() const { return task_finished.is_valid(); } + + bool NeedsRestart() const { + return !finished && !reading && static_cast(queue.size()) <= q_restart; + } + + void DoRestartTask(std::shared_ptr state, util::Mutex::Guard guard) { + // If we get here we 
are actually going to start a new task so let's create a + // task_finished future for it + state->task_finished = Future<>::Make(); + state->reading = true; + auto spawn_status = io_executor->Spawn( + [state]() { BackgroundGenerator::WorkerTask(std::move(state)); }); + if (!spawn_status.ok()) { + // If we can't spawn a new task then send an error to the consumer (either via a + // waiting future or the queue) and mark ourselves finished + state->finished = true; + state->task_finished = Future<>(); + if (waiting_future.has_value()) { + auto to_deliver = std::move(waiting_future.value()); + waiting_future.reset(); + guard.Unlock(); + to_deliver.MarkFinished(spawn_status); + } else { + ClearQueue(); + queue.push(spawn_status); + } + } + } + + Future RestartTask(std::shared_ptr state, util::Mutex::Guard guard, + Future next) { + if (TaskIsRunning()) { + // If the task is still cleaning up we need to wait for it to finish before + // restarting. We also want to block the consumer until we've restarted the + // reader to avoid multiple restarts + return task_finished.Then([state, next]() { + // This may appear dangerous (recursive mutex) but we should be guaranteed the + // outer guard has been released by this point. We know... 
+ // * task_finished is not already finished (it would be invalid in that case) + // * task_finished will not be marked complete until we've given up the mutex + auto guard_ = state->mutex.Lock(); + state->DoRestartTask(state, std::move(guard_)); + return next; + }); + } + // Otherwise we can restart immediately + DoRestartTask(std::move(state), std::move(guard)); + return next; + } + + internal::Executor* io_executor; + const int max_q; + const int q_restart; + Iterator it; + std::atomic worker_thread_id{kUnlikelyThreadId}; + + // If true, the task is actively pumping items from the queue and does not need a + // restart + bool reading; + // Set to true when a terminal item arrives + bool finished; + // Signal to the background task to end early because consumers have given up on it + bool should_shutdown; + // If the queue is empty, the consumer will create a waiting future and wait for it + std::queue> queue; + std::optional> waiting_future; + // Every background task is given a future to complete when it is entirely finished + // processing and ready for the next task to start or for State to be destroyed + Future<> task_finished; + util::Mutex mutex; + }; + + // Cleanup task that will be run when all consumer references to the generator are lost + struct Cleanup { + explicit Cleanup(State* state) : state(state) {} + ~Cleanup() { + /// TODO: Once ARROW-13109 is available then we can be force consumers to spawn and + /// there is no need to perform this check. 
+ /// + /// It's a deadlock if we enter cleanup from + /// the worker thread but it can happen if the consumer doesn't transfer away + assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId()); + Future<> finish_fut; + { + auto lock = state->mutex.Lock(); + if (!state->TaskIsRunning()) { + return; + } + // Signal the current task to stop and wait for it to finish + state->should_shutdown = true; + finish_fut = state->task_finished; + } + // Using future as a condition variable here + Status st = finish_fut.status(); + ARROW_UNUSED(st); + } + State* state; + }; + + static void WorkerTask(std::shared_ptr state) { + state->worker_thread_id.store(::arrow::internal::GetThreadId()); + // We need to capture the state to read while outside the mutex + bool reading = true; + while (reading) { + auto next = state->it.Next(); + // Need to capture state->waiting_future inside the mutex to mark finished outside + Future waiting_future; + { + auto guard = state->mutex.Lock(); + + if (state->should_shutdown) { + state->finished = true; + break; + } + + if (!next.ok() || IsIterationEnd(*next)) { + // Terminal item. Mark finished to true, send this last item, and quit + state->finished = true; + if (!next.ok()) { + state->ClearQueue(); + } + } + // At this point we are going to send an item. Either we will add it to the + // queue or deliver it to a waiting future. + if (state->waiting_future.has_value()) { + waiting_future = std::move(state->waiting_future.value()); + state->waiting_future.reset(); + } else { + state->queue.push(std::move(next)); + // We just filled up the queue so it is time to quit. We may need to notify + // a cleanup task so we transition to Quitting + if (static_cast(state->queue.size()) >= state->max_q) { + state->reading = false; + } + } + reading = state->reading && !state->finished; + } + // This should happen outside the mutex. 
Presumably there is a + // transferring generator on the other end that will quickly transfer any + // callbacks off of this thread so we can continue looping. Still, best not to + // rely on that + if (waiting_future.is_valid()) { + waiting_future.MarkFinished(next); + } + } + // Once we've sent our last item we can notify any waiters that we are done and so + // either state can be cleaned up or a new background task can be started + Future<> task_finished; + { + auto guard = state->mutex.Lock(); + // After we give up the mutex state can be safely deleted. We will no longer + // reference it. We can safely transition to idle now. + task_finished = state->task_finished; + state->task_finished = Future<>(); + state->worker_thread_id.store(kUnlikelyThreadId); + } + task_finished.MarkFinished(); + } + + std::shared_ptr state_; + // state_ is held by both the generator and the background thread so it won't be cleaned + // up when all consumer references are relinquished. cleanup_ is only held by the + // generator so it will be destructed when the last consumer reference is gone. We use + // this to cleanup / stop the background generator in case the consuming end stops + // listening (e.g. due to a downstream error) + std::shared_ptr cleanup_; +}; + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Create an AsyncGenerator by iterating over an Iterator on a background +/// thread +/// +/// The parameter max_q and q_restart control queue size and background thread task +/// management. If the background task is fast you typically don't want it creating a +/// thread task for every item. Instead the background thread will run until it fills +/// up a readahead queue. +/// +/// Once the queue has filled up the background thread task will terminate (allowing other +/// I/O tasks to use the thread). Once the queue has been drained enough (specified by +/// q_restart) then the background thread task will be restarted. 
If q_restart is too low +/// then you may exhaust the queue waiting for the background thread task to start running +/// again. If it is too high then it will be constantly stopping and restarting the +/// background queue task +/// +/// The "background thread" is a logical thread and will run as tasks on the io_executor. +/// This thread may stop and start when the queue fills up but there will only be one +/// active background thread task at any given time. You MUST transfer away from this +/// background generator. Otherwise there could be a race condition if a callback on the +/// background thread deletes the last consumer reference to the background generator. You +/// can transfer onto the same executor as the background thread, it is only necessary to +/// create a new thread task, not to switch executors. +/// +/// This generator is not async-reentrant +/// +/// This generator will queue up to max_q blocks +template +static Result> MakeBackgroundGenerator( + Iterator iterator, internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) { + if (max_q < q_restart) { + return Status::Invalid("max_q must be >= q_restart"); + } + return BackgroundGenerator(std::move(iterator), io_executor, max_q, q_restart); +} + +/// \brief Create an AsyncGenerator by iterating over an Iterator synchronously +/// +/// This should only be used if you know the source iterator does not involve any +/// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending +/// on the complexity of the iterator, it may lead to deadlock. +/// +/// If you are not certain if there will be I/O then it is better to use +/// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator +/// equivalent of Future::MakeFinished +/// +/// It is impossible to call this in an async-reentrant manner since the returned +/// future will be completed by the time it is polled. 
+/// +/// This generator does not queue +template +static Result> MakeBlockingGenerator( + std::shared_ptr> iterator) { + return [it = std::move(iterator)]() mutable -> Future { + return Future::MakeFinished(it->Next()); + }; +} + +template +static Result> MakeBlockingGenerator(Iterator iterator) { + return MakeBlockingGenerator(std::make_shared>(std::move(iterator))); +} + +/// \see MakeGeneratorIterator +template +class GeneratorIterator { + public: + explicit GeneratorIterator(AsyncGenerator source) : source_(std::move(source)) {} + + Result Next() { return source_().result(); } + + private: + AsyncGenerator source_; +}; + +/// \brief Convert an AsyncGenerator to an Iterator which blocks until each future +/// is finished +template +Iterator MakeGeneratorIterator(AsyncGenerator source) { + return Iterator(GeneratorIterator(std::move(source))); +} + +/// \brief Add readahead to an iterator using a background thread. +/// +/// Under the hood this is converting the iterator to a generator using +/// MakeBackgroundGenerator, adding readahead to the converted generator with +/// MakeReadaheadGenerator, and then converting back to an iterator using +/// MakeGeneratorIterator. +template +Result> MakeReadaheadIterator(Iterator it, int readahead_queue_size) { + ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1)); + auto max_q = readahead_queue_size; + auto q_restart = std::max(1, max_q / 2); + ARROW_ASSIGN_OR_RAISE( + auto background_generator, + MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart)); + // Capture io_executor to keep it alive as long as owned_bg_generator is still + // referenced + AsyncGenerator owned_bg_generator = [io_executor, background_generator]() { + return background_generator(); + }; + return MakeGeneratorIterator(std::move(owned_bg_generator)); +} + +/// \brief Make a generator that returns a single pre-generated future +/// +/// This generator is async-reentrant. 
+template +std::function()> MakeSingleFutureGenerator(Future future) { + assert(future.is_valid()); + auto state = std::make_shared>(std::move(future)); + return [state]() -> Future { + auto fut = std::move(*state); + if (fut.is_valid()) { + return fut; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that immediately ends. +/// +/// This generator is async-reentrant. +template +std::function()> MakeEmptyGenerator() { + return []() -> Future { return AsyncGeneratorEnd(); }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This generator is async-reentrant. +template +AsyncGenerator MakeFailingGenerator(Status st) { + assert(!st.ok()); + auto state = std::make_shared(std::move(st)); + return [state]() -> Future { + auto st = std::move(*state); + if (!st.ok()) { + return std::move(st); + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This overload allows inferring the return type from the argument. +template +AsyncGenerator MakeFailingGenerator(const Result& result) { + return MakeFailingGenerator(result.status()); +} + +/// \brief Prepend initial_values onto a generator +/// +/// This generator is async-reentrant but will buffer requests and will not +/// pull from following_values async-reentrantly. 
+template +AsyncGenerator MakeGeneratorStartsWith(std::vector initial_values, + AsyncGenerator following_values) { + auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values)); + auto gen_gen = MakeVectorGenerator>( + {std::move(initial_values_vec_gen), std::move(following_values)}); + return MakeConcatenatedGenerator(std::move(gen_gen)); +} + +template +struct CancellableGenerator { + Future operator()() { + if (stop_token.IsStopRequested()) { + return stop_token.Poll(); + } + return source(); + } + + AsyncGenerator source; + StopToken stop_token; +}; + +/// \brief Allow an async generator to be cancelled +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeCancellable(AsyncGenerator source, StopToken stop_token) { + return CancellableGenerator{std::move(source), std::move(stop_token)}; +} + +template +class DefaultIfEmptyGenerator { + public: + DefaultIfEmptyGenerator(AsyncGenerator source, T or_value) + : state_(std::make_shared(std::move(source), std::move(or_value))) {} + + Future operator()() { + if (state_->first) { + state_->first = false; + struct { + T or_value; + + Result operator()(const T& value) { + if (IterationTraits::IsEnd(value)) { + return std::move(or_value); + } + return value; + } + } Continuation; + Continuation.or_value = std::move(state_->or_value); + return state_->source().Then(std::move(Continuation)); + } + return state_->source(); + } + + private: + struct State { + AsyncGenerator source; + T or_value; + bool first; + State(AsyncGenerator source_, T or_value_) + : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {} + }; + std::shared_ptr state_; +}; + +/// \brief If the generator is empty, return the given value, else +/// forward the values from the generator. +/// +/// This generator is async-reentrant. 
+template +AsyncGenerator MakeDefaultIfEmptyGenerator(AsyncGenerator source, T or_value) { + return DefaultIfEmptyGenerator(std::move(source), std::move(or_value)); +} +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..d8a91ea76b3906ec8d8b55bdb282cdb8da874cfd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h @@ -0,0 +1,492 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_traits.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +enum class DecimalStatus { + kSuccess, + kDivideByZero, + kOverflow, + kRescaleDataLoss, +}; + +template +class ARROW_EXPORT GenericBasicDecimal { + protected: + struct LittleEndianArrayTag {}; + +#if ARROW_LITTLE_ENDIAN + static constexpr int kHighWordIndex = NWORDS - 1; + static constexpr int kLowWordIndex = 0; +#else + static constexpr int kHighWordIndex = 0; + static constexpr int kLowWordIndex = NWORDS - 1; +#endif + + public: + static constexpr int kBitWidth = BIT_WIDTH; + static constexpr int kByteWidth = kBitWidth / 8; + static constexpr int kNumWords = NWORDS; + + // A constructor tag to introduce a little-endian encoded array + static constexpr LittleEndianArrayTag LittleEndianArray{}; + + using WordArray = std::array; + + /// \brief Empty constructor creates a decimal with a value of 0. + constexpr GenericBasicDecimal() noexcept : array_({0}) {} + + /// \brief Create a decimal from the two's complement representation. + /// + /// Input array is assumed to be in native endianness. + explicit constexpr GenericBasicDecimal(const WordArray& array) noexcept + : array_(array) {} + + /// \brief Create a decimal from the two's complement representation. + /// + /// Input array is assumed to be in little endianness, with native endian elements. + GenericBasicDecimal(LittleEndianArrayTag, const WordArray& array) noexcept + : GenericBasicDecimal(bit_util::little_endian::ToNative(array)) {} + + /// \brief Create a decimal from any integer not wider than 64 bits. + template ::value && (sizeof(T) <= sizeof(uint64_t)), T>::type> + constexpr GenericBasicDecimal(T value) noexcept // NOLINT(runtime/explicit) + : array_(WordsFromLowBits(value)) {} + + /// \brief Create a decimal from an array of bytes. 
+ /// + /// Bytes are assumed to be in native-endian byte order. + explicit GenericBasicDecimal(const uint8_t* bytes) { + memcpy(array_.data(), bytes, sizeof(array_)); + } + + /// \brief Get the bits of the two's complement representation of the number. + /// + /// The elements are in native endian order. The bits within each uint64_t element + /// are in native endian order. For example, on a little endian machine, + /// BasicDecimal128(123).native_endian_array() = {123, 0}; + /// but on a big endian machine, + /// BasicDecimal128(123).native_endian_array() = {0, 123}; + constexpr const WordArray& native_endian_array() const { return array_; } + + /// \brief Get the bits of the two's complement representation of the number. + /// + /// The elements are in little endian order. However, the bits within each + /// uint64_t element are in native endian order. + /// For example, BasicDecimal128(123).little_endian_array() = {123, 0}; + WordArray little_endian_array() const { + return bit_util::little_endian::FromNative(array_); + } + + const uint8_t* native_endian_bytes() const { + return reinterpret_cast(array_.data()); + } + + uint8_t* mutable_native_endian_bytes() { + return reinterpret_cast(array_.data()); + } + + /// \brief Return the raw bytes of the value in native-endian byte order. + std::array ToBytes() const { + std::array out{{0}}; + memcpy(out.data(), array_.data(), kByteWidth); + return out; + } + + /// \brief Copy the raw bytes of the value in native-endian byte order. + void ToBytes(uint8_t* out) const { memcpy(out, array_.data(), kByteWidth); } + + /// Return 1 if positive or zero, -1 if strictly negative. 
+ int64_t Sign() const { + return 1 | (static_cast(array_[kHighWordIndex]) >> 63); + } + + bool IsNegative() const { return static_cast(array_[kHighWordIndex]) < 0; } + + explicit operator bool() const { return array_ != WordArray{}; } + + friend bool operator==(const GenericBasicDecimal& left, + const GenericBasicDecimal& right) { + return left.array_ == right.array_; + } + + friend bool operator!=(const GenericBasicDecimal& left, + const GenericBasicDecimal& right) { + return left.array_ != right.array_; + } + + protected: + WordArray array_; + + template + static constexpr uint64_t SignExtend(T low_bits) noexcept { + return low_bits >= T{} ? uint64_t{0} : ~uint64_t{0}; + } + + template + static constexpr WordArray WordsFromLowBits(T low_bits) { + WordArray words{}; + if (low_bits < T{}) { + for (auto& word : words) { + word = ~uint64_t{0}; + } + } + words[kLowWordIndex] = static_cast(low_bits); + return words; + } +}; + +/// Represents a signed 128-bit integer in two's complement. +/// +/// This class is also compiled into LLVM IR - so, it should not have cpp references like +/// streams and boost. +class ARROW_EXPORT BasicDecimal128 : public GenericBasicDecimal { + public: + static constexpr int kMaxPrecision = 38; + static constexpr int kMaxScale = 38; + + using GenericBasicDecimal::GenericBasicDecimal; + + constexpr BasicDecimal128() noexcept : GenericBasicDecimal() {} + + /// \brief Create a BasicDecimal128 from the two's complement representation. 
+#if ARROW_LITTLE_ENDIAN + constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept + : BasicDecimal128(WordArray{low, static_cast(high)}) {} +#else + constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept + : BasicDecimal128(WordArray{static_cast(high), low}) {} +#endif + + /// \brief Negate the current value (in-place) + BasicDecimal128& Negate(); + + /// \brief Absolute value (in-place) + BasicDecimal128& Abs(); + + /// \brief Absolute value + static BasicDecimal128 Abs(const BasicDecimal128& left); + + /// \brief Add a number to this one. The result is truncated to 128 bits. + BasicDecimal128& operator+=(const BasicDecimal128& right); + + /// \brief Subtract a number from this one. The result is truncated to 128 bits. + BasicDecimal128& operator-=(const BasicDecimal128& right); + + /// \brief Multiply this number by another number. The result is truncated to 128 bits. + BasicDecimal128& operator*=(const BasicDecimal128& right); + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \param[out] result the quotient + /// \param[out] remainder the remainder after the division + DecimalStatus Divide(const BasicDecimal128& divisor, BasicDecimal128* result, + BasicDecimal128* remainder) const; + + /// \brief In-place division. + BasicDecimal128& operator/=(const BasicDecimal128& right); + + /// \brief Bitwise "or" between two BasicDecimal128. + BasicDecimal128& operator|=(const BasicDecimal128& right); + + /// \brief Bitwise "and" between two BasicDecimal128. + BasicDecimal128& operator&=(const BasicDecimal128& right); + + /// \brief Shift left by the given number of bits. 
+ BasicDecimal128& operator<<=(uint32_t bits); + + BasicDecimal128 operator<<(uint32_t bits) const { + auto res = *this; + res <<= bits; + return res; + } + + /// \brief Shift right by the given number of bits. + /// + /// Negative values will sign-extend. + BasicDecimal128& operator>>=(uint32_t bits); + + BasicDecimal128 operator>>(uint32_t bits) const { + auto res = *this; + res >>= bits; + return res; + } + + /// \brief Get the high bits of the two's complement representation of the number. + constexpr int64_t high_bits() const { +#if ARROW_LITTLE_ENDIAN + return static_cast(array_[1]); +#else + return static_cast(array_[0]); +#endif + } + + /// \brief Get the low bits of the two's complement representation of the number. + constexpr uint64_t low_bits() const { +#if ARROW_LITTLE_ENDIAN + return array_[0]; +#else + return array_[1]; +#endif + } + + /// \brief separate the integer and fractional parts for the given scale. + void GetWholeAndFraction(int32_t scale, BasicDecimal128* whole, + BasicDecimal128* fraction) const; + + /// \brief Scale multiplier for given scale value. + static const BasicDecimal128& GetScaleMultiplier(int32_t scale); + /// \brief Half-scale multiplier for given scale value. + static const BasicDecimal128& GetHalfScaleMultiplier(int32_t scale); + + /// \brief Convert BasicDecimal128 from one scale to another + DecimalStatus Rescale(int32_t original_scale, int32_t new_scale, + BasicDecimal128* out) const; + + /// \brief Scale up. + BasicDecimal128 IncreaseScaleBy(int32_t increase_by) const; + + /// \brief Scale down. + /// - If 'round' is true, the right-most digits are dropped and the result value is + /// rounded up (+1 for +ve, -1 for -ve) based on the value of the dropped digits + /// (>= 10^reduce_by / 2). + /// - If 'round' is false, the right-most digits are simply dropped. 
+ BasicDecimal128 ReduceScaleBy(int32_t reduce_by, bool round = true) const; + + /// \brief Whether this number fits in the given precision + /// + /// Return true if the number of significant digits is less or equal to `precision`. + bool FitsInPrecision(int32_t precision) const; + + /// \brief count the number of leading binary zeroes. + int32_t CountLeadingBinaryZeros() const; + + /// \brief Get the maximum valid unscaled decimal value. + static const BasicDecimal128& GetMaxValue(); + + /// \brief Get the maximum valid unscaled decimal value for the given precision. + static BasicDecimal128 GetMaxValue(int32_t precision); + + /// \brief Get the maximum decimal value (is not a valid value). + static constexpr BasicDecimal128 GetMaxSentinel() { + return BasicDecimal128(/*high=*/std::numeric_limits::max(), + /*low=*/std::numeric_limits::max()); + } + /// \brief Get the minimum decimal value (is not a valid value). + static constexpr BasicDecimal128 GetMinSentinel() { + return BasicDecimal128(/*high=*/std::numeric_limits::min(), + /*low=*/std::numeric_limits::min()); + } +}; + +ARROW_EXPORT bool operator<(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator<=(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator>(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator>=(const BasicDecimal128& left, const BasicDecimal128& right); + +ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& operand); +ARROW_EXPORT BasicDecimal128 operator~(const BasicDecimal128& operand); +ARROW_EXPORT BasicDecimal128 operator+(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator*(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator/(const BasicDecimal128& left, + const BasicDecimal128& 
right); +ARROW_EXPORT BasicDecimal128 operator%(const BasicDecimal128& left, + const BasicDecimal128& right); + +class ARROW_EXPORT BasicDecimal256 : public GenericBasicDecimal { + public: + using GenericBasicDecimal::GenericBasicDecimal; + + static constexpr int kMaxPrecision = 76; + static constexpr int kMaxScale = 76; + + constexpr BasicDecimal256() noexcept : GenericBasicDecimal() {} + + explicit BasicDecimal256(const BasicDecimal128& value) noexcept + : BasicDecimal256(bit_util::little_endian::ToNative( + {value.low_bits(), static_cast(value.high_bits()), + SignExtend(value.high_bits()), SignExtend(value.high_bits())})) {} + + /// \brief Negate the current value (in-place) + BasicDecimal256& Negate(); + + /// \brief Absolute value (in-place) + BasicDecimal256& Abs(); + + /// \brief Absolute value + static BasicDecimal256 Abs(const BasicDecimal256& left); + + /// \brief Add a number to this one. The result is truncated to 256 bits. + BasicDecimal256& operator+=(const BasicDecimal256& right); + + /// \brief Subtract a number from this one. The result is truncated to 256 bits. + BasicDecimal256& operator-=(const BasicDecimal256& right); + + /// \brief Get the lowest bits of the two's complement representation of the number. + uint64_t low_bits() const { return bit_util::little_endian::Make(array_)[0]; } + + /// \brief separate the integer and fractional parts for the given scale. + void GetWholeAndFraction(int32_t scale, BasicDecimal256* whole, + BasicDecimal256* fraction) const; + + /// \brief Scale multiplier for given scale value. + static const BasicDecimal256& GetScaleMultiplier(int32_t scale); + /// \brief Half-scale multiplier for given scale value. + static const BasicDecimal256& GetHalfScaleMultiplier(int32_t scale); + + /// \brief Convert BasicDecimal256 from one scale to another + DecimalStatus Rescale(int32_t original_scale, int32_t new_scale, + BasicDecimal256* out) const; + + /// \brief Scale up. 
+ BasicDecimal256 IncreaseScaleBy(int32_t increase_by) const; + + /// \brief Scale down. + /// - If 'round' is true, the right-most digits are dropped and the result value is + /// rounded up (+1 for positive, -1 for negative) based on the value of the + /// dropped digits (>= 10^reduce_by / 2). + /// - If 'round' is false, the right-most digits are simply dropped. + BasicDecimal256 ReduceScaleBy(int32_t reduce_by, bool round = true) const; + + /// \brief Whether this number fits in the given precision + /// + /// Return true if the number of significant digits is less or equal to `precision`. + bool FitsInPrecision(int32_t precision) const; + + /// \brief Multiply this number by another number. The result is truncated to 256 bits. + BasicDecimal256& operator*=(const BasicDecimal256& right); + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \param[out] result the quotient + /// \param[out] remainder the remainder after the division + DecimalStatus Divide(const BasicDecimal256& divisor, BasicDecimal256* result, + BasicDecimal256* remainder) const; + + /// \brief Shift left by the given number of bits. + BasicDecimal256& operator<<=(uint32_t bits); + + BasicDecimal256 operator<<(uint32_t bits) const { + auto res = *this; + res <<= bits; + return res; + } + + /// \brief Shift right by the given number of bits. + /// + /// Negative values will sign-extend. + BasicDecimal256& operator>>=(uint32_t bits); + + BasicDecimal256 operator>>(uint32_t bits) const { + auto res = *this; + res >>= bits; + return res; + } + + /// \brief In-place division. + BasicDecimal256& operator/=(const BasicDecimal256& right); + + /// \brief Get the maximum valid unscaled decimal value for the given precision. 
+ static BasicDecimal256 GetMaxValue(int32_t precision); + + /// \brief Get the maximum decimal value (is not a valid value). + static constexpr BasicDecimal256 GetMaxSentinel() { +#if ARROW_LITTLE_ENDIAN + return BasicDecimal256({std::numeric_limits::max(), + std::numeric_limits::max(), + std::numeric_limits::max(), + static_cast(std::numeric_limits::max())}); +#else + return BasicDecimal256({static_cast(std::numeric_limits::max()), + std::numeric_limits::max(), + std::numeric_limits::max(), + std::numeric_limits::max()}); +#endif + } + /// \brief Get the minimum decimal value (is not a valid value). + static constexpr BasicDecimal256 GetMinSentinel() { +#if ARROW_LITTLE_ENDIAN + return BasicDecimal256( + {0, 0, 0, static_cast(std::numeric_limits::min())}); +#else + return BasicDecimal256( + {static_cast(std::numeric_limits::min()), 0, 0, 0}); +#endif + } +}; + +ARROW_EXPORT bool operator<(const BasicDecimal256& left, const BasicDecimal256& right); + +ARROW_EXPORT inline bool operator<=(const BasicDecimal256& left, + const BasicDecimal256& right) { + return !operator<(right, left); +} + +ARROW_EXPORT inline bool operator>(const BasicDecimal256& left, + const BasicDecimal256& right) { + return operator<(right, left); +} + +ARROW_EXPORT inline bool operator>=(const BasicDecimal256& left, + const BasicDecimal256& right) { + return !operator<(left, right); +} + +ARROW_EXPORT BasicDecimal256 operator-(const BasicDecimal256& operand); +ARROW_EXPORT BasicDecimal256 operator~(const BasicDecimal256& operand); +ARROW_EXPORT BasicDecimal256 operator+(const BasicDecimal256& left, + const BasicDecimal256& right); +ARROW_EXPORT BasicDecimal256 operator*(const BasicDecimal256& left, + const BasicDecimal256& right); +ARROW_EXPORT BasicDecimal256 operator/(const BasicDecimal256& left, + const BasicDecimal256& right); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h 
b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h new file mode 100644 index 0000000000000000000000000000000000000000..75639ac11ae41acb5e23e3eaa91901f41472fdc6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h @@ -0,0 +1,211 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include +#include + +#include "benchmark/benchmark.h" + +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/logging.h" // IWYU pragma: keep + +namespace arrow { + +// Benchmark changed its parameter type between releases from +// int to int64_t. As it doesn't have version macros, we need +// to apply C++ template magic. 
+ +template +struct BenchmarkArgsType; + +// Pattern matching that extracts the vector element type of Benchmark::Args() +template +struct BenchmarkArgsType&)> { + using type = Values; +}; + +using ArgsType = + typename BenchmarkArgsType::type; + +using internal::CpuInfo; + +static const CpuInfo* cpu_info = CpuInfo::GetInstance(); + +static const int64_t kL1Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L1); +static const int64_t kL2Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L2); +static const int64_t kL3Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L3); +static const int64_t kCantFitInL3Size = kL3Size * 4; +static const std::vector kMemorySizes = {kL1Size, kL2Size, kL3Size, + kCantFitInL3Size}; +// 0 is treated as "no nulls" +static const std::vector kInverseNullProportions = {10000, 100, 10, 2, 1, 0}; + +struct GenericItemsArgs { + // number of items processed per iteration + const int64_t size; + + // proportion of nulls in generated arrays + double null_proportion; + + explicit GenericItemsArgs(benchmark::State& state) + : size(state.range(0)), state_(state) { + if (state.range(1) == 0) { + this->null_proportion = 0.0; + } else { + this->null_proportion = std::min(1., 1. 
/ static_cast(state.range(1))); + } + } + + ~GenericItemsArgs() { + state_.counters["size"] = static_cast(size); + state_.counters["null_percent"] = null_proportion * 100; + state_.SetItemsProcessed(state_.iterations() * size); + } + + private: + benchmark::State& state_; +}; + +void BenchmarkSetArgsWithSizes(benchmark::internal::Benchmark* bench, + const std::vector& sizes = kMemorySizes) { + bench->Unit(benchmark::kMicrosecond); + + for (const auto size : sizes) { + for (const auto inverse_null_proportion : kInverseNullProportions) { + bench->Args({static_cast(size), inverse_null_proportion}); + } + } +} + +void BenchmarkSetArgs(benchmark::internal::Benchmark* bench) { + BenchmarkSetArgsWithSizes(bench, kMemorySizes); +} + +void RegressionSetArgs(benchmark::internal::Benchmark* bench) { + // Regression do not need to account for cache hierarchy, thus optimize for + // the best case. + BenchmarkSetArgsWithSizes(bench, {kL1Size}); +} + +// RAII struct to handle some of the boilerplate in regression benchmarks +struct RegressionArgs { + // size of memory tested (per iteration) in bytes + int64_t size; + + // proportion of nulls in generated arrays + double null_proportion; + + // If size_is_bytes is true, then it's a number of bytes, otherwise it's the + // number of items processed (for reporting) + explicit RegressionArgs(benchmark::State& state, bool size_is_bytes = true) + : size(state.range(0)), state_(state), size_is_bytes_(size_is_bytes) { + if (state.range(1) == 0) { + this->null_proportion = 0.0; + } else { + this->null_proportion = std::min(1., 1. 
/ static_cast(state.range(1))); + } + } + + ~RegressionArgs() { + state_.counters["size"] = static_cast(size); + state_.counters["null_percent"] = null_proportion * 100; + if (size_is_bytes_) { + state_.SetBytesProcessed(state_.iterations() * size); + } else { + state_.SetItemsProcessed(state_.iterations() * size); + } + } + + private: + benchmark::State& state_; + bool size_is_bytes_; +}; + +class MemoryPoolMemoryManager : public benchmark::MemoryManager { + void Start() override { + memory_pool = std::make_shared(default_memory_pool()); + + MemoryPool* default_pool = default_memory_pool(); + global_allocations_start = default_pool->num_allocations(); + } + +// BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark +// 1.8.0. We can remove this Stop(Result*) when we require Google +// Benchmark 1.8.0 or later. +#ifndef BENCHMARK_DONT_OPTIMIZE + void Stop(Result* result) override { Stop(*result); } +#endif + + void Stop(benchmark::MemoryManager::Result& result) override { + // If num_allocations is still zero, we assume that the memory pool wasn't passed down + // so we should record them. + MemoryPool* default_pool = default_memory_pool(); + int64_t new_default_allocations = + default_pool->num_allocations() - global_allocations_start; + + // Only record metrics if (1) there were allocations and (2) we + // recorded at least one. + if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) { + if (new_default_allocations > memory_pool->num_allocations()) { + // If we missed some, let's report that. 
+ int64_t missed_allocations = + new_default_allocations - memory_pool->num_allocations(); + ARROW_LOG(WARNING) << "BenchmarkMemoryTracker recorded some allocations " + << "for a benchmark, but missed " << missed_allocations + << " allocations.\n"; + } + + result.max_bytes_used = memory_pool->max_memory(); + result.total_allocated_bytes = memory_pool->total_bytes_allocated(); + result.num_allocs = memory_pool->num_allocations(); + } + } + + public: + std::shared_ptr<::arrow::ProxyMemoryPool> memory_pool; + + protected: + int64_t global_allocations_start; +}; + +/// \brief Track memory pool allocations in benchmarks. +/// +/// Instantiate as a global variable to register the hooks into Google Benchmark +/// to collect memory metrics. Before each benchmark, a new ProxyMemoryPool is +/// created. It can then be accessed with memory_pool(). Once the benchmark is +/// complete, the hook will record the maximum memory used, the total bytes +/// allocated, and the total number of allocations. If no allocations were seen, +/// (for example, if you forgot to pass down the memory pool), then these metrics +/// will not be saved. +/// +/// Since this is used as one global variable, this will not work if multiple +/// benchmarks are run concurrently or for multi-threaded benchmarks (ones +/// that use `->ThreadRange(...)`). 
+class BenchmarkMemoryTracker { + public: + BenchmarkMemoryTracker() : manager_() { ::benchmark::RegisterMemoryManager(&manager_); } + ::arrow::MemoryPool* memory_pool() const { return manager_.memory_pool.get(); } + + protected: + ::arrow::MemoryPoolMemoryManager manager_; +}; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h new file mode 100644 index 0000000000000000000000000000000000000000..73a1ee8600fb4e0be10f26e921083c3be5740490 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h @@ -0,0 +1,570 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { +namespace detail { + +inline uint64_t LoadWord(const uint8_t* bytes) { + return bit_util::ToLittleEndian(util::SafeLoadAs(bytes)); +} + +inline uint64_t ShiftWord(uint64_t current, uint64_t next, int64_t shift) { + if (shift == 0) { + return current; + } + return (current >> shift) | (next << (64 - shift)); +} + +// These templates are here to help with unit tests + +template +constexpr T BitNot(T x) { + return ~x; +} + +template <> +constexpr bool BitNot(bool x) { + return !x; +} + +struct BitBlockAnd { + template + static constexpr T Call(T left, T right) { + return left & right; + } +}; + +struct BitBlockAndNot { + template + static constexpr T Call(T left, T right) { + return left & BitNot(right); + } +}; + +struct BitBlockOr { + template + static constexpr T Call(T left, T right) { + return left | right; + } +}; + +struct BitBlockOrNot { + template + static constexpr T Call(T left, T right) { + return left | BitNot(right); + } +}; + +} // namespace detail + +/// \brief Return value from bit block counters: the total number of bits and +/// the number of set bits. +struct BitBlockCount { + int16_t length; + int16_t popcount; + + bool NoneSet() const { return this->popcount == 0; } + bool AllSet() const { return this->length == this->popcount; } +}; + +/// \brief A class that scans through a true/false bitmap to compute popcounts +/// 64 or 256 bits at a time. This is used to accelerate processing of +/// mostly-not-null array data. 
+class ARROW_EXPORT BitBlockCounter { + public: + BitBlockCounter(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8), + bits_remaining_(length), + offset_(start_offset % 8) {} + + /// \brief The bit size of each word run + static constexpr int64_t kWordBits = 64; + + /// \brief The bit size of four words run + static constexpr int64_t kFourWordsBits = kWordBits * 4; + + /// \brief Return the next run of available bits, usually 256. The returned + /// pair contains the size of run and the number of true values. The last + /// block will have a length less than 256 if the bitmap length is not a + /// multiple of 256, and will return 0-length blocks in subsequent + /// invocations. + BitBlockCount NextFourWords() { + using detail::LoadWord; + using detail::ShiftWord; + + if (!bits_remaining_) { + return {0, 0}; + } + int64_t total_popcount = 0; + if (offset_ == 0) { + if (bits_remaining_ < kFourWordsBits) { + return GetBlockSlow(kFourWordsBits); + } + total_popcount += bit_util::PopCount(LoadWord(bitmap_)); + total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 8)); + total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 16)); + total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 24)); + } else { + // When the offset is > 0, we need there to be a word beyond the last + // aligned word in the bitmap for the bit shifting logic. 
+ if (bits_remaining_ < 5 * kFourWordsBits - offset_) { + return GetBlockSlow(kFourWordsBits); + } + auto current = LoadWord(bitmap_); + auto next = LoadWord(bitmap_ + 8); + total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_)); + current = next; + next = LoadWord(bitmap_ + 16); + total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_)); + current = next; + next = LoadWord(bitmap_ + 24); + total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_)); + current = next; + next = LoadWord(bitmap_ + 32); + total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_)); + } + bitmap_ += bit_util::BytesForBits(kFourWordsBits); + bits_remaining_ -= kFourWordsBits; + return {256, static_cast(total_popcount)}; + } + + /// \brief Return the next run of available bits, usually 64. The returned + /// pair contains the size of run and the number of true values. The last + /// block will have a length less than 64 if the bitmap length is not a + /// multiple of 64, and will return 0-length blocks in subsequent + /// invocations. + BitBlockCount NextWord() { + using detail::LoadWord; + using detail::ShiftWord; + + if (!bits_remaining_) { + return {0, 0}; + } + int64_t popcount = 0; + if (offset_ == 0) { + if (bits_remaining_ < kWordBits) { + return GetBlockSlow(kWordBits); + } + popcount = bit_util::PopCount(LoadWord(bitmap_)); + } else { + // When the offset is > 0, we need there to be a word beyond the last + // aligned word in the bitmap for the bit shifting logic. + if (bits_remaining_ < 2 * kWordBits - offset_) { + return GetBlockSlow(kWordBits); + } + popcount = bit_util::PopCount( + ShiftWord(LoadWord(bitmap_), LoadWord(bitmap_ + 8), offset_)); + } + bitmap_ += kWordBits / 8; + bits_remaining_ -= kWordBits; + return {64, static_cast(popcount)}; + } + + private: + /// \brief Return block with the requested size when doing word-wise + /// computation is not possible due to inadequate bits remaining. 
+ BitBlockCount GetBlockSlow(int64_t block_size) noexcept; + + const uint8_t* bitmap_; + int64_t bits_remaining_; + int64_t offset_; +}; + +/// \brief A tool to iterate through a possibly nonexistent validity bitmap, +/// to allow us to write one code path for both the with-nulls and no-nulls +/// cases without giving up a lot of performance. +class ARROW_EXPORT OptionalBitBlockCounter { + public: + // validity_bitmap may be NULLPTR + OptionalBitBlockCounter(const uint8_t* validity_bitmap, int64_t offset, int64_t length); + + // validity_bitmap may be null + OptionalBitBlockCounter(const std::shared_ptr& validity_bitmap, int64_t offset, + int64_t length); + + /// Return block count for next word when the bitmap is available otherwise + /// return a block with length up to INT16_MAX when there is no validity + /// bitmap (so all the referenced values are not null). + BitBlockCount NextBlock() { + static constexpr int64_t kMaxBlockSize = std::numeric_limits::max(); + if (has_bitmap_) { + BitBlockCount block = counter_.NextWord(); + position_ += block.length; + return block; + } else { + int16_t block_size = + static_cast(std::min(kMaxBlockSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + + // Like NextBlock, but returns a word-sized block even when there is no + // validity bitmap + BitBlockCount NextWord() { + static constexpr int64_t kWordSize = 64; + if (has_bitmap_) { + BitBlockCount block = counter_.NextWord(); + position_ += block.length; + return block; + } else { + int16_t block_size = static_cast(std::min(kWordSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + + private: + const bool has_bitmap_; + int64_t position_; + int64_t length_; + BitBlockCounter counter_; +}; + +/// \brief A class that computes popcounts on the result of bitwise operations +/// between two bitmaps, 64 bits at a time. 
A 64-bit word is loaded from each +/// bitmap, then the popcount is computed on e.g. the bitwise-and of the two +/// words. +class ARROW_EXPORT BinaryBitBlockCounter { + public: + BinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, int64_t length) + : left_bitmap_(util::MakeNonNull(left_bitmap) + left_offset / 8), + left_offset_(left_offset % 8), + right_bitmap_(util::MakeNonNull(right_bitmap) + right_offset / 8), + right_offset_(right_offset % 8), + bits_remaining_(length) {} + + /// \brief Return the popcount of the bitwise-and of the next run of + /// available bits, up to 64. The returned pair contains the size of run and + /// the number of true values. The last block will have a length less than 64 + /// if the bitmap length is not a multiple of 64, and will return 0-length + /// blocks in subsequent invocations. + BitBlockCount NextAndWord() { return NextWord(); } + + /// \brief Computes "x & ~y" block for each available run of bits. + BitBlockCount NextAndNotWord() { return NextWord(); } + + /// \brief Computes "x | y" block for each available run of bits. + BitBlockCount NextOrWord() { return NextWord(); } + + /// \brief Computes "x | ~y" block for each available run of bits. + BitBlockCount NextOrNotWord() { return NextWord(); } + + private: + template + BitBlockCount NextWord() { + using detail::LoadWord; + using detail::ShiftWord; + + if (!bits_remaining_) { + return {0, 0}; + } + // When the offset is > 0, we need there to be a word beyond the last aligned + // word in the bitmap for the bit shifting logic. + constexpr int64_t kWordBits = BitBlockCounter::kWordBits; + const int64_t bits_required_to_use_words = + std::max(left_offset_ == 0 ? 64 : 64 + (64 - left_offset_), + right_offset_ == 0 ? 
64 : 64 + (64 - right_offset_)); + if (bits_remaining_ < bits_required_to_use_words) { + const int16_t run_length = + static_cast(std::min(bits_remaining_, kWordBits)); + int16_t popcount = 0; + for (int64_t i = 0; i < run_length; ++i) { + if (Op::Call(bit_util::GetBit(left_bitmap_, left_offset_ + i), + bit_util::GetBit(right_bitmap_, right_offset_ + i))) { + ++popcount; + } + } + // This code path should trigger _at most_ 2 times. In the "two times" + // case, the first time the run length will be a multiple of 8. + left_bitmap_ += run_length / 8; + right_bitmap_ += run_length / 8; + bits_remaining_ -= run_length; + return {run_length, popcount}; + } + + int64_t popcount = 0; + if (left_offset_ == 0 && right_offset_ == 0) { + popcount = + bit_util::PopCount(Op::Call(LoadWord(left_bitmap_), LoadWord(right_bitmap_))); + } else { + auto left_word = + ShiftWord(LoadWord(left_bitmap_), LoadWord(left_bitmap_ + 8), left_offset_); + auto right_word = + ShiftWord(LoadWord(right_bitmap_), LoadWord(right_bitmap_ + 8), right_offset_); + popcount = bit_util::PopCount(Op::Call(left_word, right_word)); + } + left_bitmap_ += kWordBits / 8; + right_bitmap_ += kWordBits / 8; + bits_remaining_ -= kWordBits; + return {64, static_cast(popcount)}; + } + + const uint8_t* left_bitmap_; + int64_t left_offset_; + const uint8_t* right_bitmap_; + int64_t right_offset_; + int64_t bits_remaining_; +}; + +class ARROW_EXPORT OptionalBinaryBitBlockCounter { + public: + // Any bitmap may be NULLPTR + OptionalBinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length); + + // Any bitmap may be null + OptionalBinaryBitBlockCounter(const std::shared_ptr& left_bitmap, + int64_t left_offset, + const std::shared_ptr& right_bitmap, + int64_t right_offset, int64_t length); + + BitBlockCount NextAndBlock() { + static constexpr int64_t kMaxBlockSize = std::numeric_limits::max(); + switch (has_bitmap_) { + case HasBitmap::BOTH: 
{ + BitBlockCount block = binary_counter_.NextAndWord(); + position_ += block.length; + return block; + } + case HasBitmap::ONE: { + BitBlockCount block = unary_counter_.NextWord(); + position_ += block.length; + return block; + } + case HasBitmap::NONE: + default: { + const int16_t block_size = + static_cast(std::min(kMaxBlockSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + } + + BitBlockCount NextOrNotBlock() { + static constexpr int64_t kMaxBlockSize = std::numeric_limits::max(); + switch (has_bitmap_) { + case HasBitmap::BOTH: { + BitBlockCount block = binary_counter_.NextOrNotWord(); + position_ += block.length; + return block; + } + case HasBitmap::ONE: { + BitBlockCount block = unary_counter_.NextWord(); + position_ += block.length; + return block; + } + case HasBitmap::NONE: + default: { + const int16_t block_size = + static_cast(std::min(kMaxBlockSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + } + + private: + enum class HasBitmap : int { BOTH, ONE, NONE }; + + const HasBitmap has_bitmap_; + int64_t position_; + int64_t length_; + BitBlockCounter unary_counter_; + BinaryBitBlockCounter binary_counter_; + + static HasBitmap HasBitmapFromBitmaps(bool has_left, bool has_right) { + switch (static_cast(has_left) + static_cast(has_right)) { + case 0: + return HasBitmap::NONE; + case 1: + return HasBitmap::ONE; + default: // 2 + return HasBitmap::BOTH; + } + } +}; + +// Functional-style bit block visitors. 
+ +template +static Status VisitBitBlocks(const uint8_t* bitmap, int64_t offset, int64_t length, + VisitNotNull&& visit_not_null, VisitNull&& visit_null) { + internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length); + int64_t position = 0; + while (position < length) { + internal::BitBlockCount block = bit_counter.NextBlock(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_null()); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(bitmap, offset + position)) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } else { + ARROW_RETURN_NOT_OK(visit_null()); + } + } + } + } + return Status::OK(); +} + +template +static void VisitBitBlocksVoid(const uint8_t* bitmap, int64_t offset, int64_t length, + VisitNotNull&& visit_not_null, VisitNull&& visit_null) { + internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length); + int64_t position = 0; + while (position < length) { + internal::BitBlockCount block = bit_counter.NextBlock(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_not_null(position); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_null(); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(bitmap, offset + position)) { + visit_not_null(position); + } else { + visit_null(); + } + } + } + } +} + +template +static Status VisitTwoBitBlocks(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length, VisitNotNull&& visit_not_null, + VisitNull&& visit_null) { + if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) { + // At most one bitmap is present + if (left_bitmap == NULLPTR) { + return 
VisitBitBlocks(right_bitmap, right_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } else { + return VisitBitBlocks(left_bitmap, left_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } + } + BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset, + length); + int64_t position = 0; + while (position < length) { + BitBlockCount block = bit_counter.NextAndWord(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_null()); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(left_bitmap, left_offset + position) && + bit_util::GetBit(right_bitmap, right_offset + position)) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } else { + ARROW_RETURN_NOT_OK(visit_null()); + } + } + } + } + return Status::OK(); +} + +template +static void VisitTwoBitBlocksVoid(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length, VisitNotNull&& visit_not_null, + VisitNull&& visit_null) { + if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) { + // At most one bitmap is present + if (left_bitmap == NULLPTR) { + return VisitBitBlocksVoid(right_bitmap, right_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } else { + return VisitBitBlocksVoid(left_bitmap, left_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } + } + BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset, + length); + int64_t position = 0; + while (position < length) { + BitBlockCount block = bit_counter.NextAndWord(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_not_null(position); + } + 
} else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_null(); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(left_bitmap, left_offset + position) && + bit_util::GetBit(right_bitmap, right_offset + position)) { + visit_not_null(position); + } else { + visit_null(); + } + } + } + } +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h new file mode 100644 index 0000000000000000000000000000000000000000..1d3a1dc2459f935e5494743a253a24c5d0b1f197 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h @@ -0,0 +1,370 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(_MSC_VER) +#if defined(_M_AMD64) || defined(_M_X64) +#include // IWYU pragma: keep +#include +#endif + +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#define ARROW_POPCOUNT64 __popcnt64 +#define ARROW_POPCOUNT32 __popcnt +#else +#define ARROW_POPCOUNT64 __builtin_popcountll +#define ARROW_POPCOUNT32 __builtin_popcount +#endif + +#include +#include + +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace detail { + +template +typename std::make_unsigned::type as_unsigned(Integer x) { + return static_cast::type>(x); +} + +} // namespace detail + +namespace bit_util { + +// The number of set bits in a given unsigned byte value, pre-computed +// +// Generated with the following Python code +// output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};' +// popcounts = [str(bin(i).count('1')) for i in range(0, 256)] +// print(output.format(', '.join(popcounts))) +static constexpr uint8_t kBytePopcount[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, + 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, + 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, + 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, + 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, + 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, + 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}; + +static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); } +static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); } + +// +// 
Bit-related computations on integer values +// + +// Returns the ceil of value/divisor +constexpr int64_t CeilDiv(int64_t value, int64_t divisor) { + return (value == 0) ? 0 : 1 + (value - 1) / divisor; +} + +// Return the number of bytes needed to fit the given number of bits +constexpr int64_t BytesForBits(int64_t bits) { + // This formula avoids integer overflow on very large `bits` + return (bits >> 3) + ((bits & 7) != 0); +} + +constexpr bool IsPowerOf2(int64_t value) { + return value > 0 && (value & (value - 1)) == 0; +} + +constexpr bool IsPowerOf2(uint64_t value) { + return value > 0 && (value & (value - 1)) == 0; +} + +// Returns the smallest power of two that contains v. If v is already a +// power of two, it is returned as is. +static inline int64_t NextPower2(int64_t n) { + // Taken from + // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 + n--; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + n |= n >> 32; + n++; + return n; +} + +constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; } + +constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; } + +// Returns a mask for the bit_index lower order bits. +// Only valid for bit_index in the range [0, 64). +constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) { + return (static_cast(1) << bit_index) - 1; +} + +// Returns 'value' rounded up to the nearest multiple of 'factor' +constexpr int64_t RoundUp(int64_t value, int64_t factor) { + return CeilDiv(value, factor) * factor; +} + +// Returns 'value' rounded down to the nearest multiple of 'factor' +constexpr int64_t RoundDown(int64_t value, int64_t factor) { + return (value / factor) * factor; +} + +// Returns 'value' rounded up to the nearest multiple of 'factor' when factor +// is a power of two. +// The result is undefined on overflow, i.e. if `value > 2**64 - factor`, +// since we cannot return the correct result which would be 2**64. 
+constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) { + // DCHECK(value >= 0); + // DCHECK(IsPowerOf2(factor)); + return (value + (factor - 1)) & ~(factor - 1); +} + +constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) { + // DCHECK(IsPowerOf2(factor)); + return (value + (factor - 1)) & ~(factor - 1); +} + +constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); } + +constexpr int64_t RoundUpToMultipleOf64(int64_t num) { + return RoundUpToPowerOf2(num, 64); +} + +// Returns the number of bytes covering a sliced bitmap. Find the length +// rounded to cover full bytes on both extremities. +// +// The following example represents a slice (offset=10, length=9) +// +// 0 8 16 24 +// |-------|-------|------| +// [ ] (slice) +// [ ] (same slice aligned to bytes bounds, length=16) +// +// The covering bytes is the length (in bytes) of this new aligned slice. +constexpr int64_t CoveringBytes(int64_t offset, int64_t length) { + return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8; +} + +// Returns the 'num_bits' least-significant bits of 'v'. +static inline uint64_t TrailingBits(uint64_t v, int num_bits) { + if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0; + if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v; + int n = 64 - num_bits; + return (v << n) >> n; +} + +/// \brief Count the number of leading zeros in an unsigned integer. 
static inline int CountLeadingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  // __builtin_clz is undefined for 0, hence the guard
  if (value == 0) return 32;
  return static_cast<int>(__builtin_clz(value));
#elif defined(_MSC_VER)
  unsigned long index;                                               // NOLINT
  if (_BitScanReverse(&index, static_cast<unsigned long>(value))) {  // NOLINT
    return 31 - static_cast<int>(index);
  } else {
    return 32;
  }
#else
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 32 - bitpos;
#endif
}

static inline int CountLeadingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 64;
  return static_cast<int>(__builtin_clzll(value));
#elif defined(_MSC_VER)
  unsigned long index;                     // NOLINT
  if (_BitScanReverse64(&index, value)) {  // NOLINT
    return 63 - static_cast<int>(index);
  } else {
    return 64;
  }
#else
  int bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 64 - bitpos;
#endif
}

static inline int CountTrailingZeros(uint32_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 32;
  return static_cast<int>(__builtin_ctzl(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 32;
  }
#else
  int bitpos = 0;
  if (value) {
    // BUGFIX: `value & 1 == 0` parses as `value & (1 == 0)` == 0, so the loop
    // never ran and any nonzero input returned 0. Parenthesize the AND.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 32;
  }
  return bitpos;
#endif
}

static inline int CountTrailingZeros(uint64_t value) {
#if defined(__clang__) || defined(__GNUC__)
  if (value == 0) return 64;
  return static_cast<int>(__builtin_ctzll(value));
#elif defined(_MSC_VER)
  unsigned long index;  // NOLINT
  if (_BitScanForward64(&index, value)) {
    return static_cast<int>(index);
  } else {
    return 64;
  }
#else
  int bitpos = 0;
  if (value) {
    // BUGFIX: same precedence fix as the 32-bit overload above.
    while ((value & 1) == 0) {
      value >>= 1;
      ++bitpos;
    }
  } else {
    bitpos = 64;
  }
  return bitpos;
#endif
}

// Returns the minimum number of bits needed to represent an unsigned value
static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }

// Returns ceil(log2(x)).
static inline int Log2(uint64_t x) {
  // DCHECK_GT(x, 0);
  return NumRequiredBits(x - 1);
}

//
// Utilities for reading and writing individual bits by their index
// in a memory area.
//

// Bitmask selecting the k-th bit in a byte
static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};

// the bitwise complement version of kBitmask
static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};

// Bitmask selecting the (k - 1) preceding bits in a byte
static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};

// the bitwise complement version of kPrecedingBitmask
static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};

static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
  return (bits[i >> 3] >> (i & 0x07)) & 1;
}

// Gets the i-th bit from a byte. Should only be used with i <= 7.
+static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) { + return byte & kBitmask[i]; +} + +static inline void ClearBit(uint8_t* bits, int64_t i) { + bits[i / 8] &= kFlippedBitmask[i % 8]; +} + +static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; } + +static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) { + // https://graphics.stanford.edu/~seander/bithacks.html + // "Conditionally set or clear bits without branching" + // NOTE: this seems to confuse Valgrind as it reads from potentially + // uninitialized memory + bits[i / 8] ^= static_cast(-static_cast(bit_is_set) ^ bits[i / 8]) & + kBitmask[i % 8]; +} + +/// \brief set or clear a range of bits quickly +ARROW_EXPORT +void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set); + +/// \brief Sets all bits in the bitmap to true +ARROW_EXPORT +void SetBitmap(uint8_t* data, int64_t offset, int64_t length); + +/// \brief Clears all bits in the bitmap (set to false) +ARROW_EXPORT +void ClearBitmap(uint8_t* data, int64_t offset, int64_t length); + +/// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be +/// returned +/// ex: +/// ref: https://stackoverflow.com/a/59523400 +template +constexpr Word PrecedingWordBitmask(unsigned int const i) { + return static_cast(static_cast(i < sizeof(Word) * 8) + << (i & (sizeof(Word) * 8 - 1))) - + 1; +} +static_assert(PrecedingWordBitmask(0) == 0x00, ""); +static_assert(PrecedingWordBitmask(4) == 0x0f, ""); +static_assert(PrecedingWordBitmask(8) == 0xff, ""); +static_assert(PrecedingWordBitmask(8) == 0x00ff, ""); + +/// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits +/// from `high`. +/// Word ret +/// for (i = 0; i < sizeof(Word)*8; i++){ +/// ret[i]= i < n ? 
low[i]: high[i]; +/// } +template +constexpr Word SpliceWord(int n, Word low, Word high) { + return (high & ~PrecedingWordBitmask(n)) | (low & PrecedingWordBitmask(n)); +} + +/// \brief Pack integers into a bitmap in batches of 8 +template +void PackBits(const uint32_t* values, uint8_t* out) { + for (int i = 0; i < batch_size / 8; ++i) { + *out++ = static_cast(values[0] | values[1] << 1 | values[2] << 2 | + values[3] << 3 | values[4] << 4 | values[5] << 5 | + values[6] << 6 | values[7] << 7); + values += 8; + } +} + +} // namespace bit_util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h new file mode 100644 index 0000000000000000000000000000000000000000..4750e697fc7972e8ad57766ffd1134cf3e99fd14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h @@ -0,0 +1,466 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/bitmap_writer.h" +#include "arrow/util/compare.h" +#include "arrow/util/endian.h" +#include "arrow/util/functional.h" +#include "arrow/util/span.h" +#include "arrow/util/string_builder.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class BooleanArray; + +namespace internal { + +class ARROW_EXPORT Bitmap : public util::ToStringOstreamable, + public util::EqualityComparable { + public: + Bitmap() = default; + + Bitmap(const std::shared_ptr& buffer, int64_t offset, int64_t length) + : data_(buffer->data()), offset_(offset), length_(length) { + if (buffer->is_mutable()) { + mutable_data_ = buffer->mutable_data(); + } + } + + Bitmap(const void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), offset_(offset), length_(length) {} + + Bitmap(void* data, int64_t offset, int64_t length) + : data_(reinterpret_cast(data)), + mutable_data_(reinterpret_cast(data)), + offset_(offset), + length_(length) {} + + Bitmap Slice(int64_t offset) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length_ - offset}; + } else { + return {data_, offset_ + offset, length_ - offset}; + } + } + + Bitmap Slice(int64_t offset, int64_t length) const { + if (mutable_data_ != NULLPTR) { + return {mutable_data_, offset_ + offset, length}; + } else { + return {data_, offset_ + offset, length}; + } + } + + std::string ToString() const; + + bool Equals(const Bitmap& other) const; + + std::string Diff(const Bitmap& other) const; + + bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); } + + bool operator[](int64_t i) const { return GetBit(i); } + + void SetBitTo(int64_t i, bool v) const { + bit_util::SetBitTo(mutable_data_, i + offset_, 
v); + } + + void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); } + + void CopyFrom(const Bitmap& other); + void CopyFromInverted(const Bitmap& other); + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps, N); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit bits from each bitmap as bitset + /// + /// All bitmaps must have identical length. + template + static void VisitBits(const std::array& bitmaps, Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps); + std::bitset bits; + for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) { + for (size_t i = 0; i < N; ++i) { + bits[i] = bitmaps[i].GetBit(bit_i); + } + visitor(bits); + } + } + + /// \brief Visit words of bits from each bitmap as array + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// + /// TODO(bkietz) allow for early termination + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. 
+ template >::type::value_type> + static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + // local, mutable variables which will be sliced/decremented to represent consumption: + Bitmap bitmaps[N]; + int64_t offsets[N]; + int64_t bit_length = BitLength(bitmaps_arg, N); + util::span words[N]; + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps_arg[i]; + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + + auto consume = [&](int64_t consumed_bits) { + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits); + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + bit_length -= consumed_bits; + }; + + std::array visited_words; + visited_words.fill(0); + + if (bit_length <= kBitWidth * 2) { + // bitmaps fit into one or two words so don't bother with optimization + while (bit_length > 0) { + auto leading_bits = std::min(bit_length, kBitWidth); + SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + return 0; + } + + int64_t max_offset = *std::max_element(offsets, offsets + N); + int64_t min_offset = *std::min_element(offsets, offsets + N); + if (max_offset > 0) { + // consume leading bits + auto leading_bits = kBitWidth - min_offset; + SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + assert(*std::min_element(offsets, offsets + N) == 0); + + int64_t whole_word_count = bit_length / kBitWidth; + assert(whole_word_count >= 1); + + if (min_offset == max_offset) { + // all offsets were identical, all leading bits have been consumed + assert( + std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; 
})); + + for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) { + for (size_t i = 0; i < N; ++i) { + visited_words[i] = words[i][word_i]; + } + visitor(visited_words); + } + consume(whole_word_count * kBitWidth); + } else { + // leading bits from potentially incomplete words have been consumed + + // word_i such that words[i][word_i] and words[i][word_i + 1] are lie entirely + // within the bitmap for all i + for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) { + for (size_t i = 0; i < N; ++i) { + if (offsets[i] == 0) { + visited_words[i] = words[i][word_i]; + } else { + auto words0 = bit_util::ToLittleEndian(words[i][word_i]); + auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]); + visited_words[i] = bit_util::FromLittleEndian( + (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i]))); + } + } + visitor(visited_words); + } + consume((whole_word_count - 1) * kBitWidth); + + SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words); + + visitor(visited_words); + consume(kBitWidth); + } + + // load remaining bits + if (bit_length > 0) { + SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words); + visitor(visited_words); + } + + return min_offset; + } + + template >::type::value_type> + static void RunVisitWordsAndWriteLoop(int64_t bit_length, + std::array& readers, + std::array& writers, + Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + std::array visited_words; + std::array output_words; + + // every reader will have same number of words, since they are same length'ed + // TODO($JIRA) this will be inefficient in some cases. 
When there are offsets beyond + // Word boundary, every Word would have to be created from 2 adjoining Words + auto n_words = readers[0].words(); + bit_length -= n_words * kBitWidth; + while (n_words--) { + // first collect all words to visited_words array + for (size_t i = 0; i < N; i++) { + visited_words[i] = readers[i].NextWord(); + } + visitor(visited_words, &output_words); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextWord(output_words[i]); + } + } + + // every reader will have same number of trailing bytes, because of the above reason + // tailing portion could be more than one word! (ref: BitmapWordReader constructor) + // remaining full/ partial words to write + + if (bit_length) { + // convert the word visitor lambda to a byte_visitor + auto byte_visitor = [&](const std::array& in, + std::array* out) { + std::array in_words; + std::array out_words; + std::copy(in.begin(), in.end(), in_words.begin()); + visitor(in_words, &out_words); + for (size_t i = 0; i < M; i++) { + out->at(i) = static_cast(out_words[i]); + } + }; + + std::array visited_bytes; + std::array output_bytes; + int n_bytes = readers[0].trailing_bytes(); + while (n_bytes--) { + visited_bytes.fill(0); + output_bytes.fill(0); + int valid_bits; + for (size_t i = 0; i < N; i++) { + visited_bytes[i] = readers[i].NextTrailingByte(valid_bits); + } + byte_visitor(visited_bytes, &output_bytes); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextTrailingByte(output_bytes[i], valid_bits); + } + } + } + } + + /// \brief Visit words of bits from each input bitmap as array and collects + /// outputs to an array, to be written into the output bitmaps accordingly. + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. 
+ /// Visitor is expected to have the following signature + /// [](const std::array& in_words, std::array* out_words){...} + /// + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. + template >::type::value_type> + static void VisitWordsAndWrite(const std::array& bitmaps_arg, + std::array* out_bitmaps_arg, + Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps_arg); + assert(bit_length == BitLength(*out_bitmaps_arg)); + + // if both input and output bitmaps have no byte offset, then use special template + if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; }) && + std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; })) { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = BitmapWordReader( + in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter( + out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } else { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = + BitmapWordReader(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter(out_bitmap.mutable_data_, out_bitmap.offset_, + out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } + } + + const 
uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return mutable_data_; } + + /// offset of first bit relative to buffer().data() + int64_t offset() const { return offset_; } + + /// number of bits in this Bitmap + int64_t length() const { return length_; } + + /// span of all bytes which contain any bit in this Bitmap + util::span bytes() const { + auto byte_offset = offset_ / 8; + auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset; + return {data_ + byte_offset, static_cast(byte_count)}; + } + + private: + /// span of all Words which contain any bit in this Bitmap + /// + /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36) + /// words() would span bits [16, 48). + /// + /// 0 16 32 48 64 + /// |-------|-------|------|------| (buffer) + /// [ ] (bitmap) + /// |-------|------| (returned words) + /// + /// \warning The words may contain bytes which lie outside the buffer or are + /// uninitialized. + template + util::span words() const { + auto bytes_addr = reinterpret_cast(bytes().data()); + auto words_addr = bytes_addr - bytes_addr % sizeof(Word); + auto word_byte_count = + bit_util::RoundUpToPowerOf2(static_cast(bytes_addr + bytes().size()), + static_cast(sizeof(Word))) - + words_addr; + return {reinterpret_cast(words_addr), + static_cast(word_byte_count / sizeof(Word))}; + } + + /// offset of first bit relative to words().data() + template + int64_t word_offset() const { + return offset_ + 8 * (reinterpret_cast(data_) - + reinterpret_cast(words().data())); + } + + /// load words from bitmaps bitwise + template + static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset, + int64_t out_length, bool set_trailing_bits, + std::array* out) { + out->fill(0); + + int64_t out_offset = set_trailing_bits ? 
sizeof(Word) * 8 - out_length : 0; + + Bitmap slices[N], out_bitmaps[N]; + for (size_t i = 0; i < N; ++i) { + slices[i] = bitmaps[i].Slice(offset, out_length); + out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length); + } + + int64_t bit_i = 0; + Bitmap::VisitBits(slices, [&](std::bitset bits) { + for (size_t i = 0; i < N; ++i) { + out_bitmaps[i].SetBitTo(bit_i, bits[i]); + } + ++bit_i; + }); + } + + /// assert bitmaps have identical length and return that length + static int64_t BitLength(const Bitmap* bitmaps, size_t N); + + template + static int64_t BitLength(const std::array& bitmaps) { + for (size_t i = 1; i < N; ++i) { + assert(bitmaps[i].length() == bitmaps[0].length()); + } + return bitmaps[0].length(); + } + + const uint8_t* data_ = NULLPTR; + uint8_t* mutable_data_ = NULLPTR; + int64_t offset_ = 0, length_ = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h new file mode 100644 index 0000000000000000000000000000000000000000..5bd2ad44140834487b02d5899d3515e7b7eafefc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Generate Bitmap with all position to `value` except for one found +/// at `straggler_pos`. +ARROW_EXPORT +Result> BitmapAllButOne(MemoryPool* pool, int64_t length, + int64_t straggler_pos, bool value = true); + +/// \brief Convert vector of bytes to bitmap buffer +ARROW_EXPORT +Result> BytesToBits(const std::vector&, + MemoryPool* pool = default_memory_pool()); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h new file mode 100644 index 0000000000000000000000000000000000000000..52a1e228e01f1d6c3c37a5e2d49d843f0a4573f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/buffer.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A std::generate() like function to write sequential bits into a bitmap area. +// Bits preceding the bitmap area are preserved, bits following the bitmap +// area may be clobbered. + +template +void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) { + if (length == 0) { + return; + } + uint8_t* cur = bitmap + start_offset / 8; + uint8_t bit_mask = bit_util::kBitmask[start_offset % 8]; + uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8]; + + for (int64_t index = 0; index < length; ++index) { + const bool bit = g(); + current_byte = bit ? (current_byte | bit_mask) : current_byte; + bit_mask = static_cast(bit_mask << 1); + if (bit_mask == 0) { + bit_mask = 1; + *cur++ = current_byte; + current_byte = 0; + } + } + if (bit_mask != 1) { + *cur++ = current_byte; + } +} + +// Like GenerateBits(), but unrolls its main loop for higher performance. 
+ +template +void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length, + Generator&& g) { + static_assert(std::is_same()()), bool>::value, + "Functor passed to GenerateBitsUnrolled must return bool"); + + if (length == 0) { + return; + } + uint8_t current_byte; + uint8_t* cur = bitmap + start_offset / 8; + const uint64_t start_bit_offset = start_offset % 8; + uint8_t bit_mask = bit_util::kBitmask[start_bit_offset]; + int64_t remaining = length; + + if (bit_mask != 0x01) { + current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset]; + while (bit_mask != 0 && remaining > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + --remaining; + } + *cur++ = current_byte; + } + + int64_t remaining_bytes = remaining / 8; + uint8_t out_results[8]; + while (remaining_bytes-- > 0) { + for (int i = 0; i < 8; ++i) { + out_results[i] = g(); + } + *cur++ = static_cast(out_results[0] | out_results[1] << 1 | + out_results[2] << 2 | out_results[3] << 3 | + out_results[4] << 4 | out_results[5] << 5 | + out_results[6] << 6 | out_results[7] << 7); + } + + int64_t remaining_bits = remaining % 8; + if (remaining_bits) { + current_byte = 0; + bit_mask = 0x01; + while (remaining_bits-- > 0) { + current_byte |= g() * bit_mask; + bit_mask = static_cast(bit_mask << 1); + } + *cur++ = current_byte; + } +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a9d900b2588d9d556fd1995de1d60d8583edfca7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h @@ -0,0 +1,244 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; + +namespace internal { + +// ---------------------------------------------------------------------- +// Bitmap utilities + +/// Copy a bit range of an existing bitmap +/// +/// \param[in] pool memory pool to allocate memory from +/// \param[in] bitmap source data +/// \param[in] offset bit offset into the source data +/// \param[in] length number of bits to copy +/// +/// \return Status message +ARROW_EXPORT +Result> CopyBitmap(MemoryPool* pool, const uint8_t* bitmap, + int64_t offset, int64_t length); + +/// Copy a bit range of an existing bitmap into an existing bitmap +/// +/// \param[in] bitmap source data +/// \param[in] offset bit offset into the source data +/// \param[in] length number of bits to copy +/// \param[in] dest_offset bit offset into the destination +/// \param[out] dest the destination buffer, must have at least space for +/// (offset + length) bits +ARROW_EXPORT +void CopyBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest, + int64_t dest_offset); + +/// Invert a bit range of an existing bitmap into an existing bitmap +/// +/// \param[in] bitmap source data +/// \param[in] 
offset bit offset into the source data +/// \param[in] length number of bits to copy +/// \param[in] dest_offset bit offset into the destination +/// \param[out] dest the destination buffer, must have at least space for +/// (offset + length) bits +ARROW_EXPORT +void InvertBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest, + int64_t dest_offset); + +/// Invert a bit range of an existing bitmap +/// +/// \param[in] pool memory pool to allocate memory from +/// \param[in] bitmap source data +/// \param[in] offset bit offset into the source data +/// \param[in] length number of bits to copy +/// +/// \return Status message +ARROW_EXPORT +Result> InvertBitmap(MemoryPool* pool, const uint8_t* bitmap, + int64_t offset, int64_t length); + +/// Reverse a bit range of an existing bitmap into an existing bitmap +/// +/// \param[in] bitmap source data +/// \param[in] offset bit offset into the source data +/// \param[in] length number of bits to reverse +/// \param[in] dest_offset bit offset into the destination +/// \param[out] dest the destination buffer, must have at least space for +/// (offset + length) bits +ARROW_EXPORT +void ReverseBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest, + int64_t dest_offset); + +/// Reverse a bit range of an existing bitmap +/// +/// \param[in] pool memory pool to allocate memory from +/// \param[in] bitmap source data +/// \param[in] offset bit offset into the source data +/// \param[in] length number of bits to reverse +/// +/// \return Status message +ARROW_EXPORT +Result> ReverseBitmap(MemoryPool* pool, const uint8_t* bitmap, + int64_t offset, int64_t length); + +/// Compute the number of 1's in the given data array +/// +/// \param[in] data a packed LSB-ordered bitmap as a byte array +/// \param[in] bit_offset a bitwise offset into the bitmap +/// \param[in] length the number of bits to inspect in the bitmap relative to +/// the offset +/// +/// \return The number of set (1) bits in 
the range +ARROW_EXPORT +int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length); + +/// Compute the number of 1's in the result of an "and" (&) of two bitmaps +/// +/// \param[in] left_bitmap a packed LSB-ordered bitmap as a byte array +/// \param[in] left_offset a bitwise offset into the left bitmap +/// \param[in] right_bitmap a packed LSB-ordered bitmap as a byte array +/// \param[in] right_offset a bitwise offset into the right bitmap +/// \param[in] length the length of the bitmaps (must be the same) +/// +/// \return The number of set (1) bits in the "and" of the two bitmaps +ARROW_EXPORT +int64_t CountAndSetBits(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length); + +ARROW_EXPORT +bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length); + +// Same as BitmapEquals, but considers a NULL bitmap pointer the same as an +// all-ones bitmap. +ARROW_EXPORT +bool OptionalBitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length); + +ARROW_EXPORT +bool OptionalBitmapEquals(const std::shared_ptr& left, int64_t left_offset, + const std::shared_ptr& right, int64_t right_offset, + int64_t length); + +/// \brief Do a "bitmap and" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. 
+ARROW_EXPORT +Result> BitmapAnd(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap and" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapAnd(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap or" for the given bit length on right and left buffers +/// starting at their respective bit-offsets and put the results in out_buffer +/// starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result> BitmapOr(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap or" for the given bit length on right and left buffers +/// starting at their respective bit-offsets and put the results in out +/// starting at the given bit-offset. +ARROW_EXPORT +void BitmapOr(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap xor" for the given bit-length on right and left +/// buffers starting at their respective bit-offsets and put the results in +/// out_buffer starting at the given bit offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. 
+ARROW_EXPORT +Result> BitmapXor(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap xor" for the given bit-length on right and left +/// buffers starting at their respective bit-offsets and put the results in +/// out starting at the given bit offset. +ARROW_EXPORT +void BitmapXor(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap and not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result> BitmapAndNot(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap and not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapAndNot(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap or not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. 
+ARROW_EXPORT +Result> BitmapOrNot(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap or not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapOrNot(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..5526c87dbcaf2d6fc69709d6853d7dbbb351f044 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h @@ -0,0 +1,273 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +class BitmapReader { + public: + BitmapReader(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + current_byte_ = 0; + byte_offset_ = start_offset / 8; + bit_offset_ = start_offset % 8; + if (length > 0) { + current_byte_ = bitmap[byte_offset_]; + } + } + + bool IsSet() const { return (current_byte_ & (1 << bit_offset_)) != 0; } + + bool IsNotSet() const { return (current_byte_ & (1 << bit_offset_)) == 0; } + + void Next() { + ++bit_offset_; + ++position_; + if (ARROW_PREDICT_FALSE(bit_offset_ == 8)) { + bit_offset_ = 0; + ++byte_offset_; + if (ARROW_PREDICT_TRUE(position_ < length_)) { + current_byte_ = bitmap_[byte_offset_]; + } + } + } + + int64_t position() const { return position_; } + + int64_t length() const { return length_; } + + private: + const uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + int64_t byte_offset_; + int64_t bit_offset_; +}; + +// XXX Cannot name it BitmapWordReader because the name is already used +// in bitmap_ops.cc + +class BitmapUInt64Reader { + public: + BitmapUInt64Reader(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8), + num_carry_bits_(8 - start_offset % 8), + length_(length), + remaining_length_(length_), + carry_bits_(0) { + if (length_ > 0) { + // Load carry bits from the first byte's MSBs + if (length_ >= num_carry_bits_) { + carry_bits_ = + LoadPartialWord(static_cast(8 - num_carry_bits_), num_carry_bits_); + } else { + carry_bits_ = LoadPartialWord(static_cast(8 - num_carry_bits_), length_); + } + } + } + + uint64_t NextWord() { + if (ARROW_PREDICT_TRUE(remaining_length_ >= 64 + num_carry_bits_)) { + // We can load a full word + uint64_t 
next_word = LoadFullWord(); + // Carry bits come first, then the (64 - num_carry_bits_) LSBs from next_word + uint64_t word = carry_bits_ | (next_word << num_carry_bits_); + carry_bits_ = next_word >> (64 - num_carry_bits_); + remaining_length_ -= 64; + return word; + } else if (remaining_length_ > num_carry_bits_) { + // We can load a partial word + uint64_t next_word = + LoadPartialWord(/*bit_offset=*/0, remaining_length_ - num_carry_bits_); + uint64_t word = carry_bits_ | (next_word << num_carry_bits_); + carry_bits_ = next_word >> (64 - num_carry_bits_); + remaining_length_ = std::max(remaining_length_ - 64, 0); + return word; + } else { + remaining_length_ = 0; + return carry_bits_; + } + } + + int64_t position() const { return length_ - remaining_length_; } + + int64_t length() const { return length_; } + + private: + uint64_t LoadFullWord() { + uint64_t word; + memcpy(&word, bitmap_, 8); + bitmap_ += 8; + return bit_util::ToLittleEndian(word); + } + + uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) { + uint64_t word = 0; + const int64_t num_bytes = bit_util::BytesForBits(num_bits); + memcpy(&word, bitmap_, num_bytes); + bitmap_ += num_bytes; + return (bit_util::ToLittleEndian(word) >> bit_offset) & + bit_util::LeastSignificantBitMask(num_bits); + } + + const uint8_t* bitmap_; + const int64_t num_carry_bits_; // in [1, 8] + const int64_t length_; + int64_t remaining_length_; + uint64_t carry_bits_; +}; + +// BitmapWordReader here is faster than BitmapUInt64Reader (in bitmap_reader.h) +// on sufficiently large inputs. However, it has a larger prolog / epilog overhead +// and should probably not be used for small bitmaps. 
+ +template +class BitmapWordReader { + public: + BitmapWordReader() = default; + BitmapWordReader(const uint8_t* bitmap, int64_t offset, int64_t length) + : offset_(static_cast(may_have_byte_offset) * (offset % 8)), + bitmap_(bitmap + offset / 8), + bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)) { + // decrement word count by one as we may touch two adjacent words in one iteration + nwords_ = length / (sizeof(Word) * 8) - 1; + if (nwords_ < 0) { + nwords_ = 0; + } + trailing_bits_ = static_cast(length - nwords_ * sizeof(Word) * 8); + trailing_bytes_ = static_cast(bit_util::BytesForBits(trailing_bits_)); + + if (nwords_ > 0) { + current_data.word_ = load(bitmap_); + } else if (length > 0) { + current_data.epi.byte_ = load(bitmap_); + } + } + + Word NextWord() { + bitmap_ += sizeof(Word); + const Word next_word = load(bitmap_); + Word word = current_data.word_; + if (may_have_byte_offset && offset_) { + // combine two adjacent words into one word + // |<------ next ----->|<---- current ---->| + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // | | offset + // v v + // +-----+-------------+ + // | A | B | + // +-----+-------------+ + // |<------ word ----->| + word >>= offset_; + word |= next_word << (sizeof(Word) * 8 - offset_); + } + current_data.word_ = next_word; + return word; + } + + uint8_t NextTrailingByte(int& valid_bits) { + uint8_t byte; + assert(trailing_bits_ > 0); + + if (trailing_bits_ <= 8) { + // last byte + valid_bits = trailing_bits_; + trailing_bits_ = 0; + byte = 0; + internal::BitmapReader reader(bitmap_, offset_, valid_bits); + for (int i = 0; i < valid_bits; ++i) { + byte >>= 1; + if (reader.IsSet()) { + byte |= 0x80; + } + reader.Next(); + } + byte >>= (8 - valid_bits); + } else { + ++bitmap_; + const uint8_t next_byte = load(bitmap_); + byte = current_data.epi.byte_; + if (may_have_byte_offset && offset_) { + byte >>= offset_; + byte |= next_byte << (8 
- offset_); + } + current_data.epi.byte_ = next_byte; + trailing_bits_ -= 8; + trailing_bytes_--; + valid_bits = 8; + } + return byte; + } + + int64_t words() const { return nwords_; } + int trailing_bytes() const { return trailing_bytes_; } + + private: + int64_t offset_; + const uint8_t* bitmap_; + + const uint8_t* bitmap_end_; + int64_t nwords_; + int trailing_bits_; + int trailing_bytes_; + union { + Word word_; + struct { +#if ARROW_LITTLE_ENDIAN == 0 + uint8_t padding_bytes_[sizeof(Word) - 1]; +#endif + uint8_t byte_; + } epi; + } current_data; + + template + DType load(const uint8_t* bitmap) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + return bit_util::ToLittleEndian(util::SafeLoadAs(bitmap)); + } +}; + +/// \brief Index into a possibly nonexistent bitmap +struct OptionalBitIndexer { + const uint8_t* bitmap; + const int64_t offset; + + explicit OptionalBitIndexer(const uint8_t* buffer = NULLPTR, int64_t offset = 0) + : bitmap(buffer), offset(offset) {} + + bool operator[](int64_t i) const { + return bitmap == NULLPTR || bit_util::GetBit(bitmap, offset + i); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h new file mode 100644 index 0000000000000000000000000000000000000000..4f45619b2a770e3e6589af03012641ceb833b115 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h @@ -0,0 +1,5642 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This file was generated by script which is modified from its original version in +// GitHub. Original source: +// https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/. +// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint8_t* unpack0_64(const uint8_t* in, uint64_t* out) { + for (int k = 0; k < 32; k += 1) { + out[k] = 0; + } + return in; +} + +inline const uint8_t* unpack1_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 1) & mask; + out[2] = (w0 >> 2) & mask; + out[3] = (w0 >> 3) & mask; + out[4] = (w0 >> 4) & mask; + out[5] = (w0 >> 5) & mask; + out[6] = (w0 >> 6) & mask; + out[7] = (w0 >> 7) & mask; + out[8] = (w0 >> 8) & mask; + out[9] = (w0 >> 9) & mask; + out[10] = (w0 >> 10) & mask; + out[11] = (w0 >> 11) & mask; + out[12] = (w0 >> 12) & mask; + out[13] = (w0 >> 13) & mask; + out[14] = (w0 >> 14) & mask; + out[15] = (w0 >> 15) & mask; + out[16] = (w0 >> 16) & mask; + out[17] = (w0 >> 17) & mask; + out[18] = (w0 >> 18) & mask; + out[19] = (w0 >> 19) & mask; + out[20] = (w0 >> 20) & mask; + out[21] = (w0 >> 21) & mask; + out[22] = (w0 >> 22) & mask; + out[23] = (w0 
>> 23) & mask; + out[24] = (w0 >> 24) & mask; + out[25] = (w0 >> 25) & mask; + out[26] = (w0 >> 26) & mask; + out[27] = (w0 >> 27) & mask; + out[28] = (w0 >> 28) & mask; + out[29] = (w0 >> 29) & mask; + out[30] = (w0 >> 30) & mask; + out[31] = (w0 >> 31) & mask; + + return in; +} + +inline const uint8_t* unpack2_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 3ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 2) & mask; + out[2] = (w0 >> 4) & mask; + out[3] = (w0 >> 6) & mask; + out[4] = (w0 >> 8) & mask; + out[5] = (w0 >> 10) & mask; + out[6] = (w0 >> 12) & mask; + out[7] = (w0 >> 14) & mask; + out[8] = (w0 >> 16) & mask; + out[9] = (w0 >> 18) & mask; + out[10] = (w0 >> 20) & mask; + out[11] = (w0 >> 22) & mask; + out[12] = (w0 >> 24) & mask; + out[13] = (w0 >> 26) & mask; + out[14] = (w0 >> 28) & mask; + out[15] = (w0 >> 30) & mask; + out[16] = (w0 >> 32) & mask; + out[17] = (w0 >> 34) & mask; + out[18] = (w0 >> 36) & mask; + out[19] = (w0 >> 38) & mask; + out[20] = (w0 >> 40) & mask; + out[21] = (w0 >> 42) & mask; + out[22] = (w0 >> 44) & mask; + out[23] = (w0 >> 46) & mask; + out[24] = (w0 >> 48) & mask; + out[25] = (w0 >> 50) & mask; + out[26] = (w0 >> 52) & mask; + out[27] = (w0 >> 54) & mask; + out[28] = (w0 >> 56) & mask; + out[29] = (w0 >> 58) & mask; + out[30] = (w0 >> 60) & mask; + out[31] = w0 >> 62; + + return in; +} + +inline const uint8_t* unpack3_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 7ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 3) & mask; + out[2] = (w0 >> 6) & mask; + out[3] = (w0 >> 9) & mask; + out[4] = (w0 >> 12) & mask; + out[5] = (w0 >> 15) & mask; + out[6] = (w0 >> 18) & mask; + out[7] = (w0 >> 21) & mask; + out[8] = (w0 >> 24) 
& mask; + out[9] = (w0 >> 27) & mask; + out[10] = (w0 >> 30) & mask; + out[11] = (w0 >> 33) & mask; + out[12] = (w0 >> 36) & mask; + out[13] = (w0 >> 39) & mask; + out[14] = (w0 >> 42) & mask; + out[15] = (w0 >> 45) & mask; + out[16] = (w0 >> 48) & mask; + out[17] = (w0 >> 51) & mask; + out[18] = (w0 >> 54) & mask; + out[19] = (w0 >> 57) & mask; + out[20] = (w0 >> 60) & mask; + out[21] = ((w0 >> 63) | (w1 << 1)) & mask; + out[22] = (w1 >> 2) & mask; + out[23] = (w1 >> 5) & mask; + out[24] = (w1 >> 8) & mask; + out[25] = (w1 >> 11) & mask; + out[26] = (w1 >> 14) & mask; + out[27] = (w1 >> 17) & mask; + out[28] = (w1 >> 20) & mask; + out[29] = (w1 >> 23) & mask; + out[30] = (w1 >> 26) & mask; + out[31] = (w1 >> 29) & mask; + + return in; +} + +inline const uint8_t* unpack4_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 15ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 4) & mask; + out[2] = (w0 >> 8) & mask; + out[3] = (w0 >> 12) & mask; + out[4] = (w0 >> 16) & mask; + out[5] = (w0 >> 20) & mask; + out[6] = (w0 >> 24) & mask; + out[7] = (w0 >> 28) & mask; + out[8] = (w0 >> 32) & mask; + out[9] = (w0 >> 36) & mask; + out[10] = (w0 >> 40) & mask; + out[11] = (w0 >> 44) & mask; + out[12] = (w0 >> 48) & mask; + out[13] = (w0 >> 52) & mask; + out[14] = (w0 >> 56) & mask; + out[15] = w0 >> 60; + out[16] = (w1)&mask; + out[17] = (w1 >> 4) & mask; + out[18] = (w1 >> 8) & mask; + out[19] = (w1 >> 12) & mask; + out[20] = (w1 >> 16) & mask; + out[21] = (w1 >> 20) & mask; + out[22] = (w1 >> 24) & mask; + out[23] = (w1 >> 28) & mask; + out[24] = (w1 >> 32) & mask; + out[25] = (w1 >> 36) & mask; + out[26] = (w1 >> 40) & mask; + out[27] = (w1 >> 44) & mask; + out[28] = (w1 >> 48) & mask; + out[29] = (w1 >> 52) & mask; + out[30] = (w1 >> 56) & mask; + out[31] = w1 >> 60; + + return 
in; +} + +inline const uint8_t* unpack5_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 31ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 5) & mask; + out[2] = (w0 >> 10) & mask; + out[3] = (w0 >> 15) & mask; + out[4] = (w0 >> 20) & mask; + out[5] = (w0 >> 25) & mask; + out[6] = (w0 >> 30) & mask; + out[7] = (w0 >> 35) & mask; + out[8] = (w0 >> 40) & mask; + out[9] = (w0 >> 45) & mask; + out[10] = (w0 >> 50) & mask; + out[11] = (w0 >> 55) & mask; + out[12] = ((w0 >> 60) | (w1 << 4)) & mask; + out[13] = (w1 >> 1) & mask; + out[14] = (w1 >> 6) & mask; + out[15] = (w1 >> 11) & mask; + out[16] = (w1 >> 16) & mask; + out[17] = (w1 >> 21) & mask; + out[18] = (w1 >> 26) & mask; + out[19] = (w1 >> 31) & mask; + out[20] = (w1 >> 36) & mask; + out[21] = (w1 >> 41) & mask; + out[22] = (w1 >> 46) & mask; + out[23] = (w1 >> 51) & mask; + out[24] = (w1 >> 56) & mask; + out[25] = ((w1 >> 61) | (w2 << 3)) & mask; + out[26] = (w2 >> 2) & mask; + out[27] = (w2 >> 7) & mask; + out[28] = (w2 >> 12) & mask; + out[29] = (w2 >> 17) & mask; + out[30] = (w2 >> 22) & mask; + out[31] = (w2 >> 27) & mask; + + return in; +} + +inline const uint8_t* unpack6_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 63ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 6) & mask; + out[2] = (w0 >> 12) & mask; + out[3] = (w0 >> 18) & mask; + out[4] = (w0 >> 24) & mask; + out[5] = (w0 >> 30) & mask; + out[6] = (w0 >> 36) & mask; 
+ out[7] = (w0 >> 42) & mask; + out[8] = (w0 >> 48) & mask; + out[9] = (w0 >> 54) & mask; + out[10] = ((w0 >> 60) | (w1 << 4)) & mask; + out[11] = (w1 >> 2) & mask; + out[12] = (w1 >> 8) & mask; + out[13] = (w1 >> 14) & mask; + out[14] = (w1 >> 20) & mask; + out[15] = (w1 >> 26) & mask; + out[16] = (w1 >> 32) & mask; + out[17] = (w1 >> 38) & mask; + out[18] = (w1 >> 44) & mask; + out[19] = (w1 >> 50) & mask; + out[20] = (w1 >> 56) & mask; + out[21] = ((w1 >> 62) | (w2 << 2)) & mask; + out[22] = (w2 >> 4) & mask; + out[23] = (w2 >> 10) & mask; + out[24] = (w2 >> 16) & mask; + out[25] = (w2 >> 22) & mask; + out[26] = (w2 >> 28) & mask; + out[27] = (w2 >> 34) & mask; + out[28] = (w2 >> 40) & mask; + out[29] = (w2 >> 46) & mask; + out[30] = (w2 >> 52) & mask; + out[31] = w2 >> 58; + + return in; +} + +inline const uint8_t* unpack7_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 127ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 7) & mask; + out[2] = (w0 >> 14) & mask; + out[3] = (w0 >> 21) & mask; + out[4] = (w0 >> 28) & mask; + out[5] = (w0 >> 35) & mask; + out[6] = (w0 >> 42) & mask; + out[7] = (w0 >> 49) & mask; + out[8] = (w0 >> 56) & mask; + out[9] = ((w0 >> 63) | (w1 << 1)) & mask; + out[10] = (w1 >> 6) & mask; + out[11] = (w1 >> 13) & mask; + out[12] = (w1 >> 20) & mask; + out[13] = (w1 >> 27) & mask; + out[14] = (w1 >> 34) & mask; + out[15] = (w1 >> 41) & mask; + out[16] = (w1 >> 48) & mask; + out[17] = (w1 >> 55) & mask; + out[18] = ((w1 >> 62) | (w2 << 2)) & mask; + out[19] = (w2 >> 5) & mask; + out[20] = (w2 >> 12) & mask; + out[21] = (w2 >> 19) & mask; + out[22] = (w2 >> 
26) & mask; + out[23] = (w2 >> 33) & mask; + out[24] = (w2 >> 40) & mask; + out[25] = (w2 >> 47) & mask; + out[26] = (w2 >> 54) & mask; + out[27] = ((w2 >> 61) | (w3 << 3)) & mask; + out[28] = (w3 >> 4) & mask; + out[29] = (w3 >> 11) & mask; + out[30] = (w3 >> 18) & mask; + out[31] = (w3 >> 25) & mask; + + return in; +} + +inline const uint8_t* unpack8_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 255ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 8) & mask; + out[2] = (w0 >> 16) & mask; + out[3] = (w0 >> 24) & mask; + out[4] = (w0 >> 32) & mask; + out[5] = (w0 >> 40) & mask; + out[6] = (w0 >> 48) & mask; + out[7] = w0 >> 56; + out[8] = (w1)&mask; + out[9] = (w1 >> 8) & mask; + out[10] = (w1 >> 16) & mask; + out[11] = (w1 >> 24) & mask; + out[12] = (w1 >> 32) & mask; + out[13] = (w1 >> 40) & mask; + out[14] = (w1 >> 48) & mask; + out[15] = w1 >> 56; + out[16] = (w2)&mask; + out[17] = (w2 >> 8) & mask; + out[18] = (w2 >> 16) & mask; + out[19] = (w2 >> 24) & mask; + out[20] = (w2 >> 32) & mask; + out[21] = (w2 >> 40) & mask; + out[22] = (w2 >> 48) & mask; + out[23] = w2 >> 56; + out[24] = (w3)&mask; + out[25] = (w3 >> 8) & mask; + out[26] = (w3 >> 16) & mask; + out[27] = (w3 >> 24) & mask; + out[28] = (w3 >> 32) & mask; + out[29] = (w3 >> 40) & mask; + out[30] = (w3 >> 48) & mask; + out[31] = w3 >> 56; + + return in; +} + +inline const uint8_t* unpack9_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 511ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = 
arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 9) & mask; + out[2] = (w0 >> 18) & mask; + out[3] = (w0 >> 27) & mask; + out[4] = (w0 >> 36) & mask; + out[5] = (w0 >> 45) & mask; + out[6] = (w0 >> 54) & mask; + out[7] = ((w0 >> 63) | (w1 << 1)) & mask; + out[8] = (w1 >> 8) & mask; + out[9] = (w1 >> 17) & mask; + out[10] = (w1 >> 26) & mask; + out[11] = (w1 >> 35) & mask; + out[12] = (w1 >> 44) & mask; + out[13] = (w1 >> 53) & mask; + out[14] = ((w1 >> 62) | (w2 << 2)) & mask; + out[15] = (w2 >> 7) & mask; + out[16] = (w2 >> 16) & mask; + out[17] = (w2 >> 25) & mask; + out[18] = (w2 >> 34) & mask; + out[19] = (w2 >> 43) & mask; + out[20] = (w2 >> 52) & mask; + out[21] = ((w2 >> 61) | (w3 << 3)) & mask; + out[22] = (w3 >> 6) & mask; + out[23] = (w3 >> 15) & mask; + out[24] = (w3 >> 24) & mask; + out[25] = (w3 >> 33) & mask; + out[26] = (w3 >> 42) & mask; + out[27] = (w3 >> 51) & mask; + out[28] = ((w3 >> 60) | (w4 << 4)) & mask; + out[29] = (w4 >> 5) & mask; + out[30] = (w4 >> 14) & mask; + out[31] = (w4 >> 23) & mask; + + return in; +} + +inline const uint8_t* unpack10_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1023ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 
10) & mask; + out[2] = (w0 >> 20) & mask; + out[3] = (w0 >> 30) & mask; + out[4] = (w0 >> 40) & mask; + out[5] = (w0 >> 50) & mask; + out[6] = ((w0 >> 60) | (w1 << 4)) & mask; + out[7] = (w1 >> 6) & mask; + out[8] = (w1 >> 16) & mask; + out[9] = (w1 >> 26) & mask; + out[10] = (w1 >> 36) & mask; + out[11] = (w1 >> 46) & mask; + out[12] = ((w1 >> 56) | (w2 << 8)) & mask; + out[13] = (w2 >> 2) & mask; + out[14] = (w2 >> 12) & mask; + out[15] = (w2 >> 22) & mask; + out[16] = (w2 >> 32) & mask; + out[17] = (w2 >> 42) & mask; + out[18] = (w2 >> 52) & mask; + out[19] = ((w2 >> 62) | (w3 << 2)) & mask; + out[20] = (w3 >> 8) & mask; + out[21] = (w3 >> 18) & mask; + out[22] = (w3 >> 28) & mask; + out[23] = (w3 >> 38) & mask; + out[24] = (w3 >> 48) & mask; + out[25] = ((w3 >> 58) | (w4 << 6)) & mask; + out[26] = (w4 >> 4) & mask; + out[27] = (w4 >> 14) & mask; + out[28] = (w4 >> 24) & mask; + out[29] = (w4 >> 34) & mask; + out[30] = (w4 >> 44) & mask; + out[31] = w4 >> 54; + + return in; +} + +inline const uint8_t* unpack11_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2047ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 11) & mask; + out[2] = (w0 >> 22) & mask; + out[3] = (w0 >> 33) & mask; + out[4] = (w0 >> 44) & mask; + out[5] = ((w0 >> 55) | (w1 << 9)) & mask; + out[6] = (w1 >> 2) & mask; + out[7] = (w1 >> 13) & mask; + out[8] = (w1 >> 24) & mask; + out[9] = (w1 >> 35) & mask; + out[10] = (w1 >> 46) 
& mask; + out[11] = ((w1 >> 57) | (w2 << 7)) & mask; + out[12] = (w2 >> 4) & mask; + out[13] = (w2 >> 15) & mask; + out[14] = (w2 >> 26) & mask; + out[15] = (w2 >> 37) & mask; + out[16] = (w2 >> 48) & mask; + out[17] = ((w2 >> 59) | (w3 << 5)) & mask; + out[18] = (w3 >> 6) & mask; + out[19] = (w3 >> 17) & mask; + out[20] = (w3 >> 28) & mask; + out[21] = (w3 >> 39) & mask; + out[22] = (w3 >> 50) & mask; + out[23] = ((w3 >> 61) | (w4 << 3)) & mask; + out[24] = (w4 >> 8) & mask; + out[25] = (w4 >> 19) & mask; + out[26] = (w4 >> 30) & mask; + out[27] = (w4 >> 41) & mask; + out[28] = (w4 >> 52) & mask; + out[29] = ((w4 >> 63) | (w5 << 1)) & mask; + out[30] = (w5 >> 10) & mask; + out[31] = (w5 >> 21) & mask; + + return in; +} + +inline const uint8_t* unpack12_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4095ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 12) & mask; + out[2] = (w0 >> 24) & mask; + out[3] = (w0 >> 36) & mask; + out[4] = (w0 >> 48) & mask; + out[5] = ((w0 >> 60) | (w1 << 4)) & mask; + out[6] = (w1 >> 8) & mask; + out[7] = (w1 >> 20) & mask; + out[8] = (w1 >> 32) & mask; + out[9] = (w1 >> 44) & mask; + out[10] = ((w1 >> 56) | (w2 << 8)) & mask; + out[11] = (w2 >> 4) & mask; + out[12] = (w2 >> 16) & mask; + out[13] = (w2 >> 28) & mask; + out[14] = (w2 >> 40) & mask; + out[15] = w2 >> 52; + out[16] = (w3)&mask; + out[17] = (w3 >> 12) & mask; + out[18] = (w3 >> 24) & mask; + out[19] = 
(w3 >> 36) & mask; + out[20] = (w3 >> 48) & mask; + out[21] = ((w3 >> 60) | (w4 << 4)) & mask; + out[22] = (w4 >> 8) & mask; + out[23] = (w4 >> 20) & mask; + out[24] = (w4 >> 32) & mask; + out[25] = (w4 >> 44) & mask; + out[26] = ((w4 >> 56) | (w5 << 8)) & mask; + out[27] = (w5 >> 4) & mask; + out[28] = (w5 >> 16) & mask; + out[29] = (w5 >> 28) & mask; + out[30] = (w5 >> 40) & mask; + out[31] = w5 >> 52; + + return in; +} + +inline const uint8_t* unpack13_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8191ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 13) & mask; + out[2] = (w0 >> 26) & mask; + out[3] = (w0 >> 39) & mask; + out[4] = ((w0 >> 52) | (w1 << 12)) & mask; + out[5] = (w1 >> 1) & mask; + out[6] = (w1 >> 14) & mask; + out[7] = (w1 >> 27) & mask; + out[8] = (w1 >> 40) & mask; + out[9] = ((w1 >> 53) | (w2 << 11)) & mask; + out[10] = (w2 >> 2) & mask; + out[11] = (w2 >> 15) & mask; + out[12] = (w2 >> 28) & mask; + out[13] = (w2 >> 41) & mask; + out[14] = ((w2 >> 54) | (w3 << 10)) & mask; + out[15] = (w3 >> 3) & mask; + out[16] = (w3 >> 16) & mask; + out[17] = (w3 >> 29) & mask; + out[18] = (w3 >> 42) & mask; + out[19] = ((w3 >> 55) | (w4 << 9)) & mask; + out[20] = (w4 >> 4) & mask; + out[21] = (w4 >> 17) & mask; + out[22] = (w4 >> 30) & mask; + out[23] = (w4 >> 43) & mask; + out[24] = ((w4 >> 
56) | (w5 << 8)) & mask; + out[25] = (w5 >> 5) & mask; + out[26] = (w5 >> 18) & mask; + out[27] = (w5 >> 31) & mask; + out[28] = (w5 >> 44) & mask; + out[29] = ((w5 >> 57) | (w6 << 7)) & mask; + out[30] = (w6 >> 6) & mask; + out[31] = (w6 >> 19) & mask; + + return in; +} + +inline const uint8_t* unpack14_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16383ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 14) & mask; + out[2] = (w0 >> 28) & mask; + out[3] = (w0 >> 42) & mask; + out[4] = ((w0 >> 56) | (w1 << 8)) & mask; + out[5] = (w1 >> 6) & mask; + out[6] = (w1 >> 20) & mask; + out[7] = (w1 >> 34) & mask; + out[8] = (w1 >> 48) & mask; + out[9] = ((w1 >> 62) | (w2 << 2)) & mask; + out[10] = (w2 >> 12) & mask; + out[11] = (w2 >> 26) & mask; + out[12] = (w2 >> 40) & mask; + out[13] = ((w2 >> 54) | (w3 << 10)) & mask; + out[14] = (w3 >> 4) & mask; + out[15] = (w3 >> 18) & mask; + out[16] = (w3 >> 32) & mask; + out[17] = (w3 >> 46) & mask; + out[18] = ((w3 >> 60) | (w4 << 4)) & mask; + out[19] = (w4 >> 10) & mask; + out[20] = (w4 >> 24) & mask; + out[21] = (w4 >> 38) & mask; + out[22] = ((w4 >> 52) | (w5 << 12)) & mask; + out[23] = (w5 >> 2) & mask; + out[24] = (w5 >> 16) & mask; + out[25] = (w5 >> 30) & mask; + out[26] = (w5 >> 44) & mask; + out[27] = ((w5 >> 58) | (w6 << 6)) & mask; + out[28] = (w6 >> 8) 
& mask; + out[29] = (w6 >> 22) & mask; + out[30] = (w6 >> 36) & mask; + out[31] = w6 >> 50; + + return in; +} + +inline const uint8_t* unpack15_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 32767ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 15) & mask; + out[2] = (w0 >> 30) & mask; + out[3] = (w0 >> 45) & mask; + out[4] = ((w0 >> 60) | (w1 << 4)) & mask; + out[5] = (w1 >> 11) & mask; + out[6] = (w1 >> 26) & mask; + out[7] = (w1 >> 41) & mask; + out[8] = ((w1 >> 56) | (w2 << 8)) & mask; + out[9] = (w2 >> 7) & mask; + out[10] = (w2 >> 22) & mask; + out[11] = (w2 >> 37) & mask; + out[12] = ((w2 >> 52) | (w3 << 12)) & mask; + out[13] = (w3 >> 3) & mask; + out[14] = (w3 >> 18) & mask; + out[15] = (w3 >> 33) & mask; + out[16] = (w3 >> 48) & mask; + out[17] = ((w3 >> 63) | (w4 << 1)) & mask; + out[18] = (w4 >> 14) & mask; + out[19] = (w4 >> 29) & mask; + out[20] = (w4 >> 44) & mask; + out[21] = ((w4 >> 59) | (w5 << 5)) & mask; + out[22] = (w5 >> 10) & mask; + out[23] = (w5 >> 25) & mask; + out[24] = (w5 >> 40) & mask; + out[25] = ((w5 >> 55) | (w6 << 9)) & mask; + out[26] = (w6 >> 6) & mask; + out[27] = (w6 >> 21) & mask; + out[28] = (w6 >> 36) & mask; + out[29] = ((w6 >> 51) | (w7 << 13)) & mask; + out[30] = 
(w7 >> 2) & mask; + out[31] = (w7 >> 17) & mask; + + return in; +} + +inline const uint8_t* unpack16_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 65535ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 16) & mask; + out[2] = (w0 >> 32) & mask; + out[3] = w0 >> 48; + out[4] = (w1)&mask; + out[5] = (w1 >> 16) & mask; + out[6] = (w1 >> 32) & mask; + out[7] = w1 >> 48; + out[8] = (w2)&mask; + out[9] = (w2 >> 16) & mask; + out[10] = (w2 >> 32) & mask; + out[11] = w2 >> 48; + out[12] = (w3)&mask; + out[13] = (w3 >> 16) & mask; + out[14] = (w3 >> 32) & mask; + out[15] = w3 >> 48; + out[16] = (w4)&mask; + out[17] = (w4 >> 16) & mask; + out[18] = (w4 >> 32) & mask; + out[19] = w4 >> 48; + out[20] = (w5)&mask; + out[21] = (w5 >> 16) & mask; + out[22] = (w5 >> 32) & mask; + out[23] = w5 >> 48; + out[24] = (w6)&mask; + out[25] = (w6 >> 16) & mask; + out[26] = (w6 >> 32) & mask; + out[27] = w6 >> 48; + out[28] = (w7)&mask; + out[29] = (w7 >> 16) & mask; + out[30] = (w7 >> 32) & mask; + out[31] = w7 >> 48; + + return in; +} + +inline const uint8_t* unpack17_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 131071ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 
8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 17) & mask; + out[2] = (w0 >> 34) & mask; + out[3] = ((w0 >> 51) | (w1 << 13)) & mask; + out[4] = (w1 >> 4) & mask; + out[5] = (w1 >> 21) & mask; + out[6] = (w1 >> 38) & mask; + out[7] = ((w1 >> 55) | (w2 << 9)) & mask; + out[8] = (w2 >> 8) & mask; + out[9] = (w2 >> 25) & mask; + out[10] = (w2 >> 42) & mask; + out[11] = ((w2 >> 59) | (w3 << 5)) & mask; + out[12] = (w3 >> 12) & mask; + out[13] = (w3 >> 29) & mask; + out[14] = (w3 >> 46) & mask; + out[15] = ((w3 >> 63) | (w4 << 1)) & mask; + out[16] = (w4 >> 16) & mask; + out[17] = (w4 >> 33) & mask; + out[18] = ((w4 >> 50) | (w5 << 14)) & mask; + out[19] = (w5 >> 3) & mask; + out[20] = (w5 >> 20) & mask; + out[21] = (w5 >> 37) & mask; + out[22] = ((w5 >> 54) | (w6 << 10)) & mask; + out[23] = (w6 >> 7) & mask; + out[24] = (w6 >> 24) & mask; + out[25] = (w6 >> 41) & mask; + out[26] = ((w6 >> 58) | (w7 << 6)) & mask; + out[27] = (w7 >> 11) & mask; + out[28] = (w7 >> 28) & mask; + out[29] = (w7 >> 45) & mask; + out[30] = ((w7 >> 62) | (w8 << 2)) & mask; + out[31] = (w8 >> 15) & mask; + + return in; +} + +inline const uint8_t* unpack18_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 262143ULL; + uint64_t w0 = 
util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 18) & mask; + out[2] = (w0 >> 36) & mask; + out[3] = ((w0 >> 54) | (w1 << 10)) & mask; + out[4] = (w1 >> 8) & mask; + out[5] = (w1 >> 26) & mask; + out[6] = (w1 >> 44) & mask; + out[7] = ((w1 >> 62) | (w2 << 2)) & mask; + out[8] = (w2 >> 16) & mask; + out[9] = (w2 >> 34) & mask; + out[10] = ((w2 >> 52) | (w3 << 12)) & mask; + out[11] = (w3 >> 6) & mask; + out[12] = (w3 >> 24) & mask; + out[13] = (w3 >> 42) & mask; + out[14] = ((w3 >> 60) | (w4 << 4)) & mask; + out[15] = (w4 >> 14) & mask; + out[16] = (w4 >> 32) & mask; + out[17] = ((w4 >> 50) | (w5 << 14)) & mask; + out[18] = (w5 >> 4) & mask; + out[19] = (w5 >> 22) & mask; + out[20] = (w5 >> 40) & mask; + out[21] = ((w5 >> 58) | (w6 << 6)) & mask; + out[22] = (w6 >> 12) & mask; + out[23] = (w6 >> 30) & mask; + out[24] = ((w6 >> 48) | (w7 << 16)) & mask; + out[25] = (w7 >> 2) & mask; + out[26] = (w7 >> 20) & mask; + out[27] = (w7 >> 38) & mask; + out[28] = ((w7 >> 56) | (w8 << 8)) & mask; + out[29] = (w8 >> 10) & mask; + out[30] = (w8 >> 28) & mask; + out[31] = w8 >> 46; + + return in; +} + +inline const uint8_t* unpack19_64(const uint8_t* in, uint64_t* 
out) { + const uint64_t mask = 524287ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 19) & mask; + out[2] = (w0 >> 38) & mask; + out[3] = ((w0 >> 57) | (w1 << 7)) & mask; + out[4] = (w1 >> 12) & mask; + out[5] = (w1 >> 31) & mask; + out[6] = ((w1 >> 50) | (w2 << 14)) & mask; + out[7] = (w2 >> 5) & mask; + out[8] = (w2 >> 24) & mask; + out[9] = (w2 >> 43) & mask; + out[10] = ((w2 >> 62) | (w3 << 2)) & mask; + out[11] = (w3 >> 17) & mask; + out[12] = (w3 >> 36) & mask; + out[13] = ((w3 >> 55) | (w4 << 9)) & mask; + out[14] = (w4 >> 10) & mask; + out[15] = (w4 >> 29) & mask; + out[16] = ((w4 >> 48) | (w5 << 16)) & mask; + out[17] = (w5 >> 3) & mask; + out[18] = (w5 >> 22) & mask; + out[19] = (w5 >> 41) & mask; + out[20] = ((w5 >> 60) | (w6 << 4)) & mask; + out[21] = (w6 >> 15) & mask; + out[22] = (w6 >> 34) & mask; + out[23] = ((w6 >> 53) | (w7 << 11)) & mask; + out[24] = (w7 >> 8) & mask; + out[25] = (w7 >> 27) & mask; + out[26] = ((w7 >> 46) | (w8 << 18)) & mask; + out[27] = (w8 >> 1) & mask; + out[28] = (w8 >> 20) & mask; + out[29] = 
(w8 >> 39) & mask; + out[30] = ((w8 >> 58) | (w9 << 6)) & mask; + out[31] = (w9 >> 13) & mask; + + return in; +} + +inline const uint8_t* unpack20_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1048575ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 20) & mask; + out[2] = (w0 >> 40) & mask; + out[3] = ((w0 >> 60) | (w1 << 4)) & mask; + out[4] = (w1 >> 16) & mask; + out[5] = (w1 >> 36) & mask; + out[6] = ((w1 >> 56) | (w2 << 8)) & mask; + out[7] = (w2 >> 12) & mask; + out[8] = (w2 >> 32) & mask; + out[9] = ((w2 >> 52) | (w3 << 12)) & mask; + out[10] = (w3 >> 8) & mask; + out[11] = (w3 >> 28) & mask; + out[12] = ((w3 >> 48) | (w4 << 16)) & mask; + out[13] = (w4 >> 4) & mask; + out[14] = (w4 >> 24) & mask; + out[15] = w4 >> 44; + out[16] = (w5)&mask; + out[17] = (w5 >> 20) & mask; + out[18] = (w5 >> 40) & mask; + out[19] = ((w5 >> 60) | (w6 << 4)) & mask; + out[20] = (w6 >> 16) & mask; + out[21] = (w6 >> 36) & mask; + out[22] = ((w6 >> 56) | (w7 << 8)) & mask; + out[23] = (w7 >> 12) & mask; + out[24] = (w7 >> 32) & mask; + 
out[25] = ((w7 >> 52) | (w8 << 12)) & mask; + out[26] = (w8 >> 8) & mask; + out[27] = (w8 >> 28) & mask; + out[28] = ((w8 >> 48) | (w9 << 16)) & mask; + out[29] = (w9 >> 4) & mask; + out[30] = (w9 >> 24) & mask; + out[31] = w9 >> 44; + + return in; +} + +inline const uint8_t* unpack21_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2097151ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 21) & mask; + out[2] = (w0 >> 42) & mask; + out[3] = ((w0 >> 63) | (w1 << 1)) & mask; + out[4] = (w1 >> 20) & mask; + out[5] = (w1 >> 41) & mask; + out[6] = ((w1 >> 62) | (w2 << 2)) & mask; + out[7] = (w2 >> 19) & mask; + out[8] = (w2 >> 40) & mask; + out[9] = ((w2 >> 61) | (w3 << 3)) & mask; + out[10] = (w3 >> 18) & mask; + out[11] = (w3 >> 39) & mask; + out[12] = ((w3 >> 60) | (w4 << 4)) & mask; + out[13] = (w4 >> 17) & mask; + out[14] = (w4 >> 38) & mask; + out[15] = ((w4 >> 59) | (w5 << 5)) & mask; + out[16] = (w5 >> 16) & mask; + out[17] = 
(w5 >> 37) & mask; + out[18] = ((w5 >> 58) | (w6 << 6)) & mask; + out[19] = (w6 >> 15) & mask; + out[20] = (w6 >> 36) & mask; + out[21] = ((w6 >> 57) | (w7 << 7)) & mask; + out[22] = (w7 >> 14) & mask; + out[23] = (w7 >> 35) & mask; + out[24] = ((w7 >> 56) | (w8 << 8)) & mask; + out[25] = (w8 >> 13) & mask; + out[26] = (w8 >> 34) & mask; + out[27] = ((w8 >> 55) | (w9 << 9)) & mask; + out[28] = (w9 >> 12) & mask; + out[29] = (w9 >> 33) & mask; + out[30] = ((w9 >> 54) | (w10 << 10)) & mask; + out[31] = (w10 >> 11) & mask; + + return in; +} + +inline const uint8_t* unpack22_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4194303ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 22) & mask; + out[2] = ((w0 >> 44) | (w1 << 20)) & mask; + out[3] = (w1 >> 2) & mask; + out[4] = (w1 >> 24) & mask; + out[5] = ((w1 >> 46) | (w2 << 18)) & mask; + out[6] = (w2 >> 4) & mask; + out[7] = (w2 >> 26) & mask; + out[8] = ((w2 >> 48) | (w3 << 16)) & 
mask; + out[9] = (w3 >> 6) & mask; + out[10] = (w3 >> 28) & mask; + out[11] = ((w3 >> 50) | (w4 << 14)) & mask; + out[12] = (w4 >> 8) & mask; + out[13] = (w4 >> 30) & mask; + out[14] = ((w4 >> 52) | (w5 << 12)) & mask; + out[15] = (w5 >> 10) & mask; + out[16] = (w5 >> 32) & mask; + out[17] = ((w5 >> 54) | (w6 << 10)) & mask; + out[18] = (w6 >> 12) & mask; + out[19] = (w6 >> 34) & mask; + out[20] = ((w6 >> 56) | (w7 << 8)) & mask; + out[21] = (w7 >> 14) & mask; + out[22] = (w7 >> 36) & mask; + out[23] = ((w7 >> 58) | (w8 << 6)) & mask; + out[24] = (w8 >> 16) & mask; + out[25] = (w8 >> 38) & mask; + out[26] = ((w8 >> 60) | (w9 << 4)) & mask; + out[27] = (w9 >> 18) & mask; + out[28] = (w9 >> 40) & mask; + out[29] = ((w9 >> 62) | (w10 << 2)) & mask; + out[30] = (w10 >> 20) & mask; + out[31] = w10 >> 42; + + return in; +} + +inline const uint8_t* unpack23_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8388607ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t 
w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 23) & mask; + out[2] = ((w0 >> 46) | (w1 << 18)) & mask; + out[3] = (w1 >> 5) & mask; + out[4] = (w1 >> 28) & mask; + out[5] = ((w1 >> 51) | (w2 << 13)) & mask; + out[6] = (w2 >> 10) & mask; + out[7] = (w2 >> 33) & mask; + out[8] = ((w2 >> 56) | (w3 << 8)) & mask; + out[9] = (w3 >> 15) & mask; + out[10] = (w3 >> 38) & mask; + out[11] = ((w3 >> 61) | (w4 << 3)) & mask; + out[12] = (w4 >> 20) & mask; + out[13] = ((w4 >> 43) | (w5 << 21)) & mask; + out[14] = (w5 >> 2) & mask; + out[15] = (w5 >> 25) & mask; + out[16] = ((w5 >> 48) | (w6 << 16)) & mask; + out[17] = (w6 >> 7) & mask; + out[18] = (w6 >> 30) & mask; + out[19] = ((w6 >> 53) | (w7 << 11)) & mask; + out[20] = (w7 >> 12) & mask; + out[21] = (w7 >> 35) & mask; + out[22] = ((w7 >> 58) | (w8 << 6)) & mask; + out[23] = (w8 >> 17) & mask; + out[24] = (w8 >> 40) & mask; + out[25] = ((w8 >> 63) | (w9 << 1)) & mask; + out[26] = (w9 >> 22) & mask; + out[27] = ((w9 >> 45) | (w10 << 19)) & mask; + out[28] = (w10 >> 4) & mask; + out[29] = (w10 >> 27) & mask; + out[30] = ((w10 >> 50) | (w11 << 14)) & mask; + out[31] = (w11 >> 9) & mask; + + return in; +} + +inline const uint8_t* unpack24_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16777215ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = 
arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 24) & mask; + out[2] = ((w0 >> 48) | (w1 << 16)) & mask; + out[3] = (w1 >> 8) & mask; + out[4] = (w1 >> 32) & mask; + out[5] = ((w1 >> 56) | (w2 << 8)) & mask; + out[6] = (w2 >> 16) & mask; + out[7] = w2 >> 40; + out[8] = (w3)&mask; + out[9] = (w3 >> 24) & mask; + out[10] = ((w3 >> 48) | (w4 << 16)) & mask; + out[11] = (w4 >> 8) & mask; + out[12] = (w4 >> 32) & mask; + out[13] = ((w4 >> 56) | (w5 << 8)) & mask; + out[14] = (w5 >> 16) & mask; + out[15] = w5 >> 40; + out[16] = (w6)&mask; + out[17] = (w6 >> 24) & mask; + out[18] = ((w6 >> 48) | (w7 << 16)) & mask; + out[19] = (w7 >> 8) & mask; + out[20] = (w7 >> 32) & mask; + out[21] = ((w7 >> 56) | (w8 << 8)) & mask; + out[22] = (w8 >> 16) & mask; + out[23] = w8 >> 40; + out[24] = (w9)&mask; + out[25] = (w9 >> 24) & mask; + out[26] = ((w9 >> 48) | (w10 << 16)) & mask; + out[27] = (w10 >> 8) & mask; + out[28] = (w10 >> 32) & mask; + out[29] = ((w10 >> 56) | (w11 << 8)) & mask; + out[30] = (w11 >> 16) & mask; + out[31] = w11 >> 40; + + return in; +} + +inline const uint8_t* unpack25_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 33554431ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = 
util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 25) & mask; + out[2] = ((w0 >> 50) | (w1 << 14)) & mask; + out[3] = (w1 >> 11) & mask; + out[4] = (w1 >> 36) & mask; + out[5] = ((w1 >> 61) | (w2 << 3)) & mask; + out[6] = (w2 >> 22) & mask; + out[7] = ((w2 >> 47) | (w3 << 17)) & mask; + out[8] = (w3 >> 8) & mask; + out[9] = (w3 >> 33) & mask; + out[10] = ((w3 >> 58) | (w4 << 6)) & mask; + out[11] = (w4 >> 19) & mask; + out[12] = ((w4 >> 44) | (w5 << 20)) & mask; + out[13] = (w5 >> 5) & mask; + out[14] = (w5 >> 30) & mask; + out[15] = ((w5 >> 55) | (w6 << 9)) & mask; + out[16] = (w6 >> 16) & mask; + out[17] = ((w6 >> 41) | (w7 << 23)) & mask; + out[18] = (w7 >> 2) & mask; + out[19] = (w7 >> 27) & mask; + out[20] = ((w7 >> 52) | (w8 << 12)) & mask; + out[21] = (w8 >> 13) & mask; + out[22] = (w8 >> 38) & mask; + out[23] = ((w8 >> 63) | (w9 << 1)) & mask; + out[24] = (w9 >> 24) & mask; + out[25] = ((w9 >> 49) | (w10 << 15)) & mask; + out[26] = (w10 >> 10) & mask; + out[27] = (w10 >> 35) & mask; + out[28] = ((w10 >> 60) | (w11 << 4)) & mask; + out[29] = 
(w11 >> 21) & mask; + out[30] = ((w11 >> 46) | (w12 << 18)) & mask; + out[31] = (w12 >> 7) & mask; + + return in; +} + +inline const uint8_t* unpack26_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 67108863ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 26) & mask; + out[2] = ((w0 >> 52) | (w1 << 12)) & mask; + out[3] = (w1 >> 14) & mask; + out[4] = ((w1 >> 40) | (w2 << 24)) & mask; + out[5] = (w2 >> 2) & mask; + out[6] = (w2 >> 28) & mask; + out[7] = ((w2 >> 54) | (w3 << 10)) & mask; + out[8] = (w3 >> 16) & mask; + out[9] = ((w3 >> 42) | (w4 << 22)) & mask; + out[10] = (w4 >> 4) & mask; + out[11] = (w4 >> 30) & mask; + out[12] = ((w4 >> 56) | (w5 << 8)) & mask; + out[13] = (w5 >> 18) & mask; + out[14] = ((w5 >> 44) | (w6 << 20)) & 
mask; + out[15] = (w6 >> 6) & mask; + out[16] = (w6 >> 32) & mask; + out[17] = ((w6 >> 58) | (w7 << 6)) & mask; + out[18] = (w7 >> 20) & mask; + out[19] = ((w7 >> 46) | (w8 << 18)) & mask; + out[20] = (w8 >> 8) & mask; + out[21] = (w8 >> 34) & mask; + out[22] = ((w8 >> 60) | (w9 << 4)) & mask; + out[23] = (w9 >> 22) & mask; + out[24] = ((w9 >> 48) | (w10 << 16)) & mask; + out[25] = (w10 >> 10) & mask; + out[26] = (w10 >> 36) & mask; + out[27] = ((w10 >> 62) | (w11 << 2)) & mask; + out[28] = (w11 >> 24) & mask; + out[29] = ((w11 >> 50) | (w12 << 14)) & mask; + out[30] = (w12 >> 12) & mask; + out[31] = w12 >> 38; + + return in; +} + +inline const uint8_t* unpack27_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 134217727ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + 
uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 27) & mask; + out[2] = ((w0 >> 54) | (w1 << 10)) & mask; + out[3] = (w1 >> 17) & mask; + out[4] = ((w1 >> 44) | (w2 << 20)) & mask; + out[5] = (w2 >> 7) & mask; + out[6] = (w2 >> 34) & mask; + out[7] = ((w2 >> 61) | (w3 << 3)) & mask; + out[8] = (w3 >> 24) & mask; + out[9] = ((w3 >> 51) | (w4 << 13)) & mask; + out[10] = (w4 >> 14) & mask; + out[11] = ((w4 >> 41) | (w5 << 23)) & mask; + out[12] = (w5 >> 4) & mask; + out[13] = (w5 >> 31) & mask; + out[14] = ((w5 >> 58) | (w6 << 6)) & mask; + out[15] = (w6 >> 21) & mask; + out[16] = ((w6 >> 48) | (w7 << 16)) & mask; + out[17] = (w7 >> 11) & mask; + out[18] = ((w7 >> 38) | (w8 << 26)) & mask; + out[19] = (w8 >> 1) & mask; + out[20] = (w8 >> 28) & mask; + out[21] = ((w8 >> 55) | (w9 << 9)) & mask; + out[22] = (w9 >> 18) & mask; + out[23] = ((w9 >> 45) | (w10 << 19)) & mask; + out[24] = (w10 >> 8) & mask; + out[25] = (w10 >> 35) & mask; + out[26] = ((w10 >> 62) | (w11 << 2)) & mask; + out[27] = (w11 >> 25) & mask; + out[28] = ((w11 >> 52) | (w12 << 12)) & mask; + out[29] = (w12 >> 15) & mask; + out[30] = ((w12 >> 42) | (w13 << 22)) & mask; + out[31] = (w13 >> 5) & mask; + + return in; +} + +inline const uint8_t* unpack28_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 268435455ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = 
util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 28) & mask; + out[2] = ((w0 >> 56) | (w1 << 8)) & mask; + out[3] = (w1 >> 20) & mask; + out[4] = ((w1 >> 48) | (w2 << 16)) & mask; + out[5] = (w2 >> 12) & mask; + out[6] = ((w2 >> 40) | (w3 << 24)) & mask; + out[7] = (w3 >> 4) & mask; + out[8] = (w3 >> 32) & mask; + out[9] = ((w3 >> 60) | (w4 << 4)) & mask; + out[10] = (w4 >> 24) & mask; + out[11] = ((w4 >> 52) | (w5 << 12)) & mask; + out[12] = (w5 >> 16) & mask; + out[13] = ((w5 >> 44) | (w6 << 20)) & mask; + out[14] = (w6 >> 8) & mask; + out[15] = w6 >> 36; + out[16] = (w7)&mask; + out[17] = (w7 >> 28) & mask; + out[18] = ((w7 >> 56) | (w8 << 8)) & mask; + out[19] = (w8 >> 20) & mask; + out[20] = ((w8 >> 48) | (w9 << 16)) & mask; + out[21] = (w9 >> 12) & mask; + out[22] = ((w9 >> 40) | (w10 << 24)) & mask; + out[23] = (w10 >> 4) & mask; + out[24] = (w10 >> 32) & mask; + out[25] = ((w10 >> 60) | (w11 << 4)) & mask; + out[26] = (w11 >> 24) & mask; + out[27] = ((w11 >> 52) | (w12 << 12)) & mask; + out[28] = (w12 >> 16) & mask; + out[29] = ((w12 >> 44) | (w13 << 20)) & mask; + out[30] = (w13 >> 8) & mask; + out[31] = w13 >> 36; + + return in; +} + +inline const uint8_t* unpack29_64(const uint8_t* in, uint64_t* out) { + const uint64_t 
mask = 536870911ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 29) & mask; + out[2] = ((w0 >> 58) | (w1 << 6)) & mask; + out[3] = (w1 >> 23) & mask; + out[4] = ((w1 >> 52) | (w2 << 12)) & mask; + out[5] = (w2 >> 17) & mask; + out[6] = ((w2 >> 46) | (w3 << 18)) & mask; + out[7] = (w3 >> 11) & mask; + out[8] = ((w3 >> 40) | (w4 << 24)) & mask; + out[9] = (w4 >> 5) & mask; + out[10] = (w4 >> 34) & mask; + out[11] = ((w4 >> 63) | (w5 << 1)) & mask; + out[12] = (w5 >> 28) & mask; + out[13] = ((w5 >> 57) | (w6 << 7)) & mask; + out[14] = (w6 >> 22) & mask; + 
out[15] = ((w6 >> 51) | (w7 << 13)) & mask; + out[16] = (w7 >> 16) & mask; + out[17] = ((w7 >> 45) | (w8 << 19)) & mask; + out[18] = (w8 >> 10) & mask; + out[19] = ((w8 >> 39) | (w9 << 25)) & mask; + out[20] = (w9 >> 4) & mask; + out[21] = (w9 >> 33) & mask; + out[22] = ((w9 >> 62) | (w10 << 2)) & mask; + out[23] = (w10 >> 27) & mask; + out[24] = ((w10 >> 56) | (w11 << 8)) & mask; + out[25] = (w11 >> 21) & mask; + out[26] = ((w11 >> 50) | (w12 << 14)) & mask; + out[27] = (w12 >> 15) & mask; + out[28] = ((w12 >> 44) | (w13 << 20)) & mask; + out[29] = (w13 >> 9) & mask; + out[30] = ((w13 >> 38) | (w14 << 26)) & mask; + out[31] = (w14 >> 3) & mask; + + return in; +} + +inline const uint8_t* unpack30_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1073741823ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = 
arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 30) & mask; + out[2] = ((w0 >> 60) | (w1 << 4)) & mask; + out[3] = (w1 >> 26) & mask; + out[4] = ((w1 >> 56) | (w2 << 8)) & mask; + out[5] = (w2 >> 22) & mask; + out[6] = ((w2 >> 52) | (w3 << 12)) & mask; + out[7] = (w3 >> 18) & mask; + out[8] = ((w3 >> 48) | (w4 << 16)) & mask; + out[9] = (w4 >> 14) & mask; + out[10] = ((w4 >> 44) | (w5 << 20)) & mask; + out[11] = (w5 >> 10) & mask; + out[12] = ((w5 >> 40) | (w6 << 24)) & mask; + out[13] = (w6 >> 6) & mask; + out[14] = ((w6 >> 36) | (w7 << 28)) & mask; + out[15] = (w7 >> 2) & mask; + out[16] = (w7 >> 32) & mask; + out[17] = ((w7 >> 62) | (w8 << 2)) & mask; + out[18] = (w8 >> 28) & mask; + out[19] = ((w8 >> 58) | (w9 << 6)) & mask; + out[20] = (w9 >> 24) & mask; + out[21] = ((w9 >> 54) | (w10 << 10)) & mask; + out[22] = (w10 >> 20) & mask; + out[23] = ((w10 >> 50) | (w11 << 14)) & mask; + out[24] = (w11 >> 16) & mask; + out[25] = ((w11 >> 46) | (w12 << 18)) & mask; + out[26] = (w12 >> 12) & mask; + out[27] = ((w12 >> 42) | (w13 << 22)) & mask; + out[28] = (w13 >> 8) & mask; + out[29] = ((w13 >> 38) | (w14 << 26)) & mask; + out[30] = (w14 >> 4) & mask; + out[31] = w14 >> 34; + + return in; +} + +inline const uint8_t* unpack31_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2147483647ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = 
arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 31) & mask; + out[2] = ((w0 >> 62) | (w1 << 2)) & mask; + out[3] = (w1 >> 29) & mask; + out[4] = ((w1 >> 60) | (w2 << 4)) & mask; + out[5] = (w2 >> 27) & mask; + out[6] = ((w2 >> 58) | (w3 << 6)) & mask; + out[7] = (w3 >> 25) & mask; + out[8] = ((w3 >> 56) | (w4 << 8)) & mask; + out[9] = (w4 >> 23) & mask; + out[10] = ((w4 >> 54) | (w5 << 10)) & mask; + out[11] = (w5 >> 21) & mask; + out[12] = ((w5 >> 52) | (w6 << 12)) & mask; + out[13] = (w6 >> 19) & mask; + out[14] = ((w6 >> 50) | (w7 << 14)) & mask; + out[15] = (w7 >> 17) & mask; + out[16] = ((w7 >> 48) | (w8 << 16)) & mask; + out[17] = (w8 >> 15) & mask; + out[18] = ((w8 >> 46) | (w9 << 18)) & mask; + out[19] = (w9 >> 13) & mask; + out[20] = ((w9 >> 44) | (w10 << 20)) & mask; + out[21] = (w10 >> 11) & mask; + out[22] = ((w10 >> 42) | (w11 << 22)) & mask; + out[23] = (w11 >> 9) & 
mask; + out[24] = ((w11 >> 40) | (w12 << 24)) & mask; + out[25] = (w12 >> 7) & mask; + out[26] = ((w12 >> 38) | (w13 << 26)) & mask; + out[27] = (w13 >> 5) & mask; + out[28] = ((w13 >> 36) | (w14 << 28)) & mask; + out[29] = (w14 >> 3) & mask; + out[30] = ((w14 >> 34) | (w15 << 30)) & mask; + out[31] = (w15 >> 1) & mask; + + return in; +} + +inline const uint8_t* unpack32_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4294967295ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + out[0] 
= (w0)&mask; + out[1] = w0 >> 32; + out[2] = (w1)&mask; + out[3] = w1 >> 32; + out[4] = (w2)&mask; + out[5] = w2 >> 32; + out[6] = (w3)&mask; + out[7] = w3 >> 32; + out[8] = (w4)&mask; + out[9] = w4 >> 32; + out[10] = (w5)&mask; + out[11] = w5 >> 32; + out[12] = (w6)&mask; + out[13] = w6 >> 32; + out[14] = (w7)&mask; + out[15] = w7 >> 32; + out[16] = (w8)&mask; + out[17] = w8 >> 32; + out[18] = (w9)&mask; + out[19] = w9 >> 32; + out[20] = (w10)&mask; + out[21] = w10 >> 32; + out[22] = (w11)&mask; + out[23] = w11 >> 32; + out[24] = (w12)&mask; + out[25] = w12 >> 32; + out[26] = (w13)&mask; + out[27] = w13 >> 32; + out[28] = (w14)&mask; + out[29] = w14 >> 32; + out[30] = (w15)&mask; + out[31] = w15 >> 32; + + return in; +} + +inline const uint8_t* unpack33_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8589934591ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t 
w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 33) | (w1 << 31)) & mask; + out[2] = (w1 >> 2) & mask; + out[3] = ((w1 >> 35) | (w2 << 29)) & mask; + out[4] = (w2 >> 4) & mask; + out[5] = ((w2 >> 37) | (w3 << 27)) & mask; + out[6] = (w3 >> 6) & mask; + out[7] = ((w3 >> 39) | (w4 << 25)) & mask; + out[8] = (w4 >> 8) & mask; + out[9] = ((w4 >> 41) | (w5 << 23)) & mask; + out[10] = (w5 >> 10) & mask; + out[11] = ((w5 >> 43) | (w6 << 21)) & mask; + out[12] = (w6 >> 12) & mask; + out[13] = ((w6 >> 45) | (w7 << 19)) & mask; + out[14] = (w7 >> 14) & mask; + out[15] = ((w7 >> 47) | (w8 << 17)) & mask; + out[16] = (w8 >> 16) & mask; + out[17] = ((w8 >> 49) | (w9 << 15)) & mask; + out[18] = (w9 >> 18) & mask; + out[19] = ((w9 >> 51) | (w10 << 13)) & mask; + out[20] = (w10 >> 20) & mask; + out[21] = ((w10 >> 53) | (w11 << 11)) & mask; + out[22] = (w11 >> 22) & mask; + out[23] = ((w11 >> 55) | (w12 << 9)) & mask; + out[24] = (w12 >> 24) & mask; + out[25] = ((w12 >> 57) | (w13 << 7)) & mask; + out[26] = (w13 >> 26) & mask; + out[27] = ((w13 >> 59) | (w14 << 5)) & mask; + out[28] = (w14 >> 28) & mask; + out[29] = ((w14 >> 61) | (w15 << 3)) & mask; + out[30] = (w15 >> 30) & mask; + out[31] = ((w15 >> 63) | (w16 << 1)) & mask; + + return in; +} + +inline const uint8_t* unpack34_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17179869183ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = 
arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 34) | (w1 << 30)) & mask; + out[2] = (w1 >> 4) & mask; + out[3] = ((w1 >> 38) | (w2 << 26)) & mask; + out[4] = (w2 >> 8) & mask; + out[5] = ((w2 >> 42) | (w3 << 22)) & mask; + out[6] = (w3 >> 12) & mask; + out[7] = ((w3 >> 46) | (w4 << 18)) & mask; + out[8] = (w4 >> 16) & mask; + out[9] = ((w4 >> 50) | (w5 << 14)) & mask; + out[10] = (w5 >> 20) & mask; + out[11] = ((w5 >> 54) | (w6 << 10)) & mask; + out[12] = (w6 >> 24) & mask; + out[13] = ((w6 >> 58) | (w7 
<< 6)) & mask; + out[14] = (w7 >> 28) & mask; + out[15] = ((w7 >> 62) | (w8 << 2)) & mask; + out[16] = ((w8 >> 32) | (w9 << 32)) & mask; + out[17] = (w9 >> 2) & mask; + out[18] = ((w9 >> 36) | (w10 << 28)) & mask; + out[19] = (w10 >> 6) & mask; + out[20] = ((w10 >> 40) | (w11 << 24)) & mask; + out[21] = (w11 >> 10) & mask; + out[22] = ((w11 >> 44) | (w12 << 20)) & mask; + out[23] = (w12 >> 14) & mask; + out[24] = ((w12 >> 48) | (w13 << 16)) & mask; + out[25] = (w13 >> 18) & mask; + out[26] = ((w13 >> 52) | (w14 << 12)) & mask; + out[27] = (w14 >> 22) & mask; + out[28] = ((w14 >> 56) | (w15 << 8)) & mask; + out[29] = (w15 >> 26) & mask; + out[30] = ((w15 >> 60) | (w16 << 4)) & mask; + out[31] = w16 >> 30; + + return in; +} + +inline const uint8_t* unpack35_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 34359738367ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + 
uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 35) | (w1 << 29)) & mask; + out[2] = (w1 >> 6) & mask; + out[3] = ((w1 >> 41) | (w2 << 23)) & mask; + out[4] = (w2 >> 12) & mask; + out[5] = ((w2 >> 47) | (w3 << 17)) & mask; + out[6] = (w3 >> 18) & mask; + out[7] = ((w3 >> 53) | (w4 << 11)) & mask; + out[8] = (w4 >> 24) & mask; + out[9] = ((w4 >> 59) | (w5 << 5)) & mask; + out[10] = ((w5 >> 30) | (w6 << 34)) & mask; + out[11] = (w6 >> 1) & mask; + out[12] = ((w6 >> 36) | (w7 << 28)) & mask; + out[13] = (w7 >> 7) & mask; + out[14] = ((w7 >> 42) | (w8 << 22)) & mask; + out[15] = (w8 >> 13) & mask; + out[16] = ((w8 >> 48) | (w9 << 16)) & mask; + out[17] = (w9 >> 19) & mask; + out[18] = ((w9 >> 54) | (w10 << 10)) & mask; + out[19] = (w10 >> 25) & mask; + out[20] = ((w10 >> 60) | (w11 << 4)) & mask; + out[21] = ((w11 >> 31) | (w12 << 33)) & mask; + out[22] = (w12 >> 2) & mask; + out[23] = ((w12 >> 37) | (w13 << 27)) & mask; + out[24] = (w13 >> 8) & mask; + out[25] = ((w13 >> 43) | (w14 << 21)) & mask; + out[26] = (w14 >> 14) & mask; + out[27] = ((w14 >> 49) | (w15 << 15)) & mask; + out[28] = (w15 >> 20) & mask; + out[29] = ((w15 >> 55) | (w16 << 9)) & mask; + out[30] = (w16 >> 26) & mask; + out[31] = ((w16 >> 61) | (w17 << 3)) & mask; + + return in; +} + +inline const uint8_t* unpack36_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 68719476735ULL; + uint64_t w0 = util::SafeLoadAs(in); + 
w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 36) | (w1 << 28)) & mask; + out[2] = (w1 >> 8) & mask; + out[3] = ((w1 >> 44) | (w2 << 20)) & mask; + out[4] = (w2 >> 16) & mask; + out[5] = ((w2 >> 52) | (w3 << 12)) & mask; + out[6] = (w3 >> 24) & mask; + out[7] = ((w3 >> 60) | (w4 << 4)) & mask; + out[8] = ((w4 
>> 32) | (w5 << 32)) & mask; + out[9] = (w5 >> 4) & mask; + out[10] = ((w5 >> 40) | (w6 << 24)) & mask; + out[11] = (w6 >> 12) & mask; + out[12] = ((w6 >> 48) | (w7 << 16)) & mask; + out[13] = (w7 >> 20) & mask; + out[14] = ((w7 >> 56) | (w8 << 8)) & mask; + out[15] = w8 >> 28; + out[16] = (w9)&mask; + out[17] = ((w9 >> 36) | (w10 << 28)) & mask; + out[18] = (w10 >> 8) & mask; + out[19] = ((w10 >> 44) | (w11 << 20)) & mask; + out[20] = (w11 >> 16) & mask; + out[21] = ((w11 >> 52) | (w12 << 12)) & mask; + out[22] = (w12 >> 24) & mask; + out[23] = ((w12 >> 60) | (w13 << 4)) & mask; + out[24] = ((w13 >> 32) | (w14 << 32)) & mask; + out[25] = (w14 >> 4) & mask; + out[26] = ((w14 >> 40) | (w15 << 24)) & mask; + out[27] = (w15 >> 12) & mask; + out[28] = ((w15 >> 48) | (w16 << 16)) & mask; + out[29] = (w16 >> 20) & mask; + out[30] = ((w16 >> 56) | (w17 << 8)) & mask; + out[31] = w17 >> 28; + + return in; +} + +inline const uint8_t* unpack37_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 137438953471ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = 
util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 37) | (w1 << 27)) & mask; + out[2] = (w1 >> 10) & mask; + out[3] = ((w1 >> 47) | (w2 << 17)) & mask; + out[4] = (w2 >> 20) & mask; + out[5] = ((w2 >> 57) | (w3 << 7)) & mask; + out[6] = ((w3 >> 30) | (w4 << 34)) & mask; + out[7] = (w4 >> 3) & mask; + out[8] = ((w4 >> 40) | (w5 << 24)) & mask; + out[9] = (w5 >> 13) & mask; + out[10] = ((w5 >> 50) | (w6 << 14)) & mask; + out[11] = (w6 >> 23) & mask; + out[12] = ((w6 >> 60) | (w7 << 4)) & mask; + out[13] = ((w7 >> 33) | (w8 << 31)) & mask; + out[14] = (w8 >> 6) & mask; + out[15] = ((w8 >> 43) | (w9 << 21)) & mask; + out[16] = (w9 >> 16) & mask; + out[17] = ((w9 >> 53) | (w10 << 11)) & mask; + out[18] = (w10 >> 26) & mask; + out[19] = ((w10 >> 63) | (w11 << 1)) & mask; + out[20] = ((w11 >> 36) | (w12 << 28)) & mask; + out[21] = (w12 >> 9) & mask; + out[22] = ((w12 >> 46) | (w13 << 18)) & mask; + out[23] = (w13 >> 19) & mask; + out[24] = ((w13 >> 56) | (w14 << 8)) & mask; + out[25] = ((w14 >> 29) | (w15 << 35)) & mask; + out[26] = (w15 >> 2) & mask; + out[27] = ((w15 >> 39) | (w16 << 25)) & mask; + out[28] = (w16 >> 12) & mask; + 
out[29] = ((w16 >> 49) | (w17 << 15)) & mask; + out[30] = (w17 >> 22) & mask; + out[31] = ((w17 >> 59) | (w18 << 5)) & mask; + + return in; +} + +inline const uint8_t* unpack38_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 274877906943ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + 
uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 38) | (w1 << 26)) & mask; + out[2] = (w1 >> 12) & mask; + out[3] = ((w1 >> 50) | (w2 << 14)) & mask; + out[4] = (w2 >> 24) & mask; + out[5] = ((w2 >> 62) | (w3 << 2)) & mask; + out[6] = ((w3 >> 36) | (w4 << 28)) & mask; + out[7] = (w4 >> 10) & mask; + out[8] = ((w4 >> 48) | (w5 << 16)) & mask; + out[9] = (w5 >> 22) & mask; + out[10] = ((w5 >> 60) | (w6 << 4)) & mask; + out[11] = ((w6 >> 34) | (w7 << 30)) & mask; + out[12] = (w7 >> 8) & mask; + out[13] = ((w7 >> 46) | (w8 << 18)) & mask; + out[14] = (w8 >> 20) & mask; + out[15] = ((w8 >> 58) | (w9 << 6)) & mask; + out[16] = ((w9 >> 32) | (w10 << 32)) & mask; + out[17] = (w10 >> 6) & mask; + out[18] = ((w10 >> 44) | (w11 << 20)) & mask; + out[19] = (w11 >> 18) & mask; + out[20] = ((w11 >> 56) | (w12 << 8)) & mask; + out[21] = ((w12 >> 30) | (w13 << 34)) & mask; + out[22] = (w13 >> 4) & mask; + out[23] = ((w13 >> 42) | (w14 << 22)) & mask; + out[24] = (w14 >> 16) & mask; + out[25] = ((w14 >> 54) | (w15 << 10)) & mask; + out[26] = ((w15 >> 28) | (w16 << 36)) & mask; + out[27] = (w16 >> 2) & mask; + out[28] = ((w16 >> 40) | (w17 << 24)) & mask; + out[29] = (w17 >> 14) & mask; + out[30] = ((w17 >> 52) | (w18 << 12)) & mask; + out[31] = w18 >> 26; + + return in; +} + +inline const uint8_t* unpack39_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 549755813887ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = 
arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 39) | (w1 << 25)) & mask; + out[2] = (w1 >> 14) & mask; + out[3] = ((w1 >> 53) | (w2 << 11)) & mask; + out[4] = ((w2 >> 28) | (w3 << 36)) & mask; + out[5] = (w3 >> 3) & mask; + out[6] = ((w3 >> 42) | (w4 << 22)) & mask; + out[7] = (w4 >> 17) & mask; + out[8] = ((w4 >> 56) | (w5 << 8)) & mask; + out[9] = ((w5 >> 31) | (w6 << 33)) & mask; + out[10] = (w6 >> 6) & mask; + out[11] = ((w6 >> 45) | (w7 << 19)) & mask; + out[12] = (w7 >> 20) & mask; + out[13] = ((w7 >> 59) | (w8 << 5)) & mask; + out[14] = ((w8 >> 34) | (w9 << 30)) & mask; + out[15] = 
(w9 >> 9) & mask; + out[16] = ((w9 >> 48) | (w10 << 16)) & mask; + out[17] = (w10 >> 23) & mask; + out[18] = ((w10 >> 62) | (w11 << 2)) & mask; + out[19] = ((w11 >> 37) | (w12 << 27)) & mask; + out[20] = (w12 >> 12) & mask; + out[21] = ((w12 >> 51) | (w13 << 13)) & mask; + out[22] = ((w13 >> 26) | (w14 << 38)) & mask; + out[23] = (w14 >> 1) & mask; + out[24] = ((w14 >> 40) | (w15 << 24)) & mask; + out[25] = (w15 >> 15) & mask; + out[26] = ((w15 >> 54) | (w16 << 10)) & mask; + out[27] = ((w16 >> 29) | (w17 << 35)) & mask; + out[28] = (w17 >> 4) & mask; + out[29] = ((w17 >> 43) | (w18 << 21)) & mask; + out[30] = (w18 >> 18) & mask; + out[31] = ((w18 >> 57) | (w19 << 7)) & mask; + + return in; +} + +inline const uint8_t* unpack40_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1099511627775ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = 
util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 40) | (w1 << 24)) & mask; + out[2] = (w1 >> 16) & mask; + out[3] = ((w1 >> 56) | (w2 << 8)) & mask; + out[4] = ((w2 >> 32) | (w3 << 32)) & mask; + out[5] = (w3 >> 8) & mask; + out[6] = ((w3 >> 48) | (w4 << 16)) & mask; + out[7] = w4 >> 24; + out[8] = (w5)&mask; + out[9] = ((w5 >> 40) | (w6 << 24)) & mask; + out[10] = (w6 >> 16) & mask; + out[11] = ((w6 >> 56) | (w7 << 8)) & mask; + out[12] = ((w7 >> 32) | (w8 << 32)) & mask; + out[13] = (w8 >> 8) & mask; + out[14] = ((w8 >> 48) | (w9 << 16)) & mask; + out[15] = w9 >> 24; + out[16] = (w10)&mask; + out[17] = ((w10 >> 40) | (w11 << 24)) & mask; + out[18] = (w11 >> 16) & mask; + out[19] = ((w11 >> 56) | (w12 << 8)) & mask; + out[20] = ((w12 >> 32) | (w13 << 32)) & mask; + out[21] = (w13 >> 8) & mask; + out[22] = ((w13 >> 48) | (w14 << 16)) & mask; + out[23] = w14 >> 24; + out[24] = (w15)&mask; + out[25] = ((w15 >> 40) | (w16 << 24)) & mask; + out[26] = (w16 >> 16) & mask; + out[27] = ((w16 >> 56) | (w17 << 8)) & mask; + out[28] = ((w17 >> 32) | (w18 << 32)) & mask; + out[29] = (w18 >> 8) & mask; + out[30] = ((w18 >> 48) | (w19 << 16)) & mask; + out[31] = w19 >> 24; + + return in; +} + +inline const uint8_t* unpack41_64(const 
uint8_t* in, uint64_t* out) { + const uint64_t mask = 2199023255551ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 
8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 41) | (w1 << 23)) & mask; + out[2] = (w1 >> 18) & mask; + out[3] = ((w1 >> 59) | (w2 << 5)) & mask; + out[4] = ((w2 >> 36) | (w3 << 28)) & mask; + out[5] = (w3 >> 13) & mask; + out[6] = ((w3 >> 54) | (w4 << 10)) & mask; + out[7] = ((w4 >> 31) | (w5 << 33)) & mask; + out[8] = (w5 >> 8) & mask; + out[9] = ((w5 >> 49) | (w6 << 15)) & mask; + out[10] = ((w6 >> 26) | (w7 << 38)) & mask; + out[11] = (w7 >> 3) & mask; + out[12] = ((w7 >> 44) | (w8 << 20)) & mask; + out[13] = (w8 >> 21) & mask; + out[14] = ((w8 >> 62) | (w9 << 2)) & mask; + out[15] = ((w9 >> 39) | (w10 << 25)) & mask; + out[16] = (w10 >> 16) & mask; + out[17] = ((w10 >> 57) | (w11 << 7)) & mask; + out[18] = ((w11 >> 34) | (w12 << 30)) & mask; + out[19] = (w12 >> 11) & mask; + out[20] = ((w12 >> 52) | (w13 << 12)) & mask; + out[21] = ((w13 >> 29) | (w14 << 35)) & mask; + out[22] = (w14 >> 6) & mask; + out[23] = ((w14 >> 47) | (w15 << 17)) & mask; + out[24] = ((w15 >> 24) | (w16 << 40)) & mask; + out[25] = (w16 >> 1) & mask; + out[26] = ((w16 >> 42) | (w17 << 22)) & mask; + out[27] = (w17 >> 19) & mask; + out[28] = ((w17 >> 60) | (w18 << 4)) & mask; + out[29] = ((w18 >> 37) | (w19 << 27)) & mask; + out[30] = (w19 >> 14) & mask; + out[31] = ((w19 >> 55) | (w20 << 9)) & mask; + + return in; +} + +inline const uint8_t* unpack42_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4398046511103ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 
8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 42) | (w1 << 22)) & mask; + out[2] = (w1 >> 20) & mask; + out[3] = ((w1 >> 62) | (w2 << 2)) & mask; + out[4] = ((w2 >> 40) | (w3 << 24)) & mask; + out[5] = (w3 >> 18) & mask; + out[6] = ((w3 >> 60) | (w4 << 4)) & mask; + out[7] = ((w4 >> 38) | (w5 << 26)) & mask; + out[8] = (w5 >> 16) & mask; + out[9] = ((w5 >> 58) | (w6 << 6)) & mask; + out[10] = ((w6 >> 36) | (w7 << 28)) & mask; + out[11] = (w7 >> 
14) & mask; + out[12] = ((w7 >> 56) | (w8 << 8)) & mask; + out[13] = ((w8 >> 34) | (w9 << 30)) & mask; + out[14] = (w9 >> 12) & mask; + out[15] = ((w9 >> 54) | (w10 << 10)) & mask; + out[16] = ((w10 >> 32) | (w11 << 32)) & mask; + out[17] = (w11 >> 10) & mask; + out[18] = ((w11 >> 52) | (w12 << 12)) & mask; + out[19] = ((w12 >> 30) | (w13 << 34)) & mask; + out[20] = (w13 >> 8) & mask; + out[21] = ((w13 >> 50) | (w14 << 14)) & mask; + out[22] = ((w14 >> 28) | (w15 << 36)) & mask; + out[23] = (w15 >> 6) & mask; + out[24] = ((w15 >> 48) | (w16 << 16)) & mask; + out[25] = ((w16 >> 26) | (w17 << 38)) & mask; + out[26] = (w17 >> 4) & mask; + out[27] = ((w17 >> 46) | (w18 << 18)) & mask; + out[28] = ((w18 >> 24) | (w19 << 40)) & mask; + out[29] = (w19 >> 2) & mask; + out[30] = ((w19 >> 44) | (w20 << 20)) & mask; + out[31] = w20 >> 22; + + return in; +} + +inline const uint8_t* unpack43_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8796093022207ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = 
arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 43) | (w1 << 21)) & mask; + out[2] = ((w1 >> 22) | (w2 << 42)) & mask; + out[3] = (w2 >> 1) & mask; + out[4] = ((w2 >> 44) | (w3 << 20)) & mask; + out[5] = ((w3 >> 23) | (w4 << 41)) & mask; + out[6] = (w4 >> 2) & mask; + out[7] = ((w4 >> 45) | (w5 << 19)) & mask; + out[8] = ((w5 >> 24) | (w6 << 40)) & mask; + out[9] = (w6 >> 3) & mask; + out[10] = ((w6 >> 46) | (w7 << 18)) & mask; + out[11] = ((w7 >> 25) | (w8 << 39)) & mask; + out[12] = (w8 >> 4) & mask; + out[13] = ((w8 >> 47) | (w9 << 17)) & mask; + out[14] = ((w9 >> 26) | (w10 << 38)) & mask; + out[15] = (w10 >> 5) & mask; + out[16] = ((w10 >> 48) | (w11 << 16)) & mask; + out[17] = ((w11 >> 27) | (w12 << 37)) & mask; + out[18] = (w12 >> 6) & mask; + out[19] = ((w12 >> 49) | (w13 << 15)) & mask; + out[20] = ((w13 >> 28) | (w14 << 36)) & mask; + out[21] = (w14 >> 7) 
& mask; + out[22] = ((w14 >> 50) | (w15 << 14)) & mask; + out[23] = ((w15 >> 29) | (w16 << 35)) & mask; + out[24] = (w16 >> 8) & mask; + out[25] = ((w16 >> 51) | (w17 << 13)) & mask; + out[26] = ((w17 >> 30) | (w18 << 34)) & mask; + out[27] = (w18 >> 9) & mask; + out[28] = ((w18 >> 52) | (w19 << 12)) & mask; + out[29] = ((w19 >> 31) | (w20 << 33)) & mask; + out[30] = (w20 >> 10) & mask; + out[31] = ((w20 >> 53) | (w21 << 11)) & mask; + + return in; +} + +inline const uint8_t* unpack44_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17592186044415ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = 
arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 44) | (w1 << 20)) & mask; + out[2] = ((w1 >> 24) | (w2 << 40)) & mask; + out[3] = (w2 >> 4) & mask; + out[4] = ((w2 >> 48) | (w3 << 16)) & mask; + out[5] = ((w3 >> 28) | (w4 << 36)) & mask; + out[6] = (w4 >> 8) & mask; + out[7] = ((w4 >> 52) | (w5 << 12)) & mask; + out[8] = ((w5 >> 32) | (w6 << 32)) & mask; + out[9] = (w6 >> 12) & mask; + out[10] = ((w6 >> 56) | (w7 << 8)) & mask; + out[11] = ((w7 >> 36) | (w8 << 28)) & mask; + out[12] = (w8 >> 16) & mask; + out[13] = ((w8 >> 60) | (w9 << 4)) & mask; + out[14] = ((w9 >> 40) | (w10 << 24)) & mask; + out[15] = w10 >> 20; + out[16] = (w11)&mask; + out[17] = ((w11 >> 44) | (w12 << 20)) & mask; + out[18] = ((w12 >> 24) | (w13 << 40)) & mask; + out[19] = (w13 >> 4) & mask; + out[20] = ((w13 >> 48) | (w14 << 16)) & mask; + out[21] = ((w14 >> 28) | (w15 << 36)) & mask; + out[22] = (w15 >> 8) & mask; + out[23] = ((w15 >> 52) | (w16 << 12)) & mask; + out[24] = ((w16 >> 32) | (w17 << 32)) & mask; + out[25] = (w17 >> 12) & mask; + out[26] = ((w17 >> 56) | (w18 << 8)) & mask; + out[27] = ((w18 >> 36) | (w19 << 28)) & mask; + out[28] = (w19 >> 16) & mask; + out[29] = ((w19 >> 60) | (w20 << 4)) & mask; + out[30] = ((w20 >> 40) | (w21 << 24)) & mask; + out[31] = w21 
>> 20; + + return in; +} + +inline const uint8_t* unpack45_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 35184372088831ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = 
util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 45) | (w1 << 19)) & mask; + out[2] = ((w1 >> 26) | (w2 << 38)) & mask; + out[3] = (w2 >> 7) & mask; + out[4] = ((w2 >> 52) | (w3 << 12)) & mask; + out[5] = ((w3 >> 33) | (w4 << 31)) & mask; + out[6] = (w4 >> 14) & mask; + out[7] = ((w4 >> 59) | (w5 << 5)) & mask; + out[8] = ((w5 >> 40) | (w6 << 24)) & mask; + out[9] = ((w6 >> 21) | (w7 << 43)) & mask; + out[10] = (w7 >> 2) & mask; + out[11] = ((w7 >> 47) | (w8 << 17)) & mask; + out[12] = ((w8 >> 28) | (w9 << 36)) & mask; + out[13] = (w9 >> 9) & mask; + out[14] = ((w9 >> 54) | (w10 << 10)) & mask; + out[15] = ((w10 >> 35) | (w11 << 29)) & mask; + out[16] = (w11 >> 16) & mask; + out[17] = ((w11 >> 61) | (w12 << 3)) & mask; + out[18] = ((w12 >> 42) | (w13 << 22)) & mask; + out[19] = ((w13 >> 23) | (w14 << 41)) & mask; + out[20] = (w14 >> 4) & mask; + out[21] = ((w14 >> 49) | (w15 << 15)) & mask; + out[22] = ((w15 >> 30) | (w16 << 34)) & mask; + out[23] = (w16 >> 11) & mask; + out[24] = ((w16 >> 56) | (w17 << 8)) & mask; + out[25] = ((w17 >> 37) | (w18 << 27)) & mask; + out[26] = (w18 >> 18) & mask; + out[27] = ((w18 >> 63) | (w19 << 1)) & mask; + out[28] = ((w19 >> 44) | (w20 << 20)) & mask; + out[29] = ((w20 >> 25) | (w21 << 39)) & mask; + out[30] = (w21 >> 6) & mask; + out[31] = ((w21 >> 51) | (w22 << 13)) & mask; + + return in; +} + +inline const uint8_t* unpack46_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 70368744177663ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = 
arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t 
w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 46) | (w1 << 18)) & mask; + out[2] = ((w1 >> 28) | (w2 << 36)) & mask; + out[3] = (w2 >> 10) & mask; + out[4] = ((w2 >> 56) | (w3 << 8)) & mask; + out[5] = ((w3 >> 38) | (w4 << 26)) & mask; + out[6] = ((w4 >> 20) | (w5 << 44)) & mask; + out[7] = (w5 >> 2) & mask; + out[8] = ((w5 >> 48) | (w6 << 16)) & mask; + out[9] = ((w6 >> 30) | (w7 << 34)) & mask; + out[10] = (w7 >> 12) & mask; + out[11] = ((w7 >> 58) | (w8 << 6)) & mask; + out[12] = ((w8 >> 40) | (w9 << 24)) & mask; + out[13] = ((w9 >> 22) | (w10 << 42)) & mask; + out[14] = (w10 >> 4) & mask; + out[15] = ((w10 >> 50) | (w11 << 14)) & mask; + out[16] = ((w11 >> 32) | (w12 << 32)) & mask; + out[17] = (w12 >> 14) & mask; + out[18] = ((w12 >> 60) | (w13 << 4)) & mask; + out[19] = ((w13 >> 42) | (w14 << 22)) & mask; + out[20] = ((w14 >> 24) | (w15 << 40)) & mask; + out[21] = (w15 >> 6) & mask; + out[22] = ((w15 >> 52) | (w16 << 12)) & mask; + out[23] = ((w16 >> 34) | (w17 << 30)) & mask; + out[24] = (w17 >> 16) & mask; + out[25] = ((w17 >> 62) | (w18 << 2)) & mask; + out[26] = ((w18 >> 44) | (w19 << 20)) & mask; + out[27] = ((w19 >> 26) | (w20 << 38)) & mask; + out[28] = (w20 >> 8) & mask; + out[29] = ((w20 >> 54) | (w21 << 10)) & mask; + out[30] = ((w21 >> 36) | (w22 << 28)) & mask; + out[31] = w22 >> 18; + + return in; +} + +inline const uint8_t* unpack47_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 140737488355327ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = 
arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 47) | (w1 << 17)) & mask; + out[2] = ((w1 >> 30) | (w2 << 
34)) & mask; + out[3] = (w2 >> 13) & mask; + out[4] = ((w2 >> 60) | (w3 << 4)) & mask; + out[5] = ((w3 >> 43) | (w4 << 21)) & mask; + out[6] = ((w4 >> 26) | (w5 << 38)) & mask; + out[7] = (w5 >> 9) & mask; + out[8] = ((w5 >> 56) | (w6 << 8)) & mask; + out[9] = ((w6 >> 39) | (w7 << 25)) & mask; + out[10] = ((w7 >> 22) | (w8 << 42)) & mask; + out[11] = (w8 >> 5) & mask; + out[12] = ((w8 >> 52) | (w9 << 12)) & mask; + out[13] = ((w9 >> 35) | (w10 << 29)) & mask; + out[14] = ((w10 >> 18) | (w11 << 46)) & mask; + out[15] = (w11 >> 1) & mask; + out[16] = ((w11 >> 48) | (w12 << 16)) & mask; + out[17] = ((w12 >> 31) | (w13 << 33)) & mask; + out[18] = (w13 >> 14) & mask; + out[19] = ((w13 >> 61) | (w14 << 3)) & mask; + out[20] = ((w14 >> 44) | (w15 << 20)) & mask; + out[21] = ((w15 >> 27) | (w16 << 37)) & mask; + out[22] = (w16 >> 10) & mask; + out[23] = ((w16 >> 57) | (w17 << 7)) & mask; + out[24] = ((w17 >> 40) | (w18 << 24)) & mask; + out[25] = ((w18 >> 23) | (w19 << 41)) & mask; + out[26] = (w19 >> 6) & mask; + out[27] = ((w19 >> 53) | (w20 << 11)) & mask; + out[28] = ((w20 >> 36) | (w21 << 28)) & mask; + out[29] = ((w21 >> 19) | (w22 << 45)) & mask; + out[30] = (w22 >> 2) & mask; + out[31] = ((w22 >> 49) | (w23 << 15)) & mask; + + return in; +} + +inline const uint8_t* unpack48_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 281474976710655ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = 
arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 48) | (w1 << 16)) & mask; + out[2] = ((w1 >> 32) | (w2 << 32)) & mask; + out[3] = w2 >> 16; + out[4] = (w3)&mask; + out[5] = ((w3 >> 48) | (w4 << 16)) & mask; + out[6] = ((w4 >> 32) | (w5 << 32)) & mask; + out[7] = w5 >> 16; + out[8] = (w6)&mask; + 
out[9] = ((w6 >> 48) | (w7 << 16)) & mask; + out[10] = ((w7 >> 32) | (w8 << 32)) & mask; + out[11] = w8 >> 16; + out[12] = (w9)&mask; + out[13] = ((w9 >> 48) | (w10 << 16)) & mask; + out[14] = ((w10 >> 32) | (w11 << 32)) & mask; + out[15] = w11 >> 16; + out[16] = (w12)&mask; + out[17] = ((w12 >> 48) | (w13 << 16)) & mask; + out[18] = ((w13 >> 32) | (w14 << 32)) & mask; + out[19] = w14 >> 16; + out[20] = (w15)&mask; + out[21] = ((w15 >> 48) | (w16 << 16)) & mask; + out[22] = ((w16 >> 32) | (w17 << 32)) & mask; + out[23] = w17 >> 16; + out[24] = (w18)&mask; + out[25] = ((w18 >> 48) | (w19 << 16)) & mask; + out[26] = ((w19 >> 32) | (w20 << 32)) & mask; + out[27] = w20 >> 16; + out[28] = (w21)&mask; + out[29] = ((w21 >> 48) | (w22 << 16)) & mask; + out[30] = ((w22 >> 32) | (w23 << 32)) & mask; + out[31] = w23 >> 16; + + return in; +} + +inline const uint8_t* unpack49_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 562949953421311ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + 
in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 49) | (w1 << 15)) & mask; + out[2] = ((w1 >> 34) | (w2 << 30)) & mask; + out[3] = ((w2 >> 19) | (w3 << 45)) & mask; + out[4] = (w3 >> 4) & mask; + out[5] = ((w3 >> 53) | (w4 << 11)) & mask; + out[6] = ((w4 >> 38) | (w5 << 26)) & mask; + out[7] = ((w5 >> 23) | (w6 << 41)) & mask; + out[8] = (w6 >> 8) & mask; + out[9] = ((w6 >> 57) | (w7 << 7)) & mask; + out[10] = ((w7 >> 42) | (w8 << 22)) & mask; + out[11] = ((w8 >> 27) | (w9 << 37)) & mask; + out[12] = (w9 >> 12) & mask; + out[13] = ((w9 >> 61) | (w10 << 3)) & mask; + out[14] = ((w10 >> 46) | (w11 << 18)) & mask; + 
out[15] = ((w11 >> 31) | (w12 << 33)) & mask; + out[16] = ((w12 >> 16) | (w13 << 48)) & mask; + out[17] = (w13 >> 1) & mask; + out[18] = ((w13 >> 50) | (w14 << 14)) & mask; + out[19] = ((w14 >> 35) | (w15 << 29)) & mask; + out[20] = ((w15 >> 20) | (w16 << 44)) & mask; + out[21] = (w16 >> 5) & mask; + out[22] = ((w16 >> 54) | (w17 << 10)) & mask; + out[23] = ((w17 >> 39) | (w18 << 25)) & mask; + out[24] = ((w18 >> 24) | (w19 << 40)) & mask; + out[25] = (w19 >> 9) & mask; + out[26] = ((w19 >> 58) | (w20 << 6)) & mask; + out[27] = ((w20 >> 43) | (w21 << 21)) & mask; + out[28] = ((w21 >> 28) | (w22 << 36)) & mask; + out[29] = (w22 >> 13) & mask; + out[30] = ((w22 >> 62) | (w23 << 2)) & mask; + out[31] = ((w23 >> 47) | (w24 << 17)) & mask; + + return in; +} + +inline const uint8_t* unpack50_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1125899906842623ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = 
arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 50) | (w1 << 14)) & mask; + out[2] = ((w1 >> 36) | (w2 << 28)) & mask; + out[3] = ((w2 >> 22) | (w3 << 42)) & mask; + out[4] = (w3 >> 8) & mask; + out[5] = ((w3 >> 58) | (w4 << 6)) & mask; + out[6] = ((w4 >> 44) | (w5 << 20)) & mask; + out[7] = ((w5 >> 30) | (w6 << 34)) & mask; + out[8] = ((w6 >> 16) | (w7 << 48)) & mask; + out[9] = (w7 >> 2) & mask; + out[10] = ((w7 >> 52) | (w8 << 12)) & mask; + out[11] = ((w8 >> 38) | (w9 << 26)) & mask; + out[12] = ((w9 >> 24) | (w10 << 40)) & mask; + out[13] = (w10 >> 10) & mask; + out[14] = ((w10 >> 60) | (w11 << 4)) & mask; + out[15] = ((w11 >> 46) | (w12 << 18)) & mask; + out[16] = 
((w12 >> 32) | (w13 << 32)) & mask; + out[17] = ((w13 >> 18) | (w14 << 46)) & mask; + out[18] = (w14 >> 4) & mask; + out[19] = ((w14 >> 54) | (w15 << 10)) & mask; + out[20] = ((w15 >> 40) | (w16 << 24)) & mask; + out[21] = ((w16 >> 26) | (w17 << 38)) & mask; + out[22] = (w17 >> 12) & mask; + out[23] = ((w17 >> 62) | (w18 << 2)) & mask; + out[24] = ((w18 >> 48) | (w19 << 16)) & mask; + out[25] = ((w19 >> 34) | (w20 << 30)) & mask; + out[26] = ((w20 >> 20) | (w21 << 44)) & mask; + out[27] = (w21 >> 6) & mask; + out[28] = ((w21 >> 56) | (w22 << 8)) & mask; + out[29] = ((w22 >> 42) | (w23 << 22)) & mask; + out[30] = ((w23 >> 28) | (w24 << 36)) & mask; + out[31] = w24 >> 14; + + return in; +} + +inline const uint8_t* unpack51_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2251799813685247ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); 
+ w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 51) | (w1 << 13)) & mask; + out[2] = ((w1 >> 38) | (w2 << 26)) & mask; + out[3] = ((w2 >> 25) | (w3 << 39)) & mask; + out[4] = (w3 >> 12) & mask; + out[5] = ((w3 >> 63) | (w4 << 1)) & mask; + out[6] = ((w4 >> 50) | (w5 << 14)) & mask; + out[7] = ((w5 >> 37) | (w6 << 27)) & mask; + out[8] = ((w6 >> 24) | (w7 << 40)) & mask; + out[9] = (w7 >> 11) & mask; + out[10] = ((w7 >> 62) | (w8 << 2)) & mask; + out[11] = ((w8 >> 49) | (w9 << 15)) & mask; + out[12] = ((w9 >> 36) | (w10 << 28)) & mask; + out[13] = ((w10 >> 23) | (w11 << 41)) & mask; + out[14] = (w11 >> 10) & mask; + out[15] = ((w11 >> 61) | (w12 << 3)) & mask; + 
out[16] = ((w12 >> 48) | (w13 << 16)) & mask; + out[17] = ((w13 >> 35) | (w14 << 29)) & mask; + out[18] = ((w14 >> 22) | (w15 << 42)) & mask; + out[19] = (w15 >> 9) & mask; + out[20] = ((w15 >> 60) | (w16 << 4)) & mask; + out[21] = ((w16 >> 47) | (w17 << 17)) & mask; + out[22] = ((w17 >> 34) | (w18 << 30)) & mask; + out[23] = ((w18 >> 21) | (w19 << 43)) & mask; + out[24] = (w19 >> 8) & mask; + out[25] = ((w19 >> 59) | (w20 << 5)) & mask; + out[26] = ((w20 >> 46) | (w21 << 18)) & mask; + out[27] = ((w21 >> 33) | (w22 << 31)) & mask; + out[28] = ((w22 >> 20) | (w23 << 44)) & mask; + out[29] = (w23 >> 7) & mask; + out[30] = ((w23 >> 58) | (w24 << 6)) & mask; + out[31] = ((w24 >> 45) | (w25 << 19)) & mask; + + return in; +} + +inline const uint8_t* unpack52_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4503599627370495ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + 
uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 52) | (w1 << 12)) & mask; + out[2] = ((w1 >> 40) | (w2 << 24)) & mask; + out[3] = ((w2 >> 28) | (w3 << 36)) & mask; + out[4] = ((w3 >> 16) | (w4 << 48)) & mask; + out[5] = (w4 >> 4) & mask; + out[6] = ((w4 >> 56) | (w5 << 8)) & mask; + out[7] = ((w5 >> 44) | (w6 << 20)) & mask; + out[8] = ((w6 >> 32) | (w7 << 32)) & mask; + out[9] = ((w7 >> 20) | (w8 << 44)) & mask; + out[10] = (w8 >> 8) & mask; + out[11] = ((w8 >> 60) | (w9 << 4)) & mask; + out[12] = ((w9 >> 48) | (w10 << 16)) & mask; + out[13] = ((w10 >> 36) | (w11 << 28)) & mask; + out[14] = ((w11 >> 24) | (w12 << 40)) & mask; + 
out[15] = w12 >> 12; + out[16] = (w13)&mask; + out[17] = ((w13 >> 52) | (w14 << 12)) & mask; + out[18] = ((w14 >> 40) | (w15 << 24)) & mask; + out[19] = ((w15 >> 28) | (w16 << 36)) & mask; + out[20] = ((w16 >> 16) | (w17 << 48)) & mask; + out[21] = (w17 >> 4) & mask; + out[22] = ((w17 >> 56) | (w18 << 8)) & mask; + out[23] = ((w18 >> 44) | (w19 << 20)) & mask; + out[24] = ((w19 >> 32) | (w20 << 32)) & mask; + out[25] = ((w20 >> 20) | (w21 << 44)) & mask; + out[26] = (w21 >> 8) & mask; + out[27] = ((w21 >> 60) | (w22 << 4)) & mask; + out[28] = ((w22 >> 48) | (w23 << 16)) & mask; + out[29] = ((w23 >> 36) | (w24 << 28)) & mask; + out[30] = ((w24 >> 24) | (w25 << 40)) & mask; + out[31] = w25 >> 12; + + return in; +} + +inline const uint8_t* unpack53_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9007199254740991ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t 
w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 53) | (w1 << 11)) & mask; + out[2] = ((w1 >> 42) | (w2 << 22)) & mask; + out[3] = ((w2 >> 31) | (w3 << 33)) & mask; + out[4] = ((w3 >> 20) | (w4 << 44)) & mask; + out[5] = (w4 >> 9) & mask; + out[6] = ((w4 >> 62) | (w5 << 2)) & mask; + out[7] = ((w5 >> 51) | (w6 << 13)) & mask; + out[8] = ((w6 >> 40) | (w7 << 24)) & mask; + out[9] = ((w7 >> 29) | (w8 << 35)) & mask; + out[10] = ((w8 >> 18) | (w9 << 46)) & mask; + out[11] = (w9 >> 7) & mask; + out[12] = ((w9 >> 60) | (w10 << 4)) & mask; + 
out[13] = ((w10 >> 49) | (w11 << 15)) & mask; + out[14] = ((w11 >> 38) | (w12 << 26)) & mask; + out[15] = ((w12 >> 27) | (w13 << 37)) & mask; + out[16] = ((w13 >> 16) | (w14 << 48)) & mask; + out[17] = (w14 >> 5) & mask; + out[18] = ((w14 >> 58) | (w15 << 6)) & mask; + out[19] = ((w15 >> 47) | (w16 << 17)) & mask; + out[20] = ((w16 >> 36) | (w17 << 28)) & mask; + out[21] = ((w17 >> 25) | (w18 << 39)) & mask; + out[22] = ((w18 >> 14) | (w19 << 50)) & mask; + out[23] = (w19 >> 3) & mask; + out[24] = ((w19 >> 56) | (w20 << 8)) & mask; + out[25] = ((w20 >> 45) | (w21 << 19)) & mask; + out[26] = ((w21 >> 34) | (w22 << 30)) & mask; + out[27] = ((w22 >> 23) | (w23 << 41)) & mask; + out[28] = ((w23 >> 12) | (w24 << 52)) & mask; + out[29] = (w24 >> 1) & mask; + out[30] = ((w24 >> 54) | (w25 << 10)) & mask; + out[31] = ((w25 >> 43) | (w26 << 21)) & mask; + + return in; +} + +inline const uint8_t* unpack54_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 18014398509481983ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = 
arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 54) | (w1 << 10)) & mask; + out[2] = ((w1 >> 44) | (w2 << 20)) & mask; + out[3] = ((w2 >> 34) | (w3 << 30)) & mask; + out[4] = ((w3 >> 24) | (w4 << 40)) & mask; + out[5] = ((w4 >> 14) | (w5 << 50)) & mask; + out[6] = (w5 >> 4) & mask; + out[7] = ((w5 >> 58) | (w6 << 6)) & mask; + out[8] = ((w6 >> 48) | (w7 << 16)) & mask; + out[9] = ((w7 
>> 38) | (w8 << 26)) & mask; + out[10] = ((w8 >> 28) | (w9 << 36)) & mask; + out[11] = ((w9 >> 18) | (w10 << 46)) & mask; + out[12] = (w10 >> 8) & mask; + out[13] = ((w10 >> 62) | (w11 << 2)) & mask; + out[14] = ((w11 >> 52) | (w12 << 12)) & mask; + out[15] = ((w12 >> 42) | (w13 << 22)) & mask; + out[16] = ((w13 >> 32) | (w14 << 32)) & mask; + out[17] = ((w14 >> 22) | (w15 << 42)) & mask; + out[18] = ((w15 >> 12) | (w16 << 52)) & mask; + out[19] = (w16 >> 2) & mask; + out[20] = ((w16 >> 56) | (w17 << 8)) & mask; + out[21] = ((w17 >> 46) | (w18 << 18)) & mask; + out[22] = ((w18 >> 36) | (w19 << 28)) & mask; + out[23] = ((w19 >> 26) | (w20 << 38)) & mask; + out[24] = ((w20 >> 16) | (w21 << 48)) & mask; + out[25] = (w21 >> 6) & mask; + out[26] = ((w21 >> 60) | (w22 << 4)) & mask; + out[27] = ((w22 >> 50) | (w23 << 14)) & mask; + out[28] = ((w23 >> 40) | (w24 << 24)) & mask; + out[29] = ((w24 >> 30) | (w25 << 34)) & mask; + out[30] = ((w25 >> 20) | (w26 << 44)) & mask; + out[31] = w26 >> 10; + + return in; +} + +inline const uint8_t* unpack55_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 36028797018963967ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + 
uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 55) | (w1 << 9)) & mask; + out[2] = ((w1 >> 46) | (w2 << 18)) & mask; + out[3] = ((w2 >> 37) | (w3 << 
27)) & mask; + out[4] = ((w3 >> 28) | (w4 << 36)) & mask; + out[5] = ((w4 >> 19) | (w5 << 45)) & mask; + out[6] = ((w5 >> 10) | (w6 << 54)) & mask; + out[7] = (w6 >> 1) & mask; + out[8] = ((w6 >> 56) | (w7 << 8)) & mask; + out[9] = ((w7 >> 47) | (w8 << 17)) & mask; + out[10] = ((w8 >> 38) | (w9 << 26)) & mask; + out[11] = ((w9 >> 29) | (w10 << 35)) & mask; + out[12] = ((w10 >> 20) | (w11 << 44)) & mask; + out[13] = ((w11 >> 11) | (w12 << 53)) & mask; + out[14] = (w12 >> 2) & mask; + out[15] = ((w12 >> 57) | (w13 << 7)) & mask; + out[16] = ((w13 >> 48) | (w14 << 16)) & mask; + out[17] = ((w14 >> 39) | (w15 << 25)) & mask; + out[18] = ((w15 >> 30) | (w16 << 34)) & mask; + out[19] = ((w16 >> 21) | (w17 << 43)) & mask; + out[20] = ((w17 >> 12) | (w18 << 52)) & mask; + out[21] = (w18 >> 3) & mask; + out[22] = ((w18 >> 58) | (w19 << 6)) & mask; + out[23] = ((w19 >> 49) | (w20 << 15)) & mask; + out[24] = ((w20 >> 40) | (w21 << 24)) & mask; + out[25] = ((w21 >> 31) | (w22 << 33)) & mask; + out[26] = ((w22 >> 22) | (w23 << 42)) & mask; + out[27] = ((w23 >> 13) | (w24 << 51)) & mask; + out[28] = (w24 >> 4) & mask; + out[29] = ((w24 >> 59) | (w25 << 5)) & mask; + out[30] = ((w25 >> 50) | (w26 << 14)) & mask; + out[31] = ((w26 >> 41) | (w27 << 23)) & mask; + + return in; +} + +inline const uint8_t* unpack56_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 72057594037927935ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = 
util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = 
arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 56) | (w1 << 8)) & mask; + out[2] = ((w1 >> 48) | (w2 << 16)) & mask; + out[3] = ((w2 >> 40) | (w3 << 24)) & mask; + out[4] = ((w3 >> 32) | (w4 << 32)) & mask; + out[5] = ((w4 >> 24) | (w5 << 40)) & mask; + out[6] = ((w5 >> 16) | (w6 << 48)) & mask; + out[7] = w6 >> 8; + out[8] = (w7)&mask; + out[9] = ((w7 >> 56) | (w8 << 8)) & mask; + out[10] = ((w8 >> 48) | (w9 << 16)) & mask; + out[11] = ((w9 >> 40) | (w10 << 24)) & mask; + out[12] = ((w10 >> 32) | (w11 << 32)) & mask; + out[13] = ((w11 >> 24) | (w12 << 40)) & mask; + out[14] = ((w12 >> 16) | (w13 << 48)) & mask; + out[15] = w13 >> 8; + out[16] = (w14)&mask; + out[17] = ((w14 >> 56) | (w15 << 8)) & mask; + out[18] = ((w15 >> 48) | (w16 << 16)) & mask; + out[19] = ((w16 >> 40) | (w17 << 24)) & mask; + out[20] = ((w17 >> 32) | (w18 << 32)) & mask; + out[21] = ((w18 >> 24) | (w19 << 40)) & mask; + out[22] = ((w19 >> 16) | (w20 << 48)) & mask; + out[23] = w20 >> 8; + out[24] = (w21)&mask; + out[25] = ((w21 >> 56) | (w22 << 8)) & mask; + out[26] = ((w22 >> 48) | (w23 << 16)) & mask; + out[27] = ((w23 >> 40) | (w24 << 24)) & mask; + out[28] = ((w24 >> 32) | (w25 << 32)) & mask; + out[29] = ((w25 >> 24) | (w26 << 40)) & mask; + out[30] = ((w26 >> 16) | (w27 << 48)) & mask; + out[31] = w27 >> 8; + + return in; +} + +inline const uint8_t* unpack57_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 144115188075855871ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = 
util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = 
arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 57) | (w1 << 7)) & mask; + out[2] = ((w1 >> 50) | (w2 << 14)) & mask; + out[3] = ((w2 >> 43) | (w3 << 21)) & mask; + out[4] = ((w3 >> 36) | (w4 << 28)) & mask; + out[5] = ((w4 >> 29) | (w5 << 35)) & mask; + out[6] = ((w5 >> 22) | (w6 << 42)) & mask; + out[7] = ((w6 >> 15) | (w7 << 49)) & mask; + out[8] = ((w7 >> 8) | (w8 << 56)) & mask; + out[9] = (w8 >> 1) & mask; + out[10] = ((w8 >> 58) | (w9 << 6)) & mask; + out[11] = ((w9 >> 51) | (w10 << 13)) & mask; + out[12] = ((w10 >> 44) | (w11 << 20)) & mask; + out[13] = ((w11 >> 37) | (w12 << 27)) & mask; + out[14] = ((w12 >> 30) | (w13 << 34)) & mask; + out[15] = ((w13 >> 23) | (w14 << 41)) & mask; + out[16] = ((w14 >> 16) | (w15 << 48)) & mask; + out[17] = ((w15 >> 9) | (w16 << 55)) & mask; + out[18] = (w16 >> 2) & mask; + out[19] = ((w16 >> 59) | (w17 << 5)) & mask; + out[20] = ((w17 >> 52) | (w18 << 12)) & mask; + out[21] = ((w18 >> 45) | (w19 << 19)) & mask; + out[22] = ((w19 >> 38) | (w20 << 26)) & mask; + out[23] = ((w20 >> 31) | (w21 << 33)) & mask; + out[24] = ((w21 >> 24) | (w22 << 40)) & mask; + out[25] = ((w22 >> 17) | (w23 << 47)) & mask; + out[26] = ((w23 >> 10) | (w24 << 54)) & mask; + out[27] = (w24 >> 3) & mask; + out[28] = ((w24 >> 60) | (w25 << 4)) & mask; + out[29] = ((w25 >> 53) | (w26 << 11)) & mask; + out[30] = ((w26 >> 46) | (w27 << 18)) & mask; + out[31] = ((w27 >> 39) | (w28 << 25)) & mask; + + return in; +} + +inline const uint8_t* unpack58_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 
288230376151711743ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = 
arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 58) | (w1 << 6)) & mask; + out[2] = ((w1 >> 52) | (w2 << 12)) & mask; + out[3] = ((w2 >> 46) | (w3 << 18)) & mask; + out[4] = ((w3 >> 40) | (w4 << 24)) & mask; + out[5] = ((w4 >> 34) | (w5 << 30)) & mask; + out[6] = ((w5 >> 28) | (w6 << 36)) & mask; + out[7] = ((w6 >> 22) | (w7 << 42)) & mask; + out[8] = ((w7 >> 16) | (w8 << 48)) & mask; + out[9] = ((w8 >> 10) | (w9 << 54)) & mask; + out[10] = (w9 >> 4) & mask; + out[11] = ((w9 >> 62) | (w10 << 2)) & mask; + out[12] = ((w10 >> 56) | (w11 << 8)) & mask; + out[13] = ((w11 >> 50) | (w12 << 14)) & mask; + out[14] = ((w12 >> 44) | (w13 << 20)) & mask; + out[15] = ((w13 >> 38) | (w14 << 26)) & mask; + out[16] = ((w14 >> 32) | (w15 << 32)) & mask; + out[17] = ((w15 >> 26) | (w16 << 38)) & mask; + out[18] = ((w16 >> 20) | (w17 << 44)) & mask; + out[19] = ((w17 >> 14) | (w18 << 50)) & mask; + out[20] = ((w18 >> 8) | (w19 << 56)) & mask; + out[21] = (w19 >> 2) & mask; + out[22] = ((w19 >> 60) | (w20 << 4)) & mask; + out[23] = ((w20 >> 54) | (w21 << 10)) & mask; + out[24] = ((w21 >> 48) | (w22 << 16)) & mask; + out[25] = ((w22 >> 42) | (w23 << 22)) & mask; + out[26] 
= ((w23 >> 36) | (w24 << 28)) & mask; + out[27] = ((w24 >> 30) | (w25 << 34)) & mask; + out[28] = ((w25 >> 24) | (w26 << 40)) & mask; + out[29] = ((w26 >> 18) | (w27 << 46)) & mask; + out[30] = ((w27 >> 12) | (w28 << 52)) & mask; + out[31] = w28 >> 6; + + return in; +} + +inline const uint8_t* unpack59_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 576460752303423487ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = 
arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 59) | (w1 << 5)) & mask; + out[2] = ((w1 >> 54) | (w2 << 10)) & mask; + out[3] = ((w2 >> 49) | (w3 << 15)) & mask; + out[4] = ((w3 >> 44) | (w4 << 20)) & mask; + out[5] = ((w4 >> 39) | (w5 << 25)) & mask; + out[6] = ((w5 >> 34) | (w6 << 30)) & mask; + out[7] = ((w6 >> 29) | (w7 << 35)) & mask; + out[8] = ((w7 >> 24) | (w8 << 40)) & mask; + out[9] = ((w8 >> 19) | (w9 << 45)) & mask; + out[10] = ((w9 >> 14) | (w10 << 50)) & mask; + out[11] = ((w10 >> 9) | (w11 << 55)) & mask; + out[12] = (w11 >> 4) & mask; + out[13] = ((w11 >> 63) | (w12 << 1)) & mask; + out[14] = ((w12 >> 58) | (w13 << 6)) & mask; + out[15] = ((w13 >> 53) | 
(w14 << 11)) & mask; + out[16] = ((w14 >> 48) | (w15 << 16)) & mask; + out[17] = ((w15 >> 43) | (w16 << 21)) & mask; + out[18] = ((w16 >> 38) | (w17 << 26)) & mask; + out[19] = ((w17 >> 33) | (w18 << 31)) & mask; + out[20] = ((w18 >> 28) | (w19 << 36)) & mask; + out[21] = ((w19 >> 23) | (w20 << 41)) & mask; + out[22] = ((w20 >> 18) | (w21 << 46)) & mask; + out[23] = ((w21 >> 13) | (w22 << 51)) & mask; + out[24] = ((w22 >> 8) | (w23 << 56)) & mask; + out[25] = (w23 >> 3) & mask; + out[26] = ((w23 >> 62) | (w24 << 2)) & mask; + out[27] = ((w24 >> 57) | (w25 << 7)) & mask; + out[28] = ((w25 >> 52) | (w26 << 12)) & mask; + out[29] = ((w26 >> 47) | (w27 << 17)) & mask; + out[30] = ((w27 >> 42) | (w28 << 22)) & mask; + out[31] = ((w28 >> 37) | (w29 << 27)) & mask; + + return in; +} + +inline const uint8_t* unpack60_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1152921504606846975ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + 
w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 60) | (w1 << 4)) & mask; + out[2] = ((w1 >> 56) | (w2 << 8)) & mask; + out[3] = ((w2 >> 52) | (w3 << 12)) & mask; + out[4] = ((w3 >> 48) 
| (w4 << 16)) & mask; + out[5] = ((w4 >> 44) | (w5 << 20)) & mask; + out[6] = ((w5 >> 40) | (w6 << 24)) & mask; + out[7] = ((w6 >> 36) | (w7 << 28)) & mask; + out[8] = ((w7 >> 32) | (w8 << 32)) & mask; + out[9] = ((w8 >> 28) | (w9 << 36)) & mask; + out[10] = ((w9 >> 24) | (w10 << 40)) & mask; + out[11] = ((w10 >> 20) | (w11 << 44)) & mask; + out[12] = ((w11 >> 16) | (w12 << 48)) & mask; + out[13] = ((w12 >> 12) | (w13 << 52)) & mask; + out[14] = ((w13 >> 8) | (w14 << 56)) & mask; + out[15] = w14 >> 4; + out[16] = (w15)&mask; + out[17] = ((w15 >> 60) | (w16 << 4)) & mask; + out[18] = ((w16 >> 56) | (w17 << 8)) & mask; + out[19] = ((w17 >> 52) | (w18 << 12)) & mask; + out[20] = ((w18 >> 48) | (w19 << 16)) & mask; + out[21] = ((w19 >> 44) | (w20 << 20)) & mask; + out[22] = ((w20 >> 40) | (w21 << 24)) & mask; + out[23] = ((w21 >> 36) | (w22 << 28)) & mask; + out[24] = ((w22 >> 32) | (w23 << 32)) & mask; + out[25] = ((w23 >> 28) | (w24 << 36)) & mask; + out[26] = ((w24 >> 24) | (w25 << 40)) & mask; + out[27] = ((w25 >> 20) | (w26 << 44)) & mask; + out[28] = ((w26 >> 16) | (w27 << 48)) & mask; + out[29] = ((w27 >> 12) | (w28 << 52)) & mask; + out[30] = ((w28 >> 8) | (w29 << 56)) & mask; + out[31] = w29 >> 4; + + return in; +} + +inline const uint8_t* unpack61_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2305843009213693951ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = 
arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 
8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 61) | (w1 << 3)) & mask; + out[2] = ((w1 >> 58) | (w2 << 6)) & mask; + out[3] = ((w2 >> 55) | (w3 << 9)) & mask; + out[4] = ((w3 >> 52) | (w4 << 12)) & mask; + out[5] = ((w4 >> 49) | (w5 << 15)) & mask; + out[6] = ((w5 >> 46) | (w6 << 18)) & mask; + out[7] = ((w6 >> 43) | (w7 << 21)) & mask; + out[8] = ((w7 >> 40) | (w8 << 24)) & mask; + out[9] = ((w8 >> 37) | (w9 << 27)) & mask; + out[10] = ((w9 >> 34) | (w10 << 30)) & mask; + out[11] = ((w10 >> 31) | (w11 << 33)) & mask; + out[12] = ((w11 >> 28) | (w12 << 36)) & mask; + out[13] = ((w12 >> 25) | (w13 << 39)) & mask; + out[14] = ((w13 >> 22) | (w14 << 42)) & mask; + out[15] = ((w14 >> 19) | (w15 << 45)) & mask; + out[16] = ((w15 >> 16) | (w16 << 48)) & mask; + out[17] = ((w16 >> 13) | (w17 << 51)) & mask; + out[18] = ((w17 >> 10) | (w18 << 54)) & mask; + out[19] = ((w18 >> 7) | (w19 << 57)) & mask; + out[20] = ((w19 >> 4) | (w20 << 60)) & mask; + out[21] = (w20 >> 1) & mask; + out[22] = ((w20 >> 62) | (w21 << 2)) & mask; + out[23] = ((w21 >> 59) | (w22 << 5)) & mask; + out[24] = ((w22 >> 56) | (w23 << 8)) & mask; + out[25] = ((w23 >> 53) | (w24 << 11)) & mask; + out[26] = ((w24 >> 50) | (w25 << 14)) & mask; + out[27] = ((w25 >> 47) | (w26 << 17)) & mask; + out[28] = ((w26 >> 44) | (w27 << 20)) & mask; + out[29] = ((w27 >> 41) | (w28 << 23)) & mask; + out[30] = ((w28 >> 38) | (w29 << 26)) & mask; + out[31] = ((w29 >> 35) | (w30 << 29)) & mask; + + return in; +} + +inline const uint8_t* unpack62_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4611686018427387903ULL; + 
uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = 
arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 62) | (w1 << 2)) & mask; + out[2] = ((w1 >> 60) | (w2 << 4)) & mask; + out[3] = ((w2 >> 58) | (w3 << 6)) & mask; + out[4] = ((w3 >> 56) | (w4 << 8)) & mask; + out[5] = ((w4 >> 54) | (w5 << 10)) & mask; + out[6] = ((w5 >> 52) | (w6 << 12)) & mask; + out[7] = ((w6 >> 50) | (w7 << 14)) & mask; + out[8] = ((w7 >> 48) | (w8 << 16)) & mask; + out[9] = ((w8 >> 46) | (w9 << 18)) & mask; + out[10] = ((w9 >> 44) | (w10 << 20)) & mask; + out[11] = ((w10 >> 42) | (w11 << 22)) & mask; + out[12] = ((w11 >> 40) | (w12 << 24)) & mask; + out[13] = ((w12 >> 38) | (w13 << 26)) & mask; + out[14] = ((w13 >> 36) | (w14 << 28)) & mask; + out[15] = ((w14 >> 34) | (w15 << 30)) & mask; + out[16] = ((w15 >> 32) | (w16 << 32)) & mask; + out[17] = ((w16 >> 30) | (w17 << 34)) & mask; + out[18] = ((w17 >> 28) | (w18 << 36)) & mask; + out[19] = ((w18 >> 26) | (w19 << 38)) & mask; + out[20] = ((w19 >> 24) | (w20 << 40)) & mask; + out[21] = ((w20 
>> 22) | (w21 << 42)) & mask; + out[22] = ((w21 >> 20) | (w22 << 44)) & mask; + out[23] = ((w22 >> 18) | (w23 << 46)) & mask; + out[24] = ((w23 >> 16) | (w24 << 48)) & mask; + out[25] = ((w24 >> 14) | (w25 << 50)) & mask; + out[26] = ((w25 >> 12) | (w26 << 52)) & mask; + out[27] = ((w26 >> 10) | (w27 << 54)) & mask; + out[28] = ((w27 >> 8) | (w28 << 56)) & mask; + out[29] = ((w28 >> 6) | (w29 << 58)) & mask; + out[30] = ((w29 >> 4) | (w30 << 60)) & mask; + out[31] = w30 >> 2; + + return in; +} + +inline const uint8_t* unpack63_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9223372036854775807ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = 
util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 63) | (w1 << 1)) & mask; + out[2] = ((w1 >> 62) | (w2 << 2)) & mask; + out[3] = ((w2 >> 61) | (w3 << 3)) & mask; + out[4] = ((w3 >> 60) | (w4 << 4)) & mask; + out[5] = ((w4 >> 59) | (w5 << 5)) & mask; + out[6] = 
((w5 >> 58) | (w6 << 6)) & mask; + out[7] = ((w6 >> 57) | (w7 << 7)) & mask; + out[8] = ((w7 >> 56) | (w8 << 8)) & mask; + out[9] = ((w8 >> 55) | (w9 << 9)) & mask; + out[10] = ((w9 >> 54) | (w10 << 10)) & mask; + out[11] = ((w10 >> 53) | (w11 << 11)) & mask; + out[12] = ((w11 >> 52) | (w12 << 12)) & mask; + out[13] = ((w12 >> 51) | (w13 << 13)) & mask; + out[14] = ((w13 >> 50) | (w14 << 14)) & mask; + out[15] = ((w14 >> 49) | (w15 << 15)) & mask; + out[16] = ((w15 >> 48) | (w16 << 16)) & mask; + out[17] = ((w16 >> 47) | (w17 << 17)) & mask; + out[18] = ((w17 >> 46) | (w18 << 18)) & mask; + out[19] = ((w18 >> 45) | (w19 << 19)) & mask; + out[20] = ((w19 >> 44) | (w20 << 20)) & mask; + out[21] = ((w20 >> 43) | (w21 << 21)) & mask; + out[22] = ((w21 >> 42) | (w22 << 22)) & mask; + out[23] = ((w22 >> 41) | (w23 << 23)) & mask; + out[24] = ((w23 >> 40) | (w24 << 24)) & mask; + out[25] = ((w24 >> 39) | (w25 << 25)) & mask; + out[26] = ((w25 >> 38) | (w26 << 26)) & mask; + out[27] = ((w26 >> 37) | (w27 << 27)) & mask; + out[28] = ((w27 >> 36) | (w28 << 28)) & mask; + out[29] = ((w28 >> 35) | (w29 << 29)) & mask; + out[30] = ((w29 >> 34) | (w30 << 30)) & mask; + out[31] = ((w30 >> 33) | (w31 << 31)) & mask; + + return in; +} + +inline const uint8_t* unpack64_64(const uint8_t* in, uint64_t* out) { + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = 
util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = 
arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 8; + out[0] = w0; + out[1] = w1; + out[2] = w2; + out[3] = w3; + out[4] = w4; + out[5] = w5; + out[6] = w6; + out[7] = w7; + out[8] = w8; + out[9] = w9; + out[10] = w10; + out[11] = w11; + out[12] = w12; + out[13] = w13; + out[14] = w14; + out[15] = w15; + out[16] = w16; + out[17] = w17; + out[18] = w18; + out[19] = w19; + out[20] = w20; + out[21] = w21; + out[22] = w22; + out[23] = w23; + out[24] = w24; + out[25] = w25; + out[26] = w26; + out[27] = w27; + out[28] = w28; + out[29] = w29; + out[30] = w30; + out[31] = w31; + + return in; +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h new file mode 100644 index 0000000000000000000000000000000000000000..7a7d8bf8c44777f4c9e053c6ee1b086d7d954bd0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_avx2(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h new file mode 100644 index 0000000000000000000000000000000000000000..96723f803e0c1a64ef753ab6a51d8f2bd8c173d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h new file mode 100644 index 0000000000000000000000000000000000000000..4c661dcce3798c737c1d20bce525dcaa88c83078 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h @@ -0,0 +1,4251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This file was modified from its original version for inclusion in parquet-cpp. +// Original source: +// https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/. 
+// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint32_t* unpack1_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) & 1; + out++; + *out = (inl >> 1) & 1; + out++; + *out = (inl >> 2) & 1; + out++; + *out = (inl >> 3) & 1; + out++; + *out = (inl >> 4) & 1; + out++; + *out = (inl >> 5) & 1; + out++; + *out = (inl >> 6) & 1; + out++; + *out = (inl >> 7) & 1; + out++; + *out = (inl >> 8) & 1; + out++; + *out = (inl >> 9) & 1; + out++; + *out = (inl >> 10) & 1; + out++; + *out = (inl >> 11) & 1; + out++; + *out = (inl >> 12) & 1; + out++; + *out = (inl >> 13) & 1; + out++; + *out = (inl >> 14) & 1; + out++; + *out = (inl >> 15) & 1; + out++; + *out = (inl >> 16) & 1; + out++; + *out = (inl >> 17) & 1; + out++; + *out = (inl >> 18) & 1; + out++; + *out = (inl >> 19) & 1; + out++; + *out = (inl >> 20) & 1; + out++; + *out = (inl >> 21) & 1; + out++; + *out = (inl >> 22) & 1; + out++; + *out = (inl >> 23) & 1; + out++; + *out = (inl >> 24) & 1; + out++; + *out = (inl >> 25) & 1; + out++; + *out = (inl >> 26) & 1; + out++; + *out = (inl >> 27) & 1; + out++; + *out = (inl >> 28) & 1; + out++; + *out = (inl >> 29) & 1; + out++; + *out = (inl >> 30) & 1; + out++; + *out = (inl >> 31); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack2_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 2); + out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl 
>> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 2); + out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl >> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack3_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 3); + out++; + *out = (inl >> 3) % (1U << 3); + out++; + *out = (inl >> 6) % (1U << 3); + out++; + *out = (inl >> 9) % (1U << 3); + out++; + *out = (inl >> 12) % (1U << 3); + out++; + *out = (inl >> 15) % (1U << 3); + out++; + *out = (inl >> 18) % (1U << 3); + out++; + *out = (inl >> 21) % (1U << 3); + out++; + *out = (inl >> 24) % (1U << 3); + out++; + *out = (inl >> 27) % (1U << 3); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (3 - 1); + out++; + *out = (inl >> 1) % (1U << 3); + out++; + *out = (inl >> 4) % (1U << 3); + out++; + *out = (inl >> 7) % (1U << 3); + out++; + *out = (inl >> 10) % (1U << 3); + out++; + *out = (inl >> 13) % (1U << 3); 
+ out++; + *out = (inl >> 16) % (1U << 3); + out++; + *out = (inl >> 19) % (1U << 3); + out++; + *out = (inl >> 22) % (1U << 3); + out++; + *out = (inl >> 25) % (1U << 3); + out++; + *out = (inl >> 28) % (1U << 3); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (3 - 2); + out++; + *out = (inl >> 2) % (1U << 3); + out++; + *out = (inl >> 5) % (1U << 3); + out++; + *out = (inl >> 8) % (1U << 3); + out++; + *out = (inl >> 11) % (1U << 3); + out++; + *out = (inl >> 14) % (1U << 3); + out++; + *out = (inl >> 17) % (1U << 3); + out++; + *out = (inl >> 20) % (1U << 3); + out++; + *out = (inl >> 23) % (1U << 3); + out++; + *out = (inl >> 26) % (1U << 3); + out++; + *out = (inl >> 29); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack4_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % 
(1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack5_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 5); + out++; + *out = (inl >> 5) % (1U << 5); + out++; + *out = (inl >> 10) % (1U << 5); + out++; + *out = (inl >> 15) % (1U << 5); + out++; + *out = (inl >> 20) % (1U << 5); + out++; + *out = (inl >> 25) % (1U << 5); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (5 - 3); + out++; + *out = (inl >> 3) % (1U << 5); + out++; + *out = (inl >> 8) % (1U << 5); + out++; + *out = (inl >> 13) % (1U << 5); + out++; + *out = (inl >> 18) % (1U << 5); + out++; + *out = (inl >> 23) % (1U << 5); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (5 - 1); + out++; + *out = (inl >> 1) % (1U << 5); + out++; + *out = (inl >> 6) % (1U << 5); + out++; + *out = (inl >> 11) % (1U << 5); + out++; + *out = (inl >> 16) % (1U << 5); + out++; + *out = (inl >> 21) % (1U << 5); + out++; + *out = (inl >> 26) % (1U << 5); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (5 - 4); + out++; + *out = (inl >> 4) % (1U << 5); + out++; + *out = (inl >> 9) % (1U << 5); 
+ out++; + *out = (inl >> 14) % (1U << 5); + out++; + *out = (inl >> 19) % (1U << 5); + out++; + *out = (inl >> 24) % (1U << 5); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (5 - 2); + out++; + *out = (inl >> 2) % (1U << 5); + out++; + *out = (inl >> 7) % (1U << 5); + out++; + *out = (inl >> 12) % (1U << 5); + out++; + *out = (inl >> 17) % (1U << 5); + out++; + *out = (inl >> 22) % (1U << 5); + out++; + *out = (inl >> 27); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack6_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack7_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 7); + out++; + *out = (inl >> 7) % (1U << 7); + out++; + *out = (inl >> 14) % (1U << 7); + out++; + *out = (inl >> 21) % (1U << 7); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (7 - 3); + out++; + *out = (inl >> 3) % (1U << 7); + out++; + *out = (inl >> 10) % (1U << 7); + out++; + *out = (inl >> 17) % (1U << 7); + out++; + *out = (inl >> 24) % (1U << 7); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (7 - 6); + out++; + *out = (inl >> 6) % (1U << 7); + out++; + *out = (inl >> 13) % (1U << 7); + out++; + *out = (inl >> 20) % (1U << 7); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (7 - 2); + out++; + *out = (inl >> 2) % (1U << 7); + out++; + *out = (inl >> 9) % (1U << 7); + out++; + *out = (inl >> 16) % (1U << 7); + out++; + *out = (inl >> 23) % (1U << 7); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (7 - 5); + out++; + *out = (inl >> 5) % (1U << 7); + out++; + *out = (inl >> 12) % (1U << 7); + out++; + *out = (inl >> 19) % (1U << 7); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (7 - 1); + out++; + *out = (inl >> 1) % (1U << 7); + out++; + *out = (inl >> 8) % (1U << 7); + out++; + *out = (inl >> 15) % (1U << 7); + out++; + *out = (inl >> 22) % (1U << 7); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (7 - 4); + out++; + *out = (inl >> 4) % (1U << 7); + out++; + *out = (inl >> 11) % (1U << 7); + out++; + *out = (inl >> 18) % (1U << 7); + out++; + *out = (inl >> 25); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack8_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); 
+ out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack9_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 9); + out++; + *out = (inl >> 9) % (1U << 9); + out++; + *out = (inl >> 18) % (1U << 9); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (9 - 4); + out++; + *out = (inl >> 4) % (1U << 9); + out++; + *out = (inl >> 13) % (1U << 9); + out++; + *out = (inl >> 22) % (1U << 9); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (9 - 8); + out++; + *out = (inl >> 8) % (1U << 9); + out++; + *out = (inl >> 17) % (1U << 9); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (9 - 3); + out++; + *out = (inl >> 3) % (1U << 9); + out++; + *out = (inl >> 12) % (1U << 9); + out++; + *out = (inl >> 21) % (1U << 
9); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (9 - 7); + out++; + *out = (inl >> 7) % (1U << 9); + out++; + *out = (inl >> 16) % (1U << 9); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (9 - 2); + out++; + *out = (inl >> 2) % (1U << 9); + out++; + *out = (inl >> 11) % (1U << 9); + out++; + *out = (inl >> 20) % (1U << 9); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (9 - 6); + out++; + *out = (inl >> 6) % (1U << 9); + out++; + *out = (inl >> 15) % (1U << 9); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (9 - 1); + out++; + *out = (inl >> 1) % (1U << 9); + out++; + *out = (inl >> 10) % (1U << 9); + out++; + *out = (inl >> 19) % (1U << 9); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (9 - 5); + out++; + *out = (inl >> 5) % (1U << 9); + out++; + *out = (inl >> 14) % (1U << 9); + out++; + *out = (inl >> 23); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack10_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % (1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + 
*out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % (1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack11_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 11); + out++; + *out = (inl >> 11) % (1U << 11); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (11 - 1); + out++; + *out = (inl >> 1) % (1U << 11); + out++; + *out = (inl >> 12) % (1U << 11); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (11 - 2); + out++; + *out = (inl >> 2) % (1U << 11); + out++; + *out = (inl >> 13) % (1U << 11); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (11 - 3); + out++; + *out = (inl >> 3) % (1U << 11); + out++; + *out = (inl >> 14) % (1U << 11); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (11 - 4); + out++; + *out = (inl >> 4) % (1U << 11); + out++; + *out = (inl >> 15) % (1U << 11); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (11 - 5); + out++; + *out = (inl >> 5) % (1U << 11); + out++; + *out = (inl >> 16) % (1U << 11); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (11 - 6); + out++; + *out = (inl >> 6) % (1U << 11); + out++; + *out = (inl >> 17) % (1U << 11); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (11 - 7); + out++; + *out = (inl >> 7) % (1U << 11); + out++; + *out = (inl >> 18) % (1U << 11); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (11 - 
8); + out++; + *out = (inl >> 8) % (1U << 11); + out++; + *out = (inl >> 19) % (1U << 11); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (11 - 9); + out++; + *out = (inl >> 9) % (1U << 11); + out++; + *out = (inl >> 20) % (1U << 11); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (11 - 10); + out++; + *out = (inl >> 10) % (1U << 11); + out++; + *out = (inl >> 21); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack12_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl 
= arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack13_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 13); + out++; + *out = (inl >> 13) % (1U << 13); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (13 - 7); + out++; + *out = (inl >> 7) % (1U << 13); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (13 - 1); + out++; + *out = (inl >> 1) % (1U << 13); + out++; + *out = (inl >> 14) % (1U << 13); + out++; + *out = (inl >> 27); + ++in; 
+ inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (13 - 8); + out++; + *out = (inl >> 8) % (1U << 13); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (13 - 2); + out++; + *out = (inl >> 2) % (1U << 13); + out++; + *out = (inl >> 15) % (1U << 13); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (13 - 9); + out++; + *out = (inl >> 9) % (1U << 13); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (13 - 3); + out++; + *out = (inl >> 3) % (1U << 13); + out++; + *out = (inl >> 16) % (1U << 13); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (13 - 10); + out++; + *out = (inl >> 10) % (1U << 13); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (13 - 4); + out++; + *out = (inl >> 4) % (1U << 13); + out++; + *out = (inl >> 17) % (1U << 13); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (13 - 11); + out++; + *out = (inl >> 11) % (1U << 13); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (13 - 5); + out++; + *out = (inl >> 5) % (1U << 13); + out++; + *out = (inl >> 18) % (1U << 13); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (13 - 12); + out++; + *out = (inl >> 12) % (1U << 13); + out++; + *out = (inl >> 25); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (13 - 6); + out++; + *out = (inl >> 6) % (1U << 13); + out++; + *out = (inl >> 19); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack14_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) % (1U << 14); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) % (1U << 14); + out++; + *out = (inl >> 18); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack15_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 15); + out++; + *out = (inl >> 15) % (1U << 15); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (15 - 13); + out++; + *out = (inl >> 13) % (1U << 15); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (15 - 11); + out++; + *out = (inl >> 11) % (1U << 15); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out 
|= (inl % (1U << 9)) << (15 - 9); + out++; + *out = (inl >> 9) % (1U << 15); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (15 - 7); + out++; + *out = (inl >> 7) % (1U << 15); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (15 - 5); + out++; + *out = (inl >> 5) % (1U << 15); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (15 - 3); + out++; + *out = (inl >> 3) % (1U << 15); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (15 - 1); + out++; + *out = (inl >> 1) % (1U << 15); + out++; + *out = (inl >> 16) % (1U << 15); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (15 - 14); + out++; + *out = (inl >> 14) % (1U << 15); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (15 - 12); + out++; + *out = (inl >> 12) % (1U << 15); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (15 - 10); + out++; + *out = (inl >> 10) % (1U << 15); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (15 - 8); + out++; + *out = (inl >> 8) % (1U << 15); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (15 - 6); + out++; + *out = (inl >> 6) % (1U << 15); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (15 - 4); + out++; + *out = (inl >> 4) % (1U << 15); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (15 - 2); + out++; + *out = (inl >> 2) % (1U << 15); + out++; + *out = (inl >> 17); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack16_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 
16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack17_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (17 - 2); + out++; + *out = (inl >> 2) % (1U << 17); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (17 - 4); + out++; + *out = (inl >> 4) % (1U << 17); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (17 - 6); + out++; + *out = (inl >> 6) % (1U << 17); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (17 - 8); + out++; + *out = (inl >> 8) % 
(1U << 17); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (17 - 10); + out++; + *out = (inl >> 10) % (1U << 17); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (17 - 12); + out++; + *out = (inl >> 12) % (1U << 17); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (17 - 14); + out++; + *out = (inl >> 14) % (1U << 17); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (17 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (17 - 1); + out++; + *out = (inl >> 1) % (1U << 17); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (17 - 3); + out++; + *out = (inl >> 3) % (1U << 17); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (17 - 5); + out++; + *out = (inl >> 5) % (1U << 17); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (17 - 7); + out++; + *out = (inl >> 7) % (1U << 17); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (17 - 9); + out++; + *out = (inl >> 9) % (1U << 17); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (17 - 11); + out++; + *out = (inl >> 11) % (1U << 17); + out++; + *out = (inl >> 
28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (17 - 13); + out++; + *out = (inl >> 13) % (1U << 17); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (17 - 15); + out++; + *out = (inl >> 15); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack18_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack19_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (19 - 6); + out++; + *out = (inl >> 6) % (1U << 19); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (19 - 12); + out++; + *out = (inl >> 12) % (1U << 19); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (19 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (19 - 5); + out++; + *out = (inl >> 5) % (1U << 19); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (19 - 11); + out++; + *out = (inl >> 11) % (1U << 19); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (19 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (19 - 4); + out++; + *out = (inl >> 4) % (1U << 19); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (19 - 10); + out++; + *out = (inl >> 10) % (1U << 19); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (19 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (19 - 3); + out++; + *out = (inl >> 3) % (1U << 19); + out++; + *out = (inl >> 22); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (19 - 9); + out++; + *out = (inl >> 9) % (1U << 19); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (19 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (19 - 2); + out++; + *out = (inl >> 2) % (1U << 19); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (19 - 8); + out++; + *out = (inl >> 8) % (1U << 19); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (19 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (19 - 1); + out++; + *out = (inl >> 1) % (1U << 19); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (19 - 7); + out++; + *out = (inl >> 7) % (1U << 19); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (19 - 13); + out++; + *out = (inl >> 13); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack20_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + 
*out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out 
= (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack21_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (21 - 10); + out++; + *out = (inl >> 10) % (1U << 21); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (21 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (21 - 9); + out++; + *out = (inl >> 9) % (1U << 21); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (21 - 19); + out++; 
+ *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (21 - 8); + out++; + *out = (inl >> 8) % (1U << 21); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (21 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (21 - 7); + out++; + *out = (inl >> 7) % (1U << 21); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (21 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (21 - 6); + out++; + *out = (inl >> 6) % (1U << 21); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (21 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (21 - 5); + out++; + *out = (inl >> 5) % (1U << 21); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (21 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (21 - 4); + out++; + *out = (inl >> 4) % (1U << 21); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (21 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (21 - 3); + out++; + *out = (inl >> 3) % (1U << 21); + out++; + *out = (inl 
>> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (21 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (21 - 2); + out++; + *out = (inl >> 2) % (1U << 21); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (21 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (21 - 1); + out++; + *out = (inl >> 1) % (1U << 21); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (21 - 11); + out++; + *out = (inl >> 11); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack22_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = 
(inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); 
+ *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack23_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (23 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (23 - 5); + out++; + *out = (inl >> 5) % (1U << 23); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (23 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (23 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (23 - 1); + out++; + *out = (inl >> 1) % (1U << 23); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (23 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (23 - 6); + out++; + *out = (inl >> 6) % (1U << 23); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (23 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (23 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (23 - 2); + out++; + *out = (inl >> 2) % (1U << 23); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (23 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (23 - 7); + out++; + *out = (inl >> 7) % (1U << 23); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (23 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (23 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (23 - 3); + out++; + *out = (inl >> 3) % (1U << 23); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (23 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (23 - 8); + out++; + *out 
= (inl >> 8) % (1U << 23); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (23 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (23 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (23 - 4); + out++; + *out = (inl >> 4) % (1U << 23); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (23 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (23 - 9); + out++; + *out = (inl >> 9); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack24_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + 
++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack25_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (25 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (25 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (25 - 4); + out++; + *out = (inl >> 4) % (1U << 25); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (25 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (25 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (25 - 8); + 
out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (25 - 1); + out++; + *out = (inl >> 1) % (1U << 25); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (25 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (25 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (25 - 5); + out++; + *out = (inl >> 5) % (1U << 25); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (25 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (25 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (25 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (25 - 2); + out++; + *out = (inl >> 2) % (1U << 25); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (25 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (25 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (25 - 6); + out++; + *out = (inl >> 6) % (1U << 25); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (25 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (25 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (25 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (25 - 3); + out++; + *out = (inl >> 3) % (1U << 25); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (25 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (25 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (25 - 7); + out++; + *out = (inl >> 7); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack26_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); 
+ out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl >> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl >> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack27_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (27 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << 
(27 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (27 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (27 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (27 - 2); + out++; + *out = (inl >> 2) % (1U << 27); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (27 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (27 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (27 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (27 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (27 - 4); + out++; + *out = (inl >> 4) % (1U << 27); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (27 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (27 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (27 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (27 - 11); + out++; + *out 
= (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (27 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (27 - 1); + out++; + *out = (inl >> 1) % (1U << 27); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (27 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (27 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (27 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (27 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (27 - 3); + out++; + *out = (inl >> 3) % (1U << 27); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (27 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (27 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (27 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (27 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (27 - 5); + out++; + *out = (inl >> 5); + ++in; + 
out++; + + return in; +} + +inline const uint32_t* unpack28_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + 
out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack29_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (29 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (29 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (29 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (29 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (29 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (29 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (29 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (29 - 5); + out++; + 
*out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (29 - 2); + out++; + *out = (inl >> 2) % (1U << 29); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (29 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (29 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (29 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (29 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (29 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (29 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (29 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (29 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (29 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (29 - 1); + out++; + *out = (inl >> 1) % (1U << 29); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (29 - 27); + out++; + *out = (inl >> 27); + ++in; 
+ inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (29 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (29 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (29 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (29 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (29 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (29 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (29 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (29 - 3); + out++; + *out = (inl >> 3); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack30_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (30 - 2); + out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + 
out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); 
+ *out |= (inl % (1U << 2)) << (30 - 2); + out++; + *out = (inl >> 2); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack31_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 31); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 30)) << (31 - 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 29)) << (31 - 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (31 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (31 - 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (31 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (31 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (31 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (31 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (31 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (31 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + 
*out |= (inl % (1U << 20)) << (31 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (31 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (31 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (31 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (31 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (31 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (31 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (31 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (31 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (31 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (31 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (31 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (31 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (31 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (31 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (31 - 5); + out++; + *out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (31 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (31 - 3); + out++; + *out = (inl >> 3); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (31 - 2); + out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (31 - 1); + out++; + *out = (inl >> 1); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack32_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = 
(inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + out++; + + return in; +} + +inline const uint32_t* nullunpacker32(const uint32_t* in, uint32_t* out) { + for (int k = 0; k < 32; ++k) { + out[k] = 0; + } + return in; +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..9d02cd568acbc9661f763259e1d4ed134f609e4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/cancel.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/cancel.h new file mode 100644 index 0000000000000000000000000000000000000000..f0d704b2ce08644064b627639ed536dac21bcd71 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/cancel.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class StopToken; + +struct StopSourceImpl; + +/// EXPERIMENTAL +class ARROW_EXPORT StopSource { + public: + StopSource(); + ~StopSource(); + + // Consumer API (the side that stops) + void RequestStop(); + void RequestStop(Status error); + // Async-signal-safe. TODO Deprecate this? + void RequestStopFromSignal(int signum); + + StopToken token(); + + // For internal use only + void Reset(); + + protected: + std::shared_ptr impl_; +}; + +/// EXPERIMENTAL +class ARROW_EXPORT StopToken { + public: + // Public for Cython + StopToken() {} + + explicit StopToken(std::shared_ptr impl) : impl_(std::move(impl)) {} + + // A trivial token that never propagates any stop request + static StopToken Unstoppable() { return StopToken(); } + + /// \brief Check if the stop source has been cancelled. + /// + /// Producers should call this method, whenever convenient, to check and + /// see if they should stop producing early (i.e. have been cancelled). + /// Failure to call this method often enough will lead to an unresponsive + /// cancellation. + /// + /// This is part of the producer API (the side that gets asked to stop) + /// This method is thread-safe + /// + /// \return An OK status if the stop source has not been cancelled or a + /// cancel error if the source has been cancelled. + Status Poll() const; + bool IsStopRequested() const; + + protected: + std::shared_ptr impl_; +}; + +/// EXPERIMENTAL: Set a global StopSource that can receive signals +/// +/// The only allowed order of calls is the following: +/// - SetSignalStopSource() +/// - any number of pairs of (RegisterCancellingSignalHandler, +/// UnregisterCancellingSignalHandler) calls +/// - ResetSignalStopSource() +/// +/// Beware that these settings are process-wide. 
Typically, only one +/// thread should call these APIs, even in a multithreaded setting. +ARROW_EXPORT +Result SetSignalStopSource(); + +/// EXPERIMENTAL: Reset the global signal-receiving StopSource +/// +/// This will invalidate the pointer returned by SetSignalStopSource. +ARROW_EXPORT +void ResetSignalStopSource(); + +/// EXPERIMENTAL: Register signal handler triggering the signal-receiving StopSource +/// +/// Note that those handlers are automatically un-registered in a fork()ed process, +/// therefore the child process will need to call RegisterCancellingSignalHandler() +/// if desired. +ARROW_EXPORT +Status RegisterCancellingSignalHandler(const std::vector& signals); + +/// EXPERIMENTAL: Unregister signal handler set up by RegisterCancellingSignalHandler +ARROW_EXPORT +void UnregisterCancellingSignalHandler(); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compare.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compare.h new file mode 100644 index 0000000000000000000000000000000000000000..0594b6002ff573afcb420b260c921a78277c9daf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compare.h @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/macros.h" + +namespace arrow { +namespace util { + +/// CRTP helper for declaring equality comparison. Defines operator== and operator!= +template +class EqualityComparable { + public: + ~EqualityComparable() { + static_assert( + std::is_same().Equals(std::declval())), + bool>::value, + "EqualityComparable depends on the method T::Equals(const T&) const"); + } + + template + bool Equals(const std::shared_ptr& other, Extra&&... extra) const { + if (other == NULLPTR) { + return false; + } + return cast().Equals(*other, std::forward(extra)...); + } + + struct PtrsEqual { + bool operator()(const std::shared_ptr& l, const std::shared_ptr& r) const { + return l->Equals(*r); + } + }; + + friend bool operator==(T const& a, T const& b) { return a.Equals(b); } + friend bool operator!=(T const& a, T const& b) { return !(a == b); } + + private: + const T& cast() const { return static_cast(*this); } +}; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bf4d5e12d02d349c3a0e0fce43f6be5ef4d585 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +constexpr int kUseDefaultCompressionLevel = std::numeric_limits::min(); + +/// \brief Streaming compressor interface +/// +class ARROW_EXPORT Compressor { + public: + virtual ~Compressor() = default; + + struct CompressResult { + int64_t bytes_read; + int64_t bytes_written; + }; + struct FlushResult { + int64_t bytes_written; + bool should_retry; + }; + struct EndResult { + int64_t bytes_written; + bool should_retry; + }; + + /// \brief Compress some input. + /// + /// If bytes_read is 0 on return, then a larger output buffer should be supplied. + virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Flush part of the compressed output. + /// + /// If should_retry is true on return, Flush() should be called again + /// with a larger buffer. + virtual Result Flush(int64_t output_len, uint8_t* output) = 0; + + /// \brief End compressing, doing whatever is necessary to end the stream. + /// + /// If should_retry is true on return, End() should be called again + /// with a larger buffer. Otherwise, the Compressor should not be used anymore. + /// + /// End() implies Flush(). + virtual Result End(int64_t output_len, uint8_t* output) = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Streaming decompressor interface +/// +class ARROW_EXPORT Decompressor { + public: + virtual ~Decompressor() = default; + + struct DecompressResult { + // XXX is need_more_output necessary? (Brotli?) + int64_t bytes_read; + int64_t bytes_written; + bool need_more_output; + }; + + /// \brief Decompress some input. + /// + /// If need_more_output is true on return, a larger output buffer needs + /// to be supplied. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Return whether the compressed stream is finished. + /// + /// This is a heuristic. If true is returned, then it is guaranteed + /// that the stream is finished. If false is returned, however, it may + /// simply be that the underlying library isn't able to provide the information. + virtual bool IsFinished() = 0; + + /// \brief Reinitialize decompressor, making it ready for a new compressed stream. + virtual Status Reset() = 0; + + // XXX add methods for buffer size heuristics? 
+}; + +/// \brief Compression codec options +class ARROW_EXPORT CodecOptions { + public: + explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel) + : compression_level(compression_level) {} + + virtual ~CodecOptions() = default; + + int compression_level; +}; + +// ---------------------------------------------------------------------- +// GZip codec options implementation + +enum class GZipFormat { + ZLIB, + DEFLATE, + GZIP, +}; + +class ARROW_EXPORT GZipCodecOptions : public CodecOptions { + public: + GZipFormat gzip_format = GZipFormat::GZIP; + std::optional window_bits; +}; + +// ---------------------------------------------------------------------- +// brotli codec options implementation + +class ARROW_EXPORT BrotliCodecOptions : public CodecOptions { + public: + std::optional window_bits; +}; + +/// \brief Compression codec +class ARROW_EXPORT Codec { + public: + virtual ~Codec() = default; + + /// \brief Return special value to indicate that a codec implementation + /// should use its default compression level + static int UseDefaultCompressionLevel(); + + /// \brief Return a string name for compression type + static const std::string& GetCodecAsString(Compression::type t); + + /// \brief Return compression type for name (all lower case) + static Result GetCompressionType(const std::string& name); + + /// \brief Create a codec for the given compression algorithm with CodecOptions + static Result> Create( + Compression::type codec, const CodecOptions& codec_options = CodecOptions{}); + + /// \brief Create a codec for the given compression algorithm + static Result> Create(Compression::type codec, + int compression_level); + + /// \brief Return true if support for indicated codec has been enabled + static bool IsAvailable(Compression::type codec); + + /// \brief Return true if indicated codec supports setting a compression level + static bool SupportsCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported 
compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MinimumCompressionLevel(Compression::type codec); + + /// \brief Return the largest supported compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result MaximumCompressionLevel(Compression::type codec); + + /// \brief Return the default compression level + /// Note: This function creates a temporary Codec instance + static Result DefaultCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported compression level + virtual int minimum_compression_level() const = 0; + + /// \brief Return the largest supported compression level + virtual int maximum_compression_level() const = 0; + + /// \brief Return the default compression level + virtual int default_compression_level() const = 0; + + /// \brief One-shot decompression function + /// + /// output_buffer_len must be correct and therefore be obtained in advance. + /// The actual decompressed length is returned. + /// + /// \note One-shot decompression is not always compatible with streaming + /// compression. Depending on the codec (e.g. LZ4), different formats may + /// be used. + virtual Result Decompress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) = 0; + + /// \brief One-shot compression function + /// + /// output_buffer_len must first have been computed using MaxCompressedLen(). + /// The actual compressed length is returned. + /// + /// \note One-shot compression is not always compatible with streaming + /// decompression. Depending on the codec (e.g. LZ4), different formats may + /// be used. 
+ virtual Result Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, uint8_t* output_buffer) = 0; + + virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeCompressor() = 0; + + /// \brief Create a streaming compressor instance + virtual Result> MakeDecompressor() = 0; + + /// \brief This Codec's compression type + virtual Compression::type compression_type() const = 0; + + /// \brief The name of this Codec's compression type + const std::string& name() const { return GetCodecAsString(compression_type()); } + + /// \brief This Codec's compression level, if applicable + virtual int compression_level() const { return UseDefaultCompressionLevel(); } + + private: + /// \brief Initializes the codec's resources. + virtual Status Init(); +}; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h new file mode 100644 index 0000000000000000000000000000000000000000..ff1584552a8ffc77fa518002bd285795ec0d1408 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/mutex.h" + +namespace arrow { +namespace util { + +template +class ConcurrentMap { + public: + void Insert(const K& key, const V& value) { + auto lock = mutex_.Lock(); + map_.insert({key, value}); + } + + template + V GetOrInsert(const K& key, ValueFunc&& compute_value_func) { + auto lock = mutex_.Lock(); + auto it = map_.find(key); + if (it == map_.end()) { + auto pair = map_.emplace(key, compute_value_func()); + it = pair.first; + } + return it->second; + } + + void Erase(const K& key) { + auto lock = mutex_.Lock(); + map_.erase(key); + } + + void Clear() { + auto lock = mutex_.Lock(); + map_.clear(); + } + + size_t size() const { + auto lock = mutex_.Lock(); + return map_.size(); + } + + private: + std::unordered_map map_; + mutable arrow::util::Mutex mutex_; +}; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h new file mode 100644 index 0000000000000000000000000000000000000000..25572245954a9a9cd5fe9c340b68e1b8e544124a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#define ARROW_VERSION_MAJOR 16 +#define ARROW_VERSION_MINOR 0 +#define ARROW_VERSION_PATCH 0 +#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH + +#define ARROW_VERSION_STRING "16.0.0" + +#define ARROW_SO_VERSION "1600" +#define ARROW_FULL_SO_VERSION "1600.0.0" + +#define ARROW_CXX_COMPILER_ID "GNU" +#define ARROW_CXX_COMPILER_VERSION "12.2.1" +#define ARROW_CXX_COMPILER_FLAGS " -fdiagnostics-color=always" + +#define ARROW_BUILD_TYPE "RELEASE" + +#define ARROW_GIT_ID "" +#define ARROW_GIT_DESCRIPTION "" + +#define ARROW_PACKAGE_KIND "python-wheel-manylinux228" + +#define ARROW_COMPUTE +#define ARROW_CSV +/* #undef ARROW_CUDA */ +#define ARROW_DATASET +#define ARROW_FILESYSTEM +#define ARROW_FLIGHT +/* #undef ARROW_FLIGHT_SQL */ +#define ARROW_IPC +#define ARROW_JEMALLOC +#define ARROW_JEMALLOC_VENDORED +#define ARROW_JSON +#define ARROW_MIMALLOC +#define ARROW_ORC +#define ARROW_PARQUET +#define ARROW_SUBSTRAIT + +#define ARROW_AZURE +#define ARROW_ENABLE_THREADING +#define ARROW_GCS +#define ARROW_HDFS +#define ARROW_S3 +/* #undef ARROW_USE_GLOG */ +#define ARROW_USE_NATIVE_INT128 +#define ARROW_WITH_BROTLI +#define ARROW_WITH_BZ2 +#define ARROW_WITH_LZ4 +/* #undef ARROW_WITH_MUSL */ +/* #undef ARROW_WITH_OPENTELEMETRY */ +#define ARROW_WITH_RE2 +#define ARROW_WITH_SNAPPY +/* #undef ARROW_WITH_UCX */ +#define ARROW_WITH_UTF8PROC 
+#define ARROW_WITH_ZLIB +#define ARROW_WITH_ZSTD +#define PARQUET_REQUIRE_ENCRYPTION diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..c23d6ccd9886e4539d52d537abb85da1dcc93385 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/chunked_array.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { +namespace internal { + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool); + +template +class Converter { + public: + using Self = Converter; + using InputType = Input; + using OptionsType = Options; + + virtual ~Converter() = default; + + Status Construct(std::shared_ptr type, OptionsType options, + MemoryPool* pool) { + type_ = std::move(type); + options_ = std::move(options); + return Init(pool); + } + + virtual Status Append(InputType value) { return Status::NotImplemented("Append"); } + + virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) { + return Status::NotImplemented("Extend"); + } + + virtual Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + return Status::NotImplemented("ExtendMasked"); + } + + const std::shared_ptr& builder() const { return builder_; } + + const std::shared_ptr& type() const { return type_; } + + OptionsType options() const { return options_; } + + bool may_overflow() const { return may_overflow_; } + + bool rewind_on_overflow() const { return rewind_on_overflow_; } + + virtual Status Reserve(int64_t additional_capacity) { + return builder_->Reserve(additional_capacity); + } + + Status AppendNull() { return builder_->AppendNull(); } + + virtual Result> ToArray() { return builder_->Finish(); } + + virtual Result> ToArray(int64_t length) { + ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray()); + return arr->Slice(0, length); + } + + virtual Result> ToChunkedArray() { + ARROW_ASSIGN_OR_RAISE(auto array, ToArray()); + std::vector> chunks = {std::move(array)}; + return std::make_shared(chunks); + } 
+ + protected: + virtual Status Init(MemoryPool* pool) { return Status::OK(); } + + std::shared_ptr type_; + std::shared_ptr builder_; + OptionsType options_; + bool may_overflow_ = false; + bool rewind_on_overflow_ = false; +}; + +template +class PrimitiveConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + + protected: + Status Init(MemoryPool* pool) override { + this->builder_ = std::make_shared(this->type_, pool); + // Narrow variable-sized binary types may overflow + this->may_overflow_ = is_binary_like(this->type_->id()); + primitive_type_ = checked_cast(this->type_.get()); + primitive_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const ArrowType* primitive_type_; + BuilderType* primitive_builder_; +}; + +template class ConverterTrait> +class ListConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits::BuilderType; + using ConverterType = typename ConverterTrait::type; + + protected: + Status Init(MemoryPool* pool) override { + list_type_ = checked_cast(this->type_.get()); + ARROW_ASSIGN_OR_RAISE(value_converter_, + (MakeConverter( + list_type_->value_type(), this->options_, pool))); + this->builder_ = + std::make_shared(pool, value_converter_->builder(), this->type_); + list_builder_ = checked_cast(this->builder_.get()); + // Narrow list types may overflow + this->may_overflow_ = this->rewind_on_overflow_ = + sizeof(typename ArrowType::offset_type) < sizeof(int64_t); + return Status::OK(); + } + + const ArrowType* list_type_; + BuilderType* list_builder_; + std::unique_ptr value_converter_; +}; + +template class ConverterTrait> +class StructConverter : public BaseConverter { + public: + using ConverterType = typename ConverterTrait::type; + + Status Reserve(int64_t additional_capacity) override { + ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity)); + for (const auto& child : children_) { + 
ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity)); + } + return Status::OK(); + } + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr child_converter; + std::vector> child_builders; + + struct_type_ = checked_cast(this->type_.get()); + for (const auto& field : struct_type_->fields()) { + ARROW_ASSIGN_OR_RAISE(child_converter, + (MakeConverter( + field->type(), this->options_, pool))); + this->may_overflow_ |= child_converter->may_overflow(); + this->rewind_on_overflow_ = this->may_overflow_; + child_builders.push_back(child_converter->builder()); + children_.push_back(std::move(child_converter)); + } + + this->builder_ = + std::make_shared(this->type_, pool, std::move(child_builders)); + struct_builder_ = checked_cast(this->builder_.get()); + + return Status::OK(); + } + + const StructType* struct_type_; + StructBuilder* struct_builder_; + std::vector> children_; +}; + +template +class DictionaryConverter : public BaseConverter { + public: + using BuilderType = DictionaryBuilder; + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr builder; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder)); + this->builder_ = std::move(builder); + this->may_overflow_ = false; + dict_type_ = checked_cast(this->type_.get()); + value_type_ = checked_cast(dict_type_->value_type().get()); + value_builder_ = checked_cast(this->builder_.get()); + return Status::OK(); + } + + const DictionaryType* dict_type_; + const ValueType* value_type_; + BuilderType* value_builder_; +}; + +template class ConverterTrait> +struct MakeConverterImpl { + template ::type> + Status Visit(const T&) { + out.reset(new ConverterType()); + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DictionaryType& t) { + switch (t.value_type()->id()) { +#define DICTIONARY_CASE(TYPE) \ + case TYPE::type_id: \ + out = std::make_unique< \ + typename ConverterTrait::template dictionary_type>(); \ + 
break; + DICTIONARY_CASE(BooleanType); + DICTIONARY_CASE(Int8Type); + DICTIONARY_CASE(Int16Type); + DICTIONARY_CASE(Int32Type); + DICTIONARY_CASE(Int64Type); + DICTIONARY_CASE(UInt8Type); + DICTIONARY_CASE(UInt16Type); + DICTIONARY_CASE(UInt32Type); + DICTIONARY_CASE(UInt64Type); + DICTIONARY_CASE(FloatType); + DICTIONARY_CASE(DoubleType); + DICTIONARY_CASE(BinaryType); + DICTIONARY_CASE(StringType); + DICTIONARY_CASE(FixedSizeBinaryType); +#undef DICTIONARY_CASE + default: + return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(), + " not implemented"); + } + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); } + + std::shared_ptr type; + typename BaseConverter::OptionsType options; + MemoryPool* pool; + std::unique_ptr out; +}; + +template class ConverterTrait> +static Result> MakeConverter( + std::shared_ptr type, typename BaseConverter::OptionsType options, + MemoryPool* pool) { + MakeConverterImpl visitor{ + std::move(type), std::move(options), pool, NULLPTR}; + ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor)); + return std::move(visitor.out); +} + +template +class Chunker { + public: + using InputType = typename Converter::InputType; + + explicit Chunker(std::unique_ptr converter) + : converter_(std::move(converter)) {} + + Status Reserve(int64_t additional_capacity) { + ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity)); + reserved_ += additional_capacity; + return Status::OK(); + } + + Status AppendNull() { + auto status = converter_->AppendNull(); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. 
+ return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return converter_->AppendNull(); + } + ++length_; + return status; + } + + Status Append(InputType value) { + auto status = converter_->Append(value); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return Append(value); + } + ++length_; + return status; + } + + Status Extend(InputType values, int64_t size, int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->Extend(values, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. 
+ length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->ExtendMasked(values, mask, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. + length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status FinishChunk() { + ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_)); + chunks_.push_back(chunk); + // Reserve space for the remaining items. + // Besides being an optimization, it is also required if the converter's + // implementation relies on unsafe builder methods in converter->Append(). 
+ auto remaining = reserved_ - length_; + Reset(); + return Reserve(remaining); + } + + Result> ToChunkedArray() { + ARROW_RETURN_NOT_OK(FinishChunk()); + return std::make_shared(chunks_); + } + + protected: + void Reset() { + converter_->builder()->Reset(); + length_ = 0; + reserved_ = 0; + } + + int64_t length_ = 0; + int64_t reserved_ = 0; + std::unique_ptr converter_; + std::vector> chunks_; +}; + +template +static Result>> MakeChunker(std::unique_ptr converter) { + return std::make_unique>(std::move(converter)); +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h new file mode 100644 index 0000000000000000000000000000000000000000..a3c13cc3bea4d6be639b521051021f7cb1c07f14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#ifndef ARROW_COUNTING_SEMAPHORE_H +#define ARROW_COUNTING_SEMAPHORE_H + +#include + +#include "arrow/status.h" + +namespace arrow { +namespace util { + +/// \brief Simple mutex-based counting semaphore with timeout +class ARROW_EXPORT CountingSemaphore { + public: + /// \brief Create an instance with initial_avail starting permits + /// + /// \param[in] initial_avail The semaphore will start with this many permits available + /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations + /// will return Status::Invalid if this timeout elapses + explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10); + ~CountingSemaphore(); + /// \brief Block until num_permits permits are available + Status Acquire(uint32_t num_permits); + /// \brief Make num_permits permits available + Status Release(uint32_t num_permits); + /// \brief Wait until num_waiters are waiting on permits + /// + /// This method is non-standard but useful in unit tests to ensure sequencing + Status WaitForWaiters(uint32_t num_waiters); + /// \brief Immediately time out any waiters + /// + /// This method will return Status::OK only if there were no waiters to time out. + /// Once closed any operation on this instance will return an invalid status. + Status Close(); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace util +} // namespace arrow + +#endif // ARROW_COUNTING_SEMAPHORE_H diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..ed38a4dcf7ab87aad4db906dd8b6abc058387f8e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +ARROW_EXPORT +void DebugTrap(); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..345c74d95b1015474cbfc7c2dd932df45107d593 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/decimal.h @@ -0,0 +1,298 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/basic_decimal.h" + +namespace arrow { + +/// Represents a signed 128-bit integer in two's complement. +/// Calculations wrap around and overflow is ignored. +/// The max decimal precision that can be safely represented is +/// 38 significant digits. +/// +/// For a discussion of the algorithms, look at Knuth's volume 2, +/// Semi-numerical Algorithms section 4.3.1. +/// +/// Adapted from the Apache ORC C++ implementation +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal128 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. Decimal128 +/// - has additional functionality on top of BasicDecimal128 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal128 : public BasicDecimal128 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal128::BasicDecimal128; + /// \endcond + + /// \brief constructor creates a Decimal128 from a BasicDecimal128. + constexpr Decimal128(const BasicDecimal128& value) noexcept // NOLINT runtime/explicit + : BasicDecimal128(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal128(const std::string& value); + + /// \brief Empty constructor creates a Decimal128 with a value of 0. + // This is required on some older compilers. + constexpr Decimal128() noexcept : BasicDecimal128() {} + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. 
Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal128& divisor) const { + std::pair result; + auto dstatus = BasicDecimal128::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return std::move(result); + } + + /// \brief Convert the Decimal128 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Cast this value to an int64_t. + explicit operator int64_t() const; + + /// \brief Convert a decimal string to a Decimal128 value, optionally including + /// precision and scale if they're passed in and not null. + static Status FromString(std::string_view s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal128* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 16. 
+ /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + /// \brief Convert Decimal128 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal128 out; + auto dstatus = BasicDecimal128::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return std::move(out); + } + + /// \brief Convert to a signed integer + template > + Result ToInteger() const { + constexpr auto min_value = std::numeric_limits::min(); + constexpr auto max_value = std::numeric_limits::max(); + const auto& self = *this; + if (self < min_value || self > max_value) { + return Status::Invalid("Invalid cast from Decimal128 to ", sizeof(T), + " byte integer"); + } + return static_cast(low_bits()); + } + + /// \brief Convert to a signed integer + template > + Status ToInteger(T* out) const { + return ToInteger().Value(out); + } + + /// \brief Convert to a floating-point number (scaled) + float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal128& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// Represents a signed 256-bit integer in two's complement. +/// The max decimal precision that can be safely represented is +/// 76 significant digits. +/// +/// The implementation is split into two parts : +/// +/// 1. BasicDecimal256 +/// - can be safely compiled to IR without references to libstdc++. +/// 2. 
Decimal256 +/// - (TODO) has additional functionality on top of BasicDecimal256 to deal with +/// strings and streams. +class ARROW_EXPORT Decimal256 : public BasicDecimal256 { + public: + /// \cond FALSE + // (need to avoid a duplicate definition in Sphinx) + using BasicDecimal256::BasicDecimal256; + /// \endcond + + /// \brief constructor creates a Decimal256 from a BasicDecimal256. + constexpr Decimal256(const BasicDecimal256& value) noexcept // NOLINT(runtime/explicit) + : BasicDecimal256(value) {} + + /// \brief Parse the number from a base 10 string representation. + explicit Decimal256(const std::string& value); + + /// \brief Empty constructor creates a Decimal256 with a value of 0. + // This is required on some older compilers. + constexpr Decimal256() noexcept : BasicDecimal256() {} + + /// \brief Convert the Decimal256 value to a base 10 decimal string with the given + /// scale. + std::string ToString(int32_t scale) const; + + /// \brief Convert the value to an integer string + std::string ToIntegerString() const; + + /// \brief Convert a decimal string to a Decimal256 value, optionally including + /// precision and scale if they're passed in and not null. 
+ static Status FromString(std::string_view s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const std::string& s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Status FromString(const char* s, Decimal256* out, int32_t* precision, + int32_t* scale = NULLPTR); + static Result FromString(std::string_view s); + static Result FromString(const std::string& s); + static Result FromString(const char* s); + + /// \brief Convert Decimal256 from one scale to another + Result Rescale(int32_t original_scale, int32_t new_scale) const { + Decimal256 out; + auto dstatus = BasicDecimal256::Rescale(original_scale, new_scale, &out); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return std::move(out); + } + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \return the pair of the quotient and the remainder + Result> Divide(const Decimal256& divisor) const { + std::pair result; + auto dstatus = BasicDecimal256::Divide(divisor, &result.first, &result.second); + ARROW_RETURN_NOT_OK(ToArrowStatus(dstatus)); + return std::move(result); + } + + /// \brief Convert from a big-endian byte representation. The length must be + /// between 1 and 32. + /// \return error status if the length is an invalid value + static Result FromBigEndian(const uint8_t* data, int32_t length); + + static Result FromReal(double real, int32_t precision, int32_t scale); + static Result FromReal(float real, int32_t precision, int32_t scale); + + /// \brief Convert to a floating-point number (scaled). + /// May return infinity in case of overflow. 
+ float ToFloat(int32_t scale) const; + /// \brief Convert to a floating-point number (scaled) + double ToDouble(int32_t scale) const; + + /// \brief Convert to a floating-point number (scaled) + template >> + T ToReal(int32_t scale) const { + static_assert(std::is_same_v || std::is_same_v, + "Unexpected floating-point type"); + if constexpr (std::is_same_v) { + return ToFloat(scale); + } else { + return ToDouble(scale); + } + } + + ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Decimal256& decimal); + + private: + /// Converts internal error code to Status + Status ToArrowStatus(DecimalStatus dstatus) const; +}; + +/// For an integer type, return the max number of decimal digits +/// (=minimal decimal precision) it can represent. +inline Result MaxDecimalDigitsForInteger(Type::type type_id) { + switch (type_id) { + case Type::INT8: + case Type::UINT8: + return 3; + case Type::INT16: + case Type::UINT16: + return 5; + case Type::INT32: + case Type::UINT32: + return 10; + case Type::INT64: + return 19; + case Type::UINT64: + return 20; + default: + break; + } + return Status::Invalid("Not an integer type: ", type_id); +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..dd9af907ecc374e94138e0fec20e87739a271658 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h @@ -0,0 +1,656 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This is a private header for number-to-string formatting utilities + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/double_conversion.h" +#include "arrow/util/macros.h" +#include "arrow/util/string.h" +#include "arrow/util/time.h" +#include "arrow/util/visibility.h" +#include "arrow/vendored/datetime.h" + +namespace arrow { +namespace internal { + +/// \brief The entry point for conversion to strings. 
+template +class StringFormatter; + +template +struct is_formattable { + template ::value_type> + static std::true_type Test(U*); + + template + static std::false_type Test(...); + + static constexpr bool value = decltype(Test(NULLPTR))::value; +}; + +template +using enable_if_formattable = enable_if_t::value, R>; + +template +using Return = decltype(std::declval()(std::string_view{})); + +///////////////////////////////////////////////////////////////////////// +// Boolean formatting + +template <> +class StringFormatter { + public: + explicit StringFormatter(const DataType* = NULLPTR) {} + + using value_type = bool; + + template + Return operator()(bool value, Appender&& append) { + if (value) { + const char string[] = "true"; + return append(std::string_view(string)); + } else { + const char string[] = "false"; + return append(std::string_view(string)); + } + } +}; + +///////////////////////////////////////////////////////////////////////// +// Decimals formatting + +template +class DecimalToStringFormatterMixin { + public: + explicit DecimalToStringFormatterMixin(const DataType* type) + : scale_(static_cast(type)->scale()) {} + + using value_type = typename TypeTraits::CType; + + template + Return operator()(const value_type& value, Appender&& append) { + return append(value.ToString(scale_)); + } + + private: + int32_t scale_; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +template <> +class StringFormatter + : public DecimalToStringFormatterMixin { + using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Integer formatting + +namespace detail { + +// A 2x100 direct table mapping integers in [0..99] to their decimal representations. 
+ARROW_EXPORT extern const char digit_pairs[]; + +// Based on fmtlib's format_int class: +// Write digits from right to left into a stack allocated buffer. +// \pre *cursor points to the byte after the one that will be written. +// \post *cursor points to the byte that was written. +inline void FormatOneChar(char c, char** cursor) { *(--(*cursor)) = c; } + +template +void FormatOneDigit(Int value, char** cursor) { + assert(value >= 0 && value <= 9); + FormatOneChar(static_cast('0' + value), cursor); +} + +// GH-35662: I don't know why but the following combination causes SEGV: +// * template implementation without inline +// * MinGW +// * Release build +template +inline void FormatTwoDigits(Int value, char** cursor) { + assert(value >= 0 && value <= 99); + auto digit_pair = &digit_pairs[value * 2]; + FormatOneChar(digit_pair[1], cursor); + FormatOneChar(digit_pair[0], cursor); +} + +template +void FormatAllDigits(Int value, char** cursor) { + assert(value >= 0); + while (value >= 100) { + FormatTwoDigits(value % 100, cursor); + value /= 100; + } + + if (value >= 10) { + FormatTwoDigits(value, cursor); + } else { + FormatOneDigit(value, cursor); + } +} + +template +void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) { + auto end = *cursor - pad; + FormatAllDigits(value, cursor); + while (*cursor > end) { + FormatOneChar(pad_char, cursor); + } +} + +template +std::string_view ViewDigitBuffer(const std::array& buffer, + char* cursor) { + auto buffer_end = buffer.data() + BUFFER_SIZE; + return {cursor, static_cast(buffer_end - cursor)}; +} + +template ::type> +constexpr UInt Abs(Int value) { + return value < 0 ? ~static_cast(value) + 1 : static_cast(value); +} + +template +constexpr size_t Digits10(Int value) { + return value <= 9 ? 
1 : Digits10(value / 10) + 1; +} + +} // namespace detail + +template +class IntToStringFormatterMixin { + public: + explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {} + + using value_type = typename ARROW_TYPE::c_type; + + template + Return operator()(value_type value, Appender&& append) { + constexpr size_t buffer_size = + detail::Digits10(std::numeric_limits::max()) + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + detail::FormatAllDigits(detail::Abs(value), &cursor); + if (value < 0) { + detail::FormatOneChar('-', &cursor); + } + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +///////////////////////////////////////////////////////////////////////// +// Floating-point formatting + +class ARROW_EXPORT FloatToStringFormatter { + public: + FloatToStringFormatter(); + FloatToStringFormatter(int flags, 
const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode); + ~FloatToStringFormatter(); + + // Returns the number of characters written + int FormatFloat(float v, char* out_buffer, int out_size); + int FormatFloat(double v, char* out_buffer, int out_size); + int FormatFloat(uint16_t v, char* out_buffer, int out_size); + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +template +class FloatToStringFormatterMixin : public FloatToStringFormatter { + public: + using value_type = typename ARROW_TYPE::c_type; + + static constexpr int buffer_size = 50; + + explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {} + + FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol, + char exp_character, int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode) + : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character, + decimal_in_shortest_low, decimal_in_shortest_high, + max_leading_padding_zeroes_in_precision_mode, + max_trailing_padding_zeroes_in_precision_mode) {} + + template + Return operator()(value_type value, Appender&& append) { + char buffer[buffer_size]; + int size = FormatFloat(value, buffer, buffer_size); + return append(std::string_view(buffer, size)); + } +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; + +template <> +class StringFormatter : public FloatToStringFormatterMixin { + public: + using FloatToStringFormatterMixin::FloatToStringFormatterMixin; +}; 
+ +///////////////////////////////////////////////////////////////////////// +// Temporal formatting + +namespace detail { + +constexpr size_t BufferSizeYYYY_MM_DD() { + // "-"? "99999-12-31" + return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 + + detail::Digits10(31); +} + +inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) { + FormatTwoDigits(static_cast(ymd.day()), cursor); + FormatOneChar('-', cursor); + FormatTwoDigits(static_cast(ymd.month()), cursor); + FormatOneChar('-', cursor); + auto year = static_cast(ymd.year()); + const auto is_neg_year = year < 0; + year = std::abs(year); + assert(year <= 99999); + FormatTwoDigits(year % 100, cursor); + year /= 100; + FormatTwoDigits(year % 100, cursor); + if (year >= 100) { + FormatOneDigit(year / 100, cursor); + } + if (is_neg_year) { + FormatOneChar('-', cursor); + } +} + +template +constexpr size_t BufferSizeHH_MM_SS() { + // "23:59:59" ("." "9"+)? + return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 + + detail::Digits10(Duration::period::den) - 1; +} + +template +void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss hms, char** cursor) { + constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1; + if (subsecond_digits != 0) { + FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor); + FormatOneChar('.', cursor); + } + FormatTwoDigits(hms.seconds().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.minutes().count(), cursor); + FormatOneChar(':', cursor); + FormatTwoDigits(hms.hours().count(), cursor); +} + +// Some out-of-bound datetime values would result in erroneous printing +// because of silent integer wraparound in the `arrow_vendored::date` library. +// +// To avoid such misprinting, we must therefore check the bounds explicitly. 
+// The bounds correspond to start of year -32767 and end of year 32767, +// respectively (-32768 is an invalid year value in `arrow_vendored::date`). +// +// Note these values are the same as documented for C++20: +// https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days +template +bool IsDateTimeInRange(Unit duration) { + constexpr Unit kMinIncl = + std::chrono::duration_cast(arrow_vendored::date::days{-12687428}); + constexpr Unit kMaxExcl = + std::chrono::duration_cast(arrow_vendored::date::days{11248738}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +// IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of +// nanoseconds cannot represent years outside of the [-32767, 32767] +// range, and the {kMinIncl, kMaxExcl} constants above would overflow. +constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; } + +template +bool IsTimeInRange(Unit duration) { + constexpr Unit kMinIncl = std::chrono::duration_cast(std::chrono::seconds{0}); + constexpr Unit kMaxExcl = std::chrono::duration_cast(std::chrono::seconds{86400}); + return duration >= kMinIncl && duration < kMaxExcl; +} + +template +Return FormatOutOfRange(RawValue&& raw_value, Appender&& append) { + // XXX locale-sensitive but good enough for now + std::string formatted = ""; + return append(std::move(formatted)); +} + +const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970}; + +} // namespace detail + +template <> +class StringFormatter : public IntToStringFormatterMixin { + using IntToStringFormatterMixin::IntToStringFormatterMixin; +}; + +class DateToStringFormatterMixin { + public: + explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {} + + protected: + template + Return FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) { + arrow_vendored::date::sys_days timepoint_days{since_epoch}; + + constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD(); + + std::array 
buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days}, + &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date32Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = arrow_vendored::date::days{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(since_epoch, std::forward(append)); + } +}; + +template <> +class StringFormatter : public DateToStringFormatterMixin { + public: + using value_type = typename Date64Type::c_type; + + using DateToStringFormatterMixin::DateToStringFormatterMixin; + + template + Return operator()(value_type value, Appender&& append) { + const auto since_epoch = std::chrono::milliseconds{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + return FormatDays(std::chrono::duration_cast(since_epoch), + std::forward(append)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = int64_t; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()), + timezone_(checked_cast(*type).timezone()) {} + + template + Return operator()(Duration, value_type value, Appender&& append) { + using arrow_vendored::date::days; + + const Duration since_epoch{value}; + if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) { + return detail::FormatOutOfRange(value, append); + } + + const auto timepoint = detail::kEpoch + since_epoch; + // Round days towards zero + // (the naive approach of using arrow_vendored::date::floor() would + // result in UB for very large negative timestamps, 
similarly as + // https://github.com/HowardHinnant/date/issues/696) + auto timepoint_days = std::chrono::time_point_cast(timepoint); + Duration since_midnight; + if (timepoint_days <= timepoint) { + // Year >= 1970 + since_midnight = timepoint - timepoint_days; + } else { + // Year < 1970 + since_midnight = days(1) - (timepoint_days - timepoint); + timepoint_days -= days(1); + } + + // YYYY_MM_DD " " HH_MM_SS "Z"? + constexpr size_t buffer_size = + detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS() + 1; + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + if (timezone_.size() > 0) { + detail::FormatOneChar('Z', &cursor); + } + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + detail::FormatOneChar(' ', &cursor); + detail::FormatYYYY_MM_DD(timepoint_days, &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + TimeUnit::type unit_; + std::string timezone_; +}; + +template +class StringFormatter> { + public: + using value_type = typename T::c_type; + + explicit StringFormatter(const DataType* type) + : unit_(checked_cast(*type).unit()) {} + + template + Return operator()(Duration, value_type count, Appender&& append) { + const Duration since_midnight{count}; + if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) { + return detail::FormatOutOfRange(count, append); + } + + constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS(); + + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor); + return append(detail::ViewDigitBuffer(buffer, cursor)); + } + + template + Return operator()(value_type value, Appender&& append) { + return util::VisitDuration(unit_, *this, value, std::forward(append)); + } + + private: + 
TimeUnit::type unit_; +}; + +template <> +class StringFormatter { + public: + using value_type = MonthIntervalType::c_type; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*'m'*/ 3 + /*negative signs*/ 1 + + /*months*/ detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval), &cursor); + if (interval < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = DayTimeIntervalType::DayMilliseconds; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*d, ms*/ 3 + /*negative signs*/ 2 + + /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('m', &cursor); + detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor); + if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +template <> +class StringFormatter { + public: + using value_type = MonthDayNanoIntervalType::MonthDayNanos; + + explicit StringFormatter(const DataType*) {} + + template + Return operator()(value_type interval, Appender&& append) { + constexpr size_t buffer_size = + /*m, d, ns*/ 4 + /*negative signs*/ 3 + + /*months/days*/ 2 * detail::Digits10(std::numeric_limits::max()) + + /*nanoseconds*/ 
detail::Digits10(std::numeric_limits::max()); + std::array buffer; + char* cursor = buffer.data() + buffer_size; + + detail::FormatOneChar('s', &cursor); + detail::FormatOneChar('n', &cursor); + detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor); + if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('d', &cursor); + detail::FormatAllDigits(detail::Abs(interval.days), &cursor); + if (interval.days < 0) detail::FormatOneChar('-', &cursor); + + detail::FormatOneChar('M', &cursor); + detail::FormatAllDigits(detail::Abs(interval.months), &cursor); + if (interval.months < 0) detail::FormatOneChar('-', &cursor); + + return append(detail::ViewDigitBuffer(buffer, cursor)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h new file mode 100644 index 0000000000000000000000000000000000000000..dd1c38a78216e3f3d0d8c3f42c63a7d43f4bab44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/hash_util.h @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace arrow { +namespace internal { + +// ---------------------------------------------------------------------- +// BEGIN Hash utilities from Boost + +namespace detail { + +#if defined(_MSC_VER) +#define ARROW_HASH_ROTL32(x, r) _rotl(x, r) +#else +#define ARROW_HASH_ROTL32(x, r) (x << r) | (x >> (32 - r)) +#endif + +template +inline void hash_combine_impl(SizeT& seed, SizeT value) { + seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2); +} + +inline void hash_combine_impl(uint32_t& h1, uint32_t k1) { + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + k1 *= c1; + k1 = ARROW_HASH_ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ARROW_HASH_ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; +} + +#undef ARROW_HASH_ROTL32 + +} // namespace detail + +template +inline void hash_combine(std::size_t& seed, T const& v) { + std::hash hasher; + return ::arrow::internal::detail::hash_combine_impl(seed, hasher(v)); +} + +// END Hash utilities from Boost +// ---------------------------------------------------------------------- + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe78be2470ddb846b5816be632e9921c041a23e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// "safe-math.h" includes from the Windows headers. +#include "arrow/util/windows_compatibility.h" +#include "arrow/vendored/portable-snippets/safe-math.h" +// clang-format off (avoid include reordering) +#include "arrow/util/windows_fixup.h" +// clang-format on + +namespace arrow { +namespace internal { + +// Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow +// with the signature `bool(T u, T v, T* out)` where T is an integer type. +// On overflow, these functions return true. Otherwise, false is returned +// and `out` is updated with the result of the operation. 
+ +#define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v); \ + } + +#define OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64) + +OPS_WITH_OVERFLOW(AddWithOverflow, add) +OPS_WITH_OVERFLOW(SubtractWithOverflow, sub) +OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul) +OPS_WITH_OVERFLOW(DivideWithOverflow, div) + +#undef OP_WITH_OVERFLOW +#undef OPS_WITH_OVERFLOW + +// Define function NegateWithOverflow with the signature `bool(T u, T* out)` +// where T is a signed integer type. On overflow, these functions return true. +// Otherwise, false is returned and `out` is updated with the result of the +// operation. 
+ +#define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u); \ + } + +#define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) + +SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg) + +#undef UNARY_OP_WITH_OVERFLOW +#undef SIGNED_UNARY_OPS_WITH_OVERFLOW + +/// Signed addition with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedAdd(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) + + static_cast(v)); +} + +/// Signed subtraction with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) - + static_cast(v)); +} + +/// Signed negation with well-defined behaviour on overflow (as unsigned) +template +SignedInt SafeSignedNegate(SignedInt u) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(~static_cast(u) + 1); +} + +/// Signed left shift with well-defined behaviour on negative numbers or overflow +template +SignedInt SafeLeftShift(SignedInt u, Shift shift) { + using UnsignedInt = typename std::make_unsigned::type; + return static_cast(static_cast(u) << shift); +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h new file mode 100644 index 
0000000000000000000000000000000000000000..5f5bbd169e2eb60e97958d7375f63c15ae5d9fe4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h @@ -0,0 +1,452 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifndef _WIN32 +#define ARROW_HAVE_SIGACTION 1 +#endif + +#include +#include +#include +#include +#include +#include + +#if ARROW_HAVE_SIGACTION +#include // Needed for struct sigaction +#endif + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow::internal { + +// NOTE: 8-bit path strings on Windows are encoded using UTF-8. +// Using MBCS would fail encoding some paths. 
+ +#if defined(_WIN32) +using NativePathString = std::wstring; +#else +using NativePathString = std::string; +#endif + +class ARROW_EXPORT PlatformFilename { + public: + struct Impl; + + ~PlatformFilename(); + PlatformFilename(); + PlatformFilename(const PlatformFilename&); + PlatformFilename(PlatformFilename&&); + PlatformFilename& operator=(const PlatformFilename&); + PlatformFilename& operator=(PlatformFilename&&); + explicit PlatformFilename(NativePathString path); + explicit PlatformFilename(const NativePathString::value_type* path); + + const NativePathString& ToNative() const; + std::string ToString() const; + + PlatformFilename Parent() const; + Result Real() const; + + // These functions can fail for character encoding reasons. + static Result FromString(std::string_view file_name); + Result Join(std::string_view child_name) const; + + PlatformFilename Join(const PlatformFilename& child_name) const; + + bool operator==(const PlatformFilename& other) const; + bool operator!=(const PlatformFilename& other) const; + + // Made public to avoid the proliferation of friend declarations. + const Impl* impl() const { return impl_.get(); } + + private: + std::unique_ptr impl_; + + explicit PlatformFilename(Impl impl); +}; + +/// Create a directory if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDir(const PlatformFilename& dir_path); + +/// Create a directory and its parents if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDirTree(const PlatformFilename& dir_path); + +/// Delete a directory's contents (but not the directory itself) if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirContents(const PlatformFilename& dir_path, + bool allow_not_found = true); + +/// Delete a directory tree if it exists. +/// +/// Return whether the directory existed. 
+ARROW_EXPORT +Result DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true); + +// Non-recursively list the contents of the given directory. +// The returned names are the children's base names, not including dir_path. +ARROW_EXPORT +Result> ListDir(const PlatformFilename& dir_path); + +/// Delete a file if it exists. +/// +/// Return whether the file existed. +ARROW_EXPORT +Result DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true); + +/// Return whether a file exists. +ARROW_EXPORT +Result FileExists(const PlatformFilename& path); + +// TODO expose this more publicly to make it available from io/file.h? +/// A RAII wrapper for a file descriptor. +/// +/// The underlying file descriptor is automatically closed on destruction. +/// Moving is supported with well-defined semantics. +/// Furthermore, closing is idempotent. +class ARROW_EXPORT FileDescriptor { + public: + FileDescriptor() = default; + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(FileDescriptor&&); + FileDescriptor& operator=(FileDescriptor&&); + + ~FileDescriptor(); + + Status Close(); + + /// May return -1 if closed or default-initialized + int fd() const { return fd_.load(); } + + /// Detach and return the underlying file descriptor + int Detach(); + + bool closed() const { return fd_.load() == -1; } + + protected: + static void CloseFromDestructor(int fd); + + std::atomic fd_{-1}; +}; + +/// Open a file for reading and return a file descriptor. +ARROW_EXPORT +Result FileOpenReadable(const PlatformFilename& file_name); + +/// Open a file for writing and return a file descriptor. +ARROW_EXPORT +Result FileOpenWritable(const PlatformFilename& file_name, + bool write_only = true, bool truncate = true, + bool append = false); + +/// Read from current file position. Return number of bytes read. +ARROW_EXPORT +Result FileRead(int fd, uint8_t* buffer, int64_t nbytes); +/// Read from given file position. Return number of bytes read. 
+ARROW_EXPORT +Result FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes); + +ARROW_EXPORT +Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes); +ARROW_EXPORT +Status FileTruncate(int fd, const int64_t size); + +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos); +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos, int whence); +ARROW_EXPORT +Result FileTell(int fd); +ARROW_EXPORT +Result FileGetSize(int fd); + +ARROW_EXPORT +Status FileClose(int fd); + +struct Pipe { + FileDescriptor rfd; + FileDescriptor wfd; + + Status Close() { return rfd.Close() & wfd.Close(); } +}; + +ARROW_EXPORT +Result CreatePipe(); + +ARROW_EXPORT +Status SetPipeFileDescriptorNonBlocking(int fd); + +class ARROW_EXPORT SelfPipe { + public: + static Result> Make(bool signal_safe); + virtual ~SelfPipe(); + + /// \brief Wait for a wakeup. + /// + /// Status::Invalid is returned if the pipe has been shutdown. + /// Otherwise the next sent payload is returned. + virtual Result Wait() = 0; + + /// \brief Wake up the pipe by sending a payload. + /// + /// This method is async-signal-safe if `signal_safe` was set to true. + virtual void Send(uint64_t payload) = 0; + + /// \brief Wake up the pipe and shut it down. 
+ virtual Status Shutdown() = 0; +}; + +ARROW_EXPORT +int64_t GetPageSize(); + +struct MemoryRegion { + void* addr; + size_t size; +}; + +ARROW_EXPORT +Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes, + void** new_addr); +ARROW_EXPORT +Status MemoryAdviseWillNeed(const std::vector& regions); + +ARROW_EXPORT +Result GetEnvVar(const char* name); +ARROW_EXPORT +Result GetEnvVar(const std::string& name); +ARROW_EXPORT +Result GetEnvVarNative(const char* name); +ARROW_EXPORT +Result GetEnvVarNative(const std::string& name); + +ARROW_EXPORT +Status SetEnvVar(const char* name, const char* value); +ARROW_EXPORT +Status SetEnvVar(const std::string& name, const std::string& value); +ARROW_EXPORT +Status DelEnvVar(const char* name); +ARROW_EXPORT +Status DelEnvVar(const std::string& name); + +ARROW_EXPORT +std::string ErrnoMessage(int errnum); +#if _WIN32 +ARROW_EXPORT +std::string WinErrorMessage(int errnum); +#endif + +ARROW_EXPORT +std::shared_ptr StatusDetailFromErrno(int errnum); +ARROW_EXPORT +std::optional ErrnoFromStatusDetail(const StatusDetail& detail); +#if _WIN32 +ARROW_EXPORT +std::shared_ptr StatusDetailFromWinError(int errnum); +#endif +ARROW_EXPORT +std::shared_ptr StatusDetailFromSignal(int signum); + +template +Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromErrno(int errnum, Args&&... args) { + return StatusFromErrno(errnum, StatusCode::IOError, std::forward(args)...); +} + +#if _WIN32 +template +Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromWinError(int errnum, Args&&... 
args) { + return StatusFromWinError(errnum, StatusCode::IOError, std::forward(args)...); +} +#endif + +template +Status StatusFromSignal(int signum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum), + std::forward(args)...); +} + +template +Status CancelledFromSignal(int signum, Args&&... args) { + return StatusFromSignal(signum, StatusCode::Cancelled, std::forward(args)...); +} + +ARROW_EXPORT +int ErrnoFromStatus(const Status&); + +// Always returns 0 on non-Windows platforms (for Python). +ARROW_EXPORT +int WinErrorFromStatus(const Status&); + +ARROW_EXPORT +int SignalFromStatus(const Status&); + +class ARROW_EXPORT TemporaryDir { + public: + ~TemporaryDir(); + + /// '/'-terminated path to the temporary dir + const PlatformFilename& path() { return path_; } + + /// Create a temporary subdirectory in the system temporary dir, + /// named starting with `prefix`. + static Result> Make(const std::string& prefix); + + private: + PlatformFilename path_; + + explicit TemporaryDir(PlatformFilename&&); +}; + +class ARROW_EXPORT SignalHandler { + public: + using Callback = void (*)(int); + + SignalHandler(); + explicit SignalHandler(Callback cb); +#if ARROW_HAVE_SIGACTION + explicit SignalHandler(const struct sigaction& sa); +#endif + + Callback callback() const; +#if ARROW_HAVE_SIGACTION + const struct sigaction& action() const; +#endif + + protected: +#if ARROW_HAVE_SIGACTION + // Storing the full sigaction allows to restore the entire signal handling + // configuration. + struct sigaction sa_; +#else + Callback cb_; +#endif +}; + +/// \brief Return the current handler for the given signal number. +ARROW_EXPORT +Result GetSignalHandler(int signum); + +/// \brief Set a new handler for the given signal number. +/// +/// The old signal handler is returned. +ARROW_EXPORT +Result SetSignalHandler(int signum, const SignalHandler& handler); + +/// \brief Reinstate the signal handler +/// +/// For use in signal handlers. 
This is needed on platforms without sigaction() +/// such as Windows, as the default signal handler is restored there as +/// soon as a signal is raised. +ARROW_EXPORT +void ReinstateSignalHandler(int signum, SignalHandler::Callback handler); + +/// \brief Send a signal to the current process +/// +/// The thread which will receive the signal is unspecified. +ARROW_EXPORT +Status SendSignal(int signum); + +/// \brief Send a signal to the given thread +/// +/// This function isn't supported on Windows. +ARROW_EXPORT +Status SendSignalToThread(int signum, uint64_t thread_id); + +/// \brief Get an unpredictable random seed +/// +/// This function may be slightly costly, so should only be used to initialize +/// a PRNG, not to generate a large amount of random numbers. +/// It is better to use this function rather than std::random_device, unless +/// absolutely necessary (e.g. to generate a cryptographic secret). +ARROW_EXPORT +int64_t GetRandomSeed(); + +/// \brief Get the current thread id +/// +/// In addition to having the same properties as std::thread, the returned value +/// is a regular integer value, which is more convenient than an opaque type. +ARROW_EXPORT +uint64_t GetThreadId(); + +/// \brief Get the current memory used by the current process in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetCurrentRSS(); + +/// \brief Get the total memory available to the system in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetTotalMemoryBytes(); + +/// \brief Load a dynamic library +/// +/// This wraps dlopen() except on Windows, where LoadLibrary() is called. +/// These two platforms handle absolute paths consistently; relative paths +/// or the library's bare name may be handled but inconsistently. +/// +/// \return An opaque handle for the dynamic library, which can be used for +/// subsequent symbol lookup. 
Nullptr will never be returned; instead +/// an error will be raised. +ARROW_EXPORT Result LoadDynamicLibrary(const PlatformFilename& path); + +/// \brief Load a dynamic library +/// +/// An overload taking null terminated string. +ARROW_EXPORT Result LoadDynamicLibrary(const char* path); + +/// \brief Retrieve a symbol by name from a library handle. +/// +/// This wraps dlsym() except on Windows, where GetProcAddress() is called. +/// +/// \return The address associated with the named symbol. Nullptr will never be +/// returned; instead an error will be raised. +ARROW_EXPORT Result GetSymbol(void* handle, const char* name); + +template +Result GetSymbolAs(void* handle, const char* name) { + ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name)); + return reinterpret_cast(sym); +} + +} // namespace arrow::internal diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5e716d0fd113d339a34b16e6f7353a169829e3e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h @@ -0,0 +1,568 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +class Iterator; + +template +struct IterationTraits { + /// \brief a reserved value which indicates the end of iteration. By + /// default this is NULLPTR since most iterators yield pointer types. + /// Specialize IterationTraits if different end semantics are required. + /// + /// Note: This should not be used to determine if a given value is a + /// terminal value. Use IsIterationEnd (which uses IsEnd) instead. This + /// is only for returning terminal values. + static T End() { return T(NULLPTR); } + + /// \brief Checks to see if the value is a terminal value. + /// A method is used here since T is not necessarily comparable in many + /// cases even though it has a distinct final value + static bool IsEnd(const T& val) { return val == End(); } +}; + +template +T IterationEnd() { + return IterationTraits::End(); +} + +template +bool IsIterationEnd(const T& val) { + return IterationTraits::IsEnd(val); +} + +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of optional, + /// nullopt indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. + static std::optional End() { return std::nullopt; } + + /// \brief by default when iterating through a sequence of optional, + /// nullopt (!has_value()) indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. 
+ static bool IsEnd(const std::optional& val) { return !val.has_value(); } + + // TODO(bkietz) The range-for loop over Iterator> yields + // Result> which is unnecessary (since only the unyielded end optional + // is nullopt. Add IterationTraits::GetRangeElement() to handle this case +}; + +/// \brief A generic Iterator that can return errors +template +class Iterator : public util::EqualityComparable> { + public: + /// \brief Iterator may be constructed from any type which has a member function + /// with signature Result Next(); + /// End of iterator is signalled by returning IteratorTraits::End(); + /// + /// The argument is moved or copied to the heap and kept in a unique_ptr. Only + /// its destructor and its Next method (which are stored in function pointers) are + /// referenced after construction. + /// + /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using + /// an abstract template base class: instead of being inlined as usual for a template + /// function the base's virtual destructor will be exported, leading to multiple + /// definition errors when linking to any other TU where the base is instantiated. + template + explicit Iterator(Wrapped has_next) + : ptr_(new Wrapped(std::move(has_next)), Delete), next_(Next) {} + + Iterator() : ptr_(NULLPTR, [](void*) {}) {} + + /// \brief Return the next element of the sequence, IterationTraits::End() when the + /// iteration is completed. Calling this on a default constructed Iterator + /// will result in undefined behavior. + Result Next() { return next_(ptr_.get()); } + + /// Pass each element of the sequence to a visitor. Will return any error status + /// returned by the visitor, terminating iteration. 
+ template + Status Visit(Visitor&& visitor) { + for (;;) { + ARROW_ASSIGN_OR_RAISE(auto value, Next()); + + if (IsIterationEnd(value)) break; + + ARROW_RETURN_NOT_OK(visitor(std::move(value))); + } + + return Status::OK(); + } + + /// Iterators will only compare equal if they are both null. + /// Equality comparability is required to make an Iterator of Iterators + /// (to check for the end condition). + bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; } + + explicit operator bool() const { return ptr_ != NULLPTR; } + + class RangeIterator { + public: + RangeIterator() : value_(IterationTraits::End()) {} + + explicit RangeIterator(Iterator i) + : value_(IterationTraits::End()), + iterator_(std::make_shared(std::move(i))) { + Next(); + } + + bool operator!=(const RangeIterator& other) const { return value_ != other.value_; } + + RangeIterator& operator++() { + Next(); + return *this; + } + + Result operator*() { + ARROW_RETURN_NOT_OK(value_.status()); + + auto value = std::move(value_); + value_ = IterationTraits::End(); + return value; + } + + private: + void Next() { + if (!value_.ok()) { + value_ = IterationTraits::End(); + return; + } + value_ = iterator_->Next(); + } + + Result value_; + std::shared_ptr iterator_; + }; + + RangeIterator begin() { return RangeIterator(std::move(*this)); } + + RangeIterator end() { return RangeIterator(); } + + /// \brief Move every element of this iterator into a vector. + Result> ToVector() { + std::vector out; + for (auto maybe_element : *this) { + ARROW_ASSIGN_OR_RAISE(auto element, maybe_element); + out.push_back(std::move(element)); + } + // ARROW-8193: On gcc-4.8 without the explicit move it tries to use the + // copy constructor, which may be deleted on the elements of type T + return std::move(out); + } + + private: + /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and + /// deletes that. 
+ template + static void Delete(void* ptr) { + delete static_cast(ptr); + } + + /// Implementation of Next: Casts from void* to the wrapped type and invokes that + /// type's Next member function. + template + static Result Next(void* ptr) { + return static_cast(ptr)->Next(); + } + + /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first + /// casts from void* to a pointer to the wrapped type then deletes that. + std::unique_ptr ptr_; + + /// next_ is a function pointer which first casts from void* to a pointer to the wrapped + /// type then invokes its Next member function. + Result (*next_)(void*) = NULLPTR; +}; + +template +struct TransformFlow { + using YieldValueType = T; + + TransformFlow(YieldValueType value, bool ready_for_next) + : finished_(false), + ready_for_next_(ready_for_next), + yield_value_(std::move(value)) {} + TransformFlow(bool finished, bool ready_for_next) + : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {} + + bool HasValue() const { return yield_value_.has_value(); } + bool Finished() const { return finished_; } + bool ReadyForNext() const { return ready_for_next_; } + T Value() const { return *yield_value_; } + + bool finished_ = false; + bool ready_for_next_ = false; + std::optional yield_value_; +}; + +struct TransformFinish { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(true, true); + } +}; + +struct TransformSkip { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(false, true); + } +}; + +template +TransformFlow TransformYield(T value = {}, bool ready_for_next = true) { + return TransformFlow(std::move(value), ready_for_next); +} + +template +using Transformer = std::function>(T)>; + +template +class TransformIterator { + public: + explicit TransformIterator(Iterator it, Transformer transformer) + : it_(std::move(it)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Result 
Next() { + while (!finished_) { + ARROW_ASSIGN_OR_RAISE(std::optional next, Pump()); + if (next.has_value()) { + return std::move(*next); + } + ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next()); + } + return IterationTraits::End(); + } + + private: + // Calls the transform function on the current value. Can return in several ways + // * If the next value is requested (e.g. skip) it will return an empty optional + // * If an invalid status is encountered that will be returned + // * If finished it will return IterationTraits::End() + // * If a value is returned by the transformer that will be returned + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + auto next_res = transformer_(*last_value_); + if (!next_res.ok()) { + finished_ = true; + return next_res.status(); + } + auto next = *next_res; + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + Iterator it_; + Transformer transformer_; + std::optional last_value_; + bool finished_ = false; +}; + +/// \brief Transforms an iterator according to a transformer, returning a new Iterator. +/// +/// The transformer will be called on each element of the source iterator and for each +/// call it can yield a value, skip, or finish the iteration. When yielding a value the +/// transformer can choose to consume the source item (the default, ready_for_next = true) +/// or to keep it and it will be called again on the same value. +/// +/// This is essentially a more generic form of the map operation that can return 0, 1, or +/// many values for each of the source items. +/// +/// The transformer will be exposed to the end of the source sequence +/// (IterationTraits::End) in case it needs to return some penultimate item(s). 
+/// +/// Any invalid status returned by the transformer will be returned immediately. +template +Iterator MakeTransformedIterator(Iterator it, Transformer op) { + return Iterator(TransformIterator(std::move(it), std::move(op))); +} + +template +struct IterationTraits> { + // The end condition for an Iterator of Iterators is a default constructed (null) + // Iterator. + static Iterator End() { return Iterator(); } + static bool IsEnd(const Iterator& val) { return !val; } +}; + +template +class FunctionIterator { + public: + explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {} + + Result Next() { return fn_(); } + + private: + Fn fn_; +}; + +/// \brief Construct an Iterator which invokes a callable on Next() +template ::ValueType> +Iterator MakeFunctionIterator(Fn fn) { + return Iterator(FunctionIterator(std::move(fn))); +} + +template +Iterator MakeEmptyIterator() { + return MakeFunctionIterator([]() -> Result { return IterationTraits::End(); }); +} + +template +Iterator MakeErrorIterator(Status s) { + return MakeFunctionIterator([s]() -> Result { + ARROW_RETURN_NOT_OK(s); + return IterationTraits::End(); + }); +} + +/// \brief Simple iterator which yields the elements of a std::vector +template +class VectorIterator { + public: + explicit VectorIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return IterationTraits::End(); + } + return std::move(elements_[i_++]); + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorIterator(std::vector v) { + return Iterator(VectorIterator(std::move(v))); +} + +/// \brief Simple iterator which yields *pointers* to the elements of a std::vector. 
+/// This is provided to support T where IterationTraits::End is not specialized +template +class VectorPointingIterator { + public: + explicit VectorPointingIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return NULLPTR; + } + return &elements_[i_++]; + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorPointingIterator(std::vector v) { + return Iterator(VectorPointingIterator(std::move(v))); +} + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template +class MapIterator { + public: + explicit MapIterator(Fn map, Iterator it) + : map_(std::move(map)), it_(std::move(it)) {} + + Result Next() { + ARROW_ASSIGN_OR_RAISE(I i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + return map_(std::move(i)); + } + + private: + Fn map_; + Iterator it_; +}; + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template , + typename To = internal::call_traits::return_type> +Iterator MakeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +/// \brief Like MapIterator, but where the function can fail. 
+template , + typename To = typename internal::call_traits::return_type::ValueType> +Iterator MakeMaybeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +struct FilterIterator { + enum Action { ACCEPT, REJECT }; + + template + static Result> Reject() { + return std::make_pair(IterationTraits::End(), REJECT); + } + + template + static Result> Accept(To out) { + return std::make_pair(std::move(out), ACCEPT); + } + + template + static Result> MaybeAccept(Result maybe_out) { + return std::move(maybe_out).Map(Accept); + } + + template + static Result> Error(Status s) { + return s; + } + + template + class Impl { + public: + explicit Impl(Fn filter, Iterator it) : filter_(filter), it_(std::move(it)) {} + + Result Next() { + To out = IterationTraits::End(); + Action action; + + for (;;) { + ARROW_ASSIGN_OR_RAISE(From i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i))); + + if (action == ACCEPT) return out; + } + } + + private: + Fn filter_; + Iterator it_; + }; +}; + +/// \brief Like MapIterator, but where the function can fail or reject elements. +template < + typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>, + typename Ret = typename internal::call_traits::return_type::ValueType, + typename To = typename std::tuple_element<0, Ret>::type, + typename Enable = typename std::enable_if::type, FilterIterator::Action>::value>::type> +Iterator MakeFilterIterator(Fn filter, Iterator it) { + return Iterator( + FilterIterator::Impl(std::move(filter), std::move(it))); +} + +/// \brief FlattenIterator takes an iterator generating iterators and yields a +/// unified iterator that flattens/concatenates in a single stream. 
+template +class FlattenIterator { + public: + explicit FlattenIterator(Iterator> it) : parent_(std::move(it)) {} + + Result Next() { + if (IsIterationEnd(child_)) { + // Pop from parent's iterator. + ARROW_ASSIGN_OR_RAISE(child_, parent_.Next()); + + // Check if final iteration reached. + if (IsIterationEnd(child_)) { + return IterationTraits::End(); + } + + return Next(); + } + + // Pop from child_ and check for depletion. + ARROW_ASSIGN_OR_RAISE(T out, child_.Next()); + if (IsIterationEnd(out)) { + // Reset state such that we pop from parent on the recursive call + child_ = IterationTraits>::End(); + + return Next(); + } + + return out; + } + + private: + Iterator> parent_; + Iterator child_ = IterationTraits>::End(); +}; + +template +Iterator MakeFlattenIterator(Iterator> it) { + return Iterator(FlattenIterator(std::move(it))); +} + +template +Iterator MakeIteratorFromReader( + const std::shared_ptr& reader) { + return MakeFunctionIterator([reader] { return reader->Next(); }); +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..57ade11e758684777fc8e2828c9c3d1b9deb0bee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \brief A container for key-value pair type metadata. Not thread-safe +class ARROW_EXPORT KeyValueMetadata { + public: + KeyValueMetadata(); + KeyValueMetadata(std::vector keys, std::vector values); + explicit KeyValueMetadata(const std::unordered_map& map); + + static std::shared_ptr Make(std::vector keys, + std::vector values); + + void ToUnorderedMap(std::unordered_map* out) const; + void Append(std::string key, std::string value); + + Result Get(std::string_view key) const; + bool Contains(std::string_view key) const; + // Note that deleting may invalidate known indices + Status Delete(std::string_view key); + Status Delete(int64_t index); + Status DeleteMany(std::vector indices); + Status Set(std::string key, std::string value); + + void reserve(int64_t n); + + int64_t size() const; + const std::string& key(int64_t i) const; + const std::string& value(int64_t i) const; + const std::vector& keys() const { return keys_; } + const std::vector& values() const { return values_; } + + std::vector> sorted_pairs() const; + + /// \brief Perform linear search for key, returning -1 if not found + int FindKey(std::string_view key) const; + + std::shared_ptr Copy() const; + + /// \brief Return a new KeyValueMetadata by combining the passed metadata + /// with this KeyValueMetadata. 
Colliding keys will be overridden by the + /// passed metadata. Assumes keys in both containers are unique + std::shared_ptr Merge(const KeyValueMetadata& other) const; + + bool Equals(const KeyValueMetadata& other) const; + std::string ToString() const; + + private: + std::vector keys_; + std::vector values_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(KeyValueMetadata); +}; + +/// \brief Create a KeyValueMetadata instance +/// +/// \param pairs key-value mapping +ARROW_EXPORT std::shared_ptr key_value_metadata( + const std::unordered_map& pairs); + +/// \brief Create a KeyValueMetadata instance +/// +/// \param keys sequence of metadata keys +/// \param values sequence of corresponding metadata values +ARROW_EXPORT std::shared_ptr key_value_metadata( + std::vector keys, std::vector values); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h new file mode 100644 index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +namespace arrow { +namespace internal { + +#if __cpp_lib_launder +using std::launder; +#else +template +constexpr T* launder(T* p) noexcept { + return p; +} +#endif + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..7ee87c5d6ac8160c921ce83153e30112335ad7fe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/math_constants.h @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include + +// Not provided by default in MSVC, +// and _USE_MATH_DEFINES is not reliable with unity builds +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 +#endif +#ifndef M_PI_4 +#define M_PI_4 0.785398163397448309616 +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..4250d0694b7dd283aad6bbb159bd3e36328fe7ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +// A helper function for doing memcpy with multiple threads. This is required +// to saturate the memory bandwidth of modern cpus. +void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes, + uintptr_t block_size, int num_threads); + +// A helper function for checking if two wrapped objects implementing `Equals` +// are equal. 
+template +bool SharedPtrEquals(const std::shared_ptr& left, const std::shared_ptr& right) { + if (left == right) return true; + if (left == NULLPTR || right == NULLPTR) return false; + return left->Equals(*right); +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/parallel.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..80f60fbdb3676a181f1d21b73f2e0d108eb58b78 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/parallel.h @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/util/functional.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/vector.h" + +namespace arrow { +namespace internal { + +// A parallelizer that takes a `Status(int)` function and calls it with +// arguments between 0 and `num_tasks - 1`, on an arbitrary number of threads. 
+ +template +Status ParallelFor(int num_tasks, FUNCTION&& func, + Executor* executor = internal::GetCpuThreadPool()) { + std::vector> futures(num_tasks); + + for (int i = 0; i < num_tasks; ++i) { + ARROW_ASSIGN_OR_RAISE(futures[i], executor->Submit(func, i)); + } + auto st = Status::OK(); + for (auto& fut : futures) { + st &= fut.status(); + } + return st; +} + +template ::ValueType> +Future> ParallelForAsync( + std::vector inputs, FUNCTION&& func, + Executor* executor = internal::GetCpuThreadPool()) { + std::vector> futures(inputs.size()); + for (size_t i = 0; i < inputs.size(); ++i) { + ARROW_ASSIGN_OR_RAISE(futures[i], executor->Submit(func, i, std::move(inputs[i]))); + } + return All(std::move(futures)) + .Then([](const std::vector>& results) -> Result> { + return UnwrapOrRaise(results); + }); +} + +// A parallelizer that takes a `Status(int)` function and calls it with +// arguments between 0 and `num_tasks - 1`, in sequence or in parallel, +// depending on the input boolean. + +template +Status OptionalParallelFor(bool use_threads, int num_tasks, FUNCTION&& func, + Executor* executor = internal::GetCpuThreadPool()) { + if (use_threads) { + return ParallelFor(num_tasks, std::forward(func), executor); + } else { + for (int i = 0; i < num_tasks; ++i) { + RETURN_NOT_OK(func(i)); + } + return Status::OK(); + } +} + +// A parallelizer that takes a `Result(int index, T item)` function and +// calls it with each item from the input array, in sequence or in parallel, +// depending on the input boolean. 
+ +template ::ValueType> +Future> OptionalParallelForAsync( + bool use_threads, std::vector inputs, FUNCTION&& func, + Executor* executor = internal::GetCpuThreadPool()) { + if (use_threads) { + return ParallelForAsync(std::move(inputs), std::forward(func), executor); + } else { + std::vector result(inputs.size()); + for (size_t i = 0; i < inputs.size(); ++i) { + ARROW_ASSIGN_OR_RAISE(result[i], func(i, inputs[i])); + } + return result; + } +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h new file mode 100644 index 0000000000000000000000000000000000000000..768f2328200fb2635213358226cfdb3f9273c808 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/vendored/pcg/pcg_random.hpp" // IWYU pragma: export + +namespace arrow { +namespace random { + +using pcg32 = ::arrow_vendored::pcg32; +using pcg64 = ::arrow_vendored::pcg64; +using pcg32_fast = ::arrow_vendored::pcg32_fast; +using pcg64_fast = ::arrow_vendored::pcg64_fast; +using pcg32_oneseq = ::arrow_vendored::pcg32_oneseq; +using pcg64_oneseq = ::arrow_vendored::pcg64_oneseq; + +} // namespace random +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h new file mode 100644 index 0000000000000000000000000000000000000000..82cea473c5b277323772c6914ee28b1903b5240d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
template + +#pragma once + +#include +#include "arrow/util/string.h" + +using arrow::internal::ToChars; + +namespace arrow { +namespace internal { + +namespace detail { + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) { + TuplePrinter::Print(os, t); + *os << std::get(t); + } +}; + +template +struct TuplePrinter { + static void Print(OStream* os, const Tuple& t) {} +}; + +} // namespace detail + +// Print elements from a tuple to a stream, in order. +// Typical use is to pack a bunch of existing values with std::forward_as_tuple() +// before passing it to this function. +template +void PrintTuple(OStream* os, const std::tuple& tup) { + detail::TuplePrinter, sizeof...(Args)>::Print(os, tup); +} + +template +struct PrintVector { + const Range& range_; + const Separator& separator_; + + template // template to dodge inclusion of + friend Os& operator<<(Os& os, PrintVector l) { + bool first = true; + os << "["; + for (const auto& element : l.range_) { + if (first) { + first = false; + } else { + os << l.separator_; + } + os << ToChars(element); // use ToChars to avoid locale dependence + } + os << "]"; + return os; + } +}; +template +PrintVector(const Range&, const Separator&) -> PrintVector; +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h new file mode 100644 index 0000000000000000000000000000000000000000..6c71fa6e155e8818801db2ccb18127d75d6364a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/queue.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/vendored/ProducerConsumerQueue.h" + +namespace arrow { +namespace util { + +template +using SpscQueue = arrow_vendored::folly::ProducerConsumerQueue; + +} +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h new file mode 100644 index 0000000000000000000000000000000000000000..20553287985423970c228308742a7f85464a4a87 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h @@ -0,0 +1,258 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace arrow::internal { + +/// Create a vector containing the values from start up to stop +template +std::vector Iota(T start, T stop) { + if (start > stop) { + return {}; + } + std::vector result(static_cast(stop - start)); + std::iota(result.begin(), result.end(), start); + return result; +} + +/// Create a vector containing the values from 0 up to length +template +std::vector Iota(T length) { + return Iota(static_cast(0), length); +} + +/// Create a range from a callable which takes a single index parameter +/// and returns the value of iterator on each call and a length. +/// Only iterators obtained from the same range should be compared, the +/// behaviour generally similar to other STL containers. +template +class LazyRange { + private: + // callable which generates the values + // has to be defined at the beginning of the class for type deduction + const Generator gen_; + // the length of the range + int64_t length_; +#ifdef _MSC_VER + // workaround to VS2010 not supporting decltype properly + // see https://stackoverflow.com/questions/21782846/decltype-for-class-member-function + static Generator gen_static_; +#endif + + public: +#ifdef _MSC_VER + using return_type = decltype(gen_static_(0)); +#else + using return_type = decltype(gen_(0)); +#endif + + /// Construct a new range from a callable and length + LazyRange(Generator gen, int64_t length) : gen_(gen), length_(length) {} + + // Class of the dependent iterator, created implicitly by begin and end + class RangeIter { + public: + using difference_type = int64_t; + using value_type = return_type; + using reference = const value_type&; + using pointer = const value_type*; + using iterator_category = std::forward_iterator_tag; + +#ifdef _MSC_VER + // msvc complains about unchecked iterators, + // see 
https://stackoverflow.com/questions/21655496/error-c4996-checked-iterators + using _Unchecked_type = typename LazyRange::RangeIter; +#endif + + RangeIter() = delete; + RangeIter(const RangeIter& other) = default; + RangeIter& operator=(const RangeIter& other) = default; + + RangeIter(const LazyRange& range, int64_t index) + : range_(&range), index_(index) {} + + const return_type operator*() const { return range_->gen_(index_); } + + RangeIter operator+(difference_type length) const { + return RangeIter(*range_, index_ + length); + } + + // pre-increment + RangeIter& operator++() { + ++index_; + return *this; + } + + // post-increment + RangeIter operator++(int) { + auto copy = RangeIter(*this); + ++index_; + return copy; + } + + bool operator==(const typename LazyRange::RangeIter& other) const { + return this->index_ == other.index_ && this->range_ == other.range_; + } + + bool operator!=(const typename LazyRange::RangeIter& other) const { + return this->index_ != other.index_ || this->range_ != other.range_; + } + + int64_t operator-(const typename LazyRange::RangeIter& other) const { + return this->index_ - other.index_; + } + + bool operator<(const typename LazyRange::RangeIter& other) const { + return this->index_ < other.index_; + } + + private: + // parent range reference + const LazyRange* range_; + // current index + int64_t index_; + }; + + friend class RangeIter; + + // Create a new begin const iterator + RangeIter begin() { return RangeIter(*this, 0); } + + // Create a new end const iterator + RangeIter end() { return RangeIter(*this, length_); } +}; + +/// Helper function to create a lazy range from a callable (e.g. lambda) and length +template +LazyRange MakeLazyRange(Generator&& gen, int64_t length) { + return LazyRange(std::forward(gen), length); +} + +/// \brief A helper for iterating multiple ranges simultaneously, similar to C++23's +/// zip() view adapter modelled after python's built-in zip() function. 
+/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// std::function()> GetNames = ... +/// for (auto [table, name] : Zip(tables, GetNames())) { +/// static_assert(std::is_same_v); +/// static_assert(std::is_same_v); +/// // temporaries (like this vector of strings) are kept alive for the +/// // duration of a loop and are safely movable). +/// RegisterTableWithName(std::move(name), &table); +/// } +/// \endcode +/// +/// The zipped sequence ends as soon as any of its member ranges ends. +/// +/// Always use `auto` for the loop's declaration; it will always be a tuple +/// of references so for example using `const auto&` will compile but will +/// *look* like forcing const-ness even though the members of the tuple are +/// still mutable references. +/// +/// NOTE: we *could* make Zip a more full fledged range and enable things like +/// - gtest recognizing it as a container; it currently doesn't since Zip is +/// always mutable so this breaks: +/// EXPECT_THAT(Zip(std::vector{0}, std::vector{1}), +/// ElementsAre(std::tuple{0, 1})); +/// - letting it be random access when possible so we can do things like *sort* +/// parallel ranges +/// - ... +/// +/// However doing this will increase the compile time overhead of using Zip as +/// long as we're still using headers. Therefore until we can use c++20 modules: +/// *don't* extend Zip. +template +struct Zip; + +template +Zip(Ranges&&...) -> Zip, std::index_sequence_for>; + +template +struct Zip, std::index_sequence> { + explicit Zip(Ranges... ranges) : ranges_(std::forward(ranges)...) 
{} + + std::tuple ranges_; + + using sentinel = std::tuple(ranges_)))...>; + constexpr sentinel end() { return {std::end(std::get(ranges_))...}; } + + struct iterator : std::tuple(ranges_)))...> { + using std::tuple(ranges_)))...>::tuple; + + constexpr auto operator*() { + return std::tuple(*this))...>{*std::get(*this)...}; + } + + constexpr iterator& operator++() { + (++std::get(*this), ...); + return *this; + } + + constexpr bool operator!=(const sentinel& s) const { + bool all_iterators_valid = (... && (std::get(*this) != std::get(s))); + return all_iterators_valid; + } + }; + constexpr iterator begin() { return {std::begin(std::get(ranges_))...}; } +}; + +/// \brief A lazy sequence of integers which starts from 0 and never stops. +/// +/// This can be used in conjunction with Zip() to emulate python's built-in +/// enumerate() function: +/// +/// \code {.cpp} +/// const std::vector& tables = ... +/// for (auto [i, table] : Zip(Enumerate<>, tables)) { +/// std::cout << "#" << i << ": " << table.name() << std::endl; +/// } +/// \endcode +template +constexpr auto Enumerate = [] { + struct { + struct sentinel {}; + constexpr sentinel end() const { return {}; } + + struct iterator { + I value{0}; + + constexpr I operator*() { return value; } + + constexpr iterator& operator++() { + ++value; + return *this; + } + + constexpr std::true_type operator!=(sentinel) const { return {}; } + }; + constexpr iterator begin() const { return {}; } + } out; + + return out; +}(); + +} // namespace arrow::internal diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h new file mode 100644 index 0000000000000000000000000000000000000000..590fbac7153889129e7bca7652125980cb4457cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// Match regex against target and produce string_views out of matches. +inline bool RegexMatch(const std::regex& regex, std::string_view target, + std::initializer_list out_matches) { + assert(regex.mark_count() == out_matches.size()); + + std::match_results match; + if (!std::regex_match(target.begin(), target.end(), match, regex)) { + return false; + } + + // Match #0 is the whole matched sequence + assert(regex.mark_count() + 1 == match.size()); + auto out_it = out_matches.begin(); + for (size_t i = 1; i < match.size(); ++i) { + **out_it++ = target.substr(match.position(i), match.length(i)); + } + return true; +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f5690062a049dd2485fe68461237eb6d9e0265 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h @@ -0,0 +1,826 @@ +// Licensed to the Apache Software 
Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Imported from Apache Impala (incubating) on 2016-01-29 and modified for use +// in parquet-cpp, Arrow + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/bit_block_counter.h" +#include "arrow/util/bit_run_reader.h" +#include "arrow/util/bit_stream_utils.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace util { + +/// Utility classes to do run length encoding (RLE) for fixed bit width values. If runs +/// are sufficiently long, RLE is used, otherwise, the values are just bit-packed +/// (literal encoding). +/// For both types of runs, there is a byte-aligned indicator which encodes the length +/// of the run and the type of the run. +/// This encoding has the benefit that when there aren't any long enough runs, values +/// are always decoded at fixed (can be precomputed) bit offsets OR both the value and +/// the run length are byte aligned. This allows for very efficient decoding +/// implementations. +/// The encoding is: +/// encoded-block := run* +/// run := literal-run | repeated-run +/// literal-run := literal-indicator < literal bytes > +/// repeated-run := repeated-indicator < repeated value. 
padded to byte boundary > +/// literal-indicator := varint_encode( number_of_groups << 1 | 1) +/// repeated-indicator := varint_encode( number_of_repetitions << 1 ) +// +/// Each run is preceded by a varint. The varint's least significant bit is +/// used to indicate whether the run is a literal run or a repeated run. The rest +/// of the varint is used to determine the length of the run (eg how many times the +/// value repeats). +// +/// In the case of literal runs, the run length is always a multiple of 8 (i.e. encode +/// in groups of 8), so that no matter the bit-width of the value, the sequence will end +/// on a byte boundary without padding. +/// Given that we know it is a multiple of 8, we store the number of 8-groups rather than +/// the actual number of encoded ints. (This means that the total number of encoded values +/// cannot be determined from the encoded data, since the number of values in the last +/// group may not be a multiple of 8). For the last group of literal runs, we pad +/// the group to 8 with zeros. This allows for 8 at a time decoding on the read side +/// without the need for additional checks. +// +/// There is a break-even point when it is more storage efficient to do run length +/// encoding. For 1 bit-width values, that point is 8 values. They require 2 bytes +/// for both the repeated encoding or the literal encoding. This value can always +/// be computed based on the bit-width. +/// TODO: think about how to use this for strings. The bit packing isn't quite the same. +// +/// Examples with bit-width 1 (eg encoding booleans): +/// ---------------------------------------- +/// 100 1s followed by 100 0s: +/// <1, padded to 1 byte> <0, padded to 1 byte> +/// - (total 4 bytes) +// +/// alternating 1s and 0s (200 total): +/// 200 ints = 25 groups of 8 +/// <25 bytes of values, bitpacked> +/// (total 26 bytes, 1 byte overhead) +// + +/// Decoder class for RLE encoded data. +class RleDecoder { + public: + /// Create a decoder object. 
buffer/buffer_len is the decoded data. + /// bit_width is the width of each value (before encoding). + RleDecoder(const uint8_t* buffer, int buffer_len, int bit_width) + : bit_reader_(buffer, buffer_len), + bit_width_(bit_width), + current_value_(0), + repeat_count_(0), + literal_count_(0) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + } + + RleDecoder() : bit_width_(-1) {} + + void Reset(const uint8_t* buffer, int buffer_len, int bit_width) { + DCHECK_GE(bit_width, 0); + DCHECK_LE(bit_width, 64); + bit_reader_.Reset(buffer, buffer_len); + bit_width_ = bit_width; + current_value_ = 0; + repeat_count_ = 0; + literal_count_ = 0; + } + + /// Gets the next value. Returns false if there are no more. + template + bool Get(T* val); + + /// Gets a batch of values. Returns the number of decoded elements. + template + int GetBatch(T* values, int batch_size); + + /// Like GetBatch but add spacing for null entries + template + int GetBatchSpaced(int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* out); + + /// Like GetBatch but the values are then decoded using the provided dictionary + template + int GetBatchWithDict(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size); + + /// Like GetBatchWithDict but add spacing for null entries + /// + /// Null entries will be zero-initialized in `values` to avoid leaking + /// private data. + template + int GetBatchWithDictSpaced(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset); + + protected: + ::arrow::bit_util::BitReader bit_reader_; + /// Number of bits needed to encode the value. Must be between 0 and 64. + int bit_width_; + uint64_t current_value_; + int32_t repeat_count_; + int32_t literal_count_; + + private: + /// Fills literal_count_ and repeat_count_ with next values. Returns false if there + /// are no more. 
+ template + bool NextCounts(); + + /// Utility methods for retrieving spaced values. + template + int GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, T* out); +}; + +/// Class to incrementally build the rle data. This class does not allocate any memory. +/// The encoding has two modes: encoding repeated runs and literal runs. +/// If the run is sufficiently short, it is more efficient to encode as a literal run. +/// This class does so by buffering 8 values at a time. If they are not all the same +/// they are added to the literal run. If they are the same, they are added to the +/// repeated run. When we switch modes, the previous run is flushed out. +class RleEncoder { + public: + /// buffer/buffer_len: preallocated output buffer. + /// bit_width: max number of bits for value. + /// TODO: consider adding a min_repeated_run_length so the caller can control + /// when values should be encoded as repeated runs. Currently this is derived + /// based on the bit_width, which can determine a storage optimal choice. + /// TODO: allow 0 bit_width (and have dict encoder use it) + RleEncoder(uint8_t* buffer, int buffer_len, int bit_width) + : bit_width_(bit_width), bit_writer_(buffer, buffer_len) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + max_run_byte_size_ = MinBufferSize(bit_width); + DCHECK_GE(buffer_len, max_run_byte_size_) << "Input buffer not big enough."; + Clear(); + } + + /// Returns the minimum buffer size needed to use the encoder for 'bit_width' + /// This is the maximum length of a single run for 'bit_width'. + /// It is not valid to pass a buffer less than this length. + static int MinBufferSize(int bit_width) { + /// 1 indicator byte and MAX_VALUES_PER_LITERAL_RUN 'bit_width' values. 
+ int max_literal_run_size = 1 + static_cast(::arrow::bit_util::BytesForBits( + MAX_VALUES_PER_LITERAL_RUN * bit_width)); + /// Up to kMaxVlqByteLength indicator and a single 'bit_width' value. + int max_repeated_run_size = + ::arrow::bit_util::BitReader::kMaxVlqByteLength + + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + return std::max(max_literal_run_size, max_repeated_run_size); + } + + /// Returns the maximum byte size it could take to encode 'num_values'. + static int MaxBufferSize(int bit_width, int num_values) { + // For a bit_width > 1, the worst case is the repetition of "literal run of length 8 + // and then a repeated run of length 8". + // 8 values per smallest run, 8 bits per byte + int bytes_per_run = bit_width; + int num_runs = static_cast(::arrow::bit_util::CeilDiv(num_values, 8)); + int literal_max_size = num_runs + num_runs * bytes_per_run; + + // In the very worst case scenario, the data is a concatenation of repeated + // runs of 8 values. Repeated run has a 1 byte varint followed by the + // bit-packed repeated value + int min_repeated_run_size = + 1 + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + int repeated_max_size = num_runs * min_repeated_run_size; + + return std::max(literal_max_size, repeated_max_size); + } + + /// Encode value. Returns true if the value fits in buffer, false otherwise. + /// This value must be representable with bit_width_ bits. + bool Put(uint64_t value); + + /// Flushes any pending values to the underlying buffer. + /// Returns the total number of bytes written + int Flush(); + + /// Resets all the state in the encoder. + void Clear(); + + /// Returns pointer to underlying buffer + uint8_t* buffer() { return bit_writer_.buffer(); } + int32_t len() { return bit_writer_.bytes_written(); } + + private: + /// Flushes any buffered values. If this is part of a repeated run, this is largely + /// a no-op. 
+ /// If it is part of a literal run, this will call FlushLiteralRun, which writes + /// out the buffered literal values. + /// If 'done' is true, the current run would be written even if it would normally + /// have been buffered more. This should only be called at the end, when the + /// encoder has received all values even if it would normally continue to be + /// buffered. + void FlushBufferedValues(bool done); + + /// Flushes literal values to the underlying buffer. If update_indicator_byte, + /// then the current literal run is complete and the indicator byte is updated. + void FlushLiteralRun(bool update_indicator_byte); + + /// Flushes a repeated run to the underlying buffer. + void FlushRepeatedRun(); + + /// Checks and sets buffer_full_. This must be called after flushing a run to + /// make sure there are enough bytes remaining to encode the next run. + void CheckBufferFull(); + + /// The maximum number of values in a single literal run + /// (number of groups encodable by a 1-byte indicator * 8) + static const int MAX_VALUES_PER_LITERAL_RUN = (1 << 6) * 8; + + /// Number of bits needed to encode the value. Must be between 0 and 64. + const int bit_width_; + + /// Underlying buffer. + ::arrow::bit_util::BitWriter bit_writer_; + + /// If true, the buffer is full and subsequent Put()'s will fail. + bool buffer_full_; + + /// The maximum byte size a single run can take. + int max_run_byte_size_; + + /// We need to buffer at most 8 values for literals. This happens when the + /// bit_width is 1 (so 8 values fit in one byte). + /// TODO: generalize this to other bit widths + int64_t buffered_values_[8]; + + /// Number of values in buffered_values_ + int num_buffered_values_; + + /// The current (also last) value that was written and the count of how + /// many times in a row that value has been seen. This is maintained even + /// if we are in a literal run. If the repeat_count_ get high enough, we switch + /// to encoding repeated runs. 
+ uint64_t current_value_; + int repeat_count_; + + /// Number of literals in the current run. This does not include the literals + /// that might be in buffered_values_. Only after we've got a group big enough + /// can we decide if they should part of the literal_count_ or repeat_count_ + int literal_count_; + + /// Pointer to a byte in the underlying buffer that stores the indicator byte. + /// This is reserved as soon as we need a literal run but the value is written + /// when the literal run is complete. + uint8_t* literal_indicator_byte_; +}; + +template +inline bool RleDecoder::Get(T* val) { + return GetBatch(val, 1) == 1; +} + +template +inline int RleDecoder::GetBatch(T* values, int batch_size) { + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { // Repeated value case. + int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, static_cast(current_value_)); + + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(remaining, literal_count_); + int actual_read = bit_reader_.GetBatch(bit_width_, out, literal_batch); + if (actual_read != literal_batch) { + return values_read; + } + + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, + T* out) { + if (ARROW_PREDICT_FALSE(null_count == batch_size)) { + converter.FillZero(out, out + batch_size); + return batch_size; + } + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + int values_remaining = batch_size - null_count; + + // Assume no bits to start. 
+ arrow::internal::BitRunReader bit_reader(valid_bits, valid_bits_offset, + /*length=*/batch_size); + arrow::internal::BitRun valid_run = bit_reader.NextRun(); + while (values_read < batch_size) { + if (ARROW_PREDICT_FALSE(valid_run.length == 0)) { + valid_run = bit_reader.NextRun(); + } + + DCHECK_GT(batch_size, 0); + DCHECK_GT(valid_run.length, 0); + + if (valid_run.set) { + if ((repeat_count_ == 0) && (literal_count_ == 0)) { + if (!NextCounts()) return values_read; + DCHECK((repeat_count_ > 0) ^ (literal_count_ > 0)); + } + + if (repeat_count_ > 0) { + int repeat_batch = 0; + // Consume the entire repeat counts incrementing repeat_batch to + // be the total of nulls + values consumed, we only need to + // get the total count because we can fill in the same value for + // nulls and non-nulls. This proves to be a big efficiency win. + while (repeat_count_ > 0 && (values_read + repeat_batch) < batch_size) { + DCHECK_GT(valid_run.length, 0); + if (valid_run.set) { + int update_size = std::min(static_cast(valid_run.length), repeat_count_); + repeat_count_ -= update_size; + repeat_batch += update_size; + valid_run.length -= update_size; + values_remaining -= update_size; + } else { + // We can consume all nulls here because we would do so on + // the next loop anyways. 
+ repeat_batch += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + RunType current_value = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!converter.IsValid(current_value))) { + return values_read; + } + converter.Fill(out, out + repeat_batch, current_value); + out += repeat_batch; + values_read += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(values_remaining, literal_count_); + DCHECK_GT(literal_batch, 0); + + // Decode the literals + constexpr int kBufferSize = 1024; + RunType indices[kBufferSize]; + literal_batch = std::min(literal_batch, kBufferSize); + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (!converter.IsValid(indices, /*length=*/actual_read)) { + return values_read; + } + int skipped = 0; + int literals_read = 0; + while (literals_read < literal_batch) { + if (valid_run.set) { + int update_size = std::min(literal_batch - literals_read, + static_cast(valid_run.length)); + converter.Copy(out, indices + literals_read, update_size); + literals_read += update_size; + out += update_size; + valid_run.length -= update_size; + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + skipped += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + literal_count_ -= literal_batch; + values_remaining -= literal_batch; + values_read += literal_batch + skipped; + } + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + values_read += static_cast(valid_run.length); + valid_run.length = 0; + } + } + DCHECK_EQ(valid_run.length, 0); + DCHECK_EQ(values_remaining, 0); + return values_read; +} + +// Converter for GetSpaced that handles runs that get returned +// directly as 
output. +template +struct PlainRleConverter { + T kZero = {}; + inline bool IsValid(const T& values) const { return true; } + inline bool IsValid(const T* values, int32_t length) const { return true; } + inline void Fill(T* begin, T* end, const T& run_value) const { + std::fill(begin, end, run_value); + } + inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); } + inline void Copy(T* out, const T* values, int length) const { + std::memcpy(out, values, length * sizeof(T)); + } +}; + +template +inline int RleDecoder::GetBatchSpaced(int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset, T* out) { + if (null_count == 0) { + return GetBatch(out, batch_size); + } + + PlainRleConverter converter; + arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset, + batch_size); + + int total_processed = 0; + int processed = 0; + arrow::internal::BitBlockCount block; + + do { + block = block_counter.NextFourWords(); + if (block.length == 0) { + break; + } + if (block.AllSet()) { + processed = GetBatch(out, block.length); + } else if (block.NoneSet()) { + converter.FillZero(out, out + block.length); + processed = block.length; + } else { + processed = GetSpaced>( + converter, block.length, block.length - block.popcount, valid_bits, + valid_bits_offset, out); + } + total_processed += processed; + out += block.length; + valid_bits_offset += block.length; + } while (processed == block.length); + return total_processed; +} + +static inline bool IndexInRange(int32_t idx, int32_t dictionary_length) { + return idx >= 0 && idx < dictionary_length; +} + +// Converter for GetSpaced that handles runs of returned dictionary +// indices. 
+template +struct DictionaryConverter { + T kZero = {}; + const T* dictionary; + int32_t dictionary_length; + + inline bool IsValid(int32_t value) { return IndexInRange(value, dictionary_length); } + + inline bool IsValid(const int32_t* values, int32_t length) const { + using IndexType = int32_t; + IndexType min_index = std::numeric_limits::max(); + IndexType max_index = std::numeric_limits::min(); + for (int x = 0; x < length; x++) { + min_index = std::min(values[x], min_index); + max_index = std::max(values[x], max_index); + } + + return IndexInRange(min_index, dictionary_length) && + IndexInRange(max_index, dictionary_length); + } + inline void Fill(T* begin, T* end, const int32_t& run_value) const { + std::fill(begin, end, dictionary[run_value]); + } + inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); } + + inline void Copy(T* out, const int32_t* values, int length) const { + for (int x = 0; x < length; x++) { + out[x] = dictionary[values[x]]; + } + } +}; + +template +inline int RleDecoder::GetBatchWithDict(const T* dictionary, int32_t dictionary_length, + T* values, int batch_size) { + // Per https://github.com/apache/parquet-format/blob/master/Encodings.md, + // the maximum dictionary index width in Parquet is 32 bits. 
+ using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + converter.dictionary_length = dictionary_length; + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { + auto idx = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!IndexInRange(idx, dictionary_length))) { + return values_read; + } + T val = dictionary[idx]; + + int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, val); + + /* Upkeep counters */ + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + constexpr int kBufferSize = 1024; + IndexType indices[kBufferSize]; + + int literal_batch = std::min(remaining, literal_count_); + literal_batch = std::min(literal_batch, kBufferSize); + + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (ARROW_PREDICT_FALSE(!converter.IsValid(indices, /*length=*/literal_batch))) { + return values_read; + } + converter.Copy(out, indices, literal_batch); + + /* Upkeep counters */ + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary, + int32_t dictionary_length, T* out, + int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset) { + if (null_count == 0) { + return GetBatchWithDict(dictionary, dictionary_length, out, batch_size); + } + arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset, + batch_size); + using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + 
converter.dictionary_length = dictionary_length; + + int total_processed = 0; + int processed = 0; + arrow::internal::BitBlockCount block; + do { + block = block_counter.NextFourWords(); + if (block.length == 0) { + break; + } + if (block.AllSet()) { + processed = GetBatchWithDict(dictionary, dictionary_length, out, block.length); + } else if (block.NoneSet()) { + converter.FillZero(out, out + block.length); + processed = block.length; + } else { + processed = GetSpaced>( + converter, block.length, block.length - block.popcount, valid_bits, + valid_bits_offset, out); + } + total_processed += processed; + out += block.length; + valid_bits_offset += block.length; + } while (processed == block.length); + return total_processed; +} + +template +bool RleDecoder::NextCounts() { + // Read the next run's indicator int, it could be a literal or repeated run. + // The int is encoded as a vlq-encoded value. + uint32_t indicator_value = 0; + if (!bit_reader_.GetVlqInt(&indicator_value)) return false; + + // lsb indicates if it is a literal run or repeated run + bool is_literal = indicator_value & 1; + uint32_t count = indicator_value >> 1; + if (is_literal) { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX) / 8)) { + return false; + } + literal_count_ = count * 8; + } else { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX))) { + return false; + } + repeat_count_ = count; + T value = {}; + if (!bit_reader_.GetAligned( + static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8)), &value)) { + return false; + } + current_value_ = static_cast(value); + } + return true; +} + +/// This function buffers input values 8 at a time. After seeing all 8 values, +/// it decides whether they should be encoded as a literal or repeated run. 
+inline bool RleEncoder::Put(uint64_t value) { + DCHECK(bit_width_ == 64 || value < (1ULL << bit_width_)); + if (ARROW_PREDICT_FALSE(buffer_full_)) return false; + + if (ARROW_PREDICT_TRUE(current_value_ == value)) { + ++repeat_count_; + if (repeat_count_ > 8) { + // This is just a continuation of the current run, no need to buffer the + // values. + // Note that this is the fast path for long repeated runs. + return true; + } + } else { + if (repeat_count_ >= 8) { + // We had a run that was long enough but it has ended. Flush the + // current repeated run. + DCHECK_EQ(literal_count_, 0); + FlushRepeatedRun(); + } + repeat_count_ = 1; + current_value_ = value; + } + + buffered_values_[num_buffered_values_] = value; + if (++num_buffered_values_ == 8) { + DCHECK_EQ(literal_count_ % 8, 0); + FlushBufferedValues(false); + } + return true; +} + +inline void RleEncoder::FlushLiteralRun(bool update_indicator_byte) { + if (literal_indicator_byte_ == NULL) { + // The literal indicator byte has not been reserved yet, get one now. + literal_indicator_byte_ = bit_writer_.GetNextBytePtr(); + DCHECK(literal_indicator_byte_ != NULL); + } + + // Write all the buffered values as bit packed literals + for (int i = 0; i < num_buffered_values_; ++i) { + bool success = bit_writer_.PutValue(buffered_values_[i], bit_width_); + DCHECK(success) << "There is a bug in using CheckBufferFull()"; + } + num_buffered_values_ = 0; + + if (update_indicator_byte) { + // At this point we need to write the indicator byte for the literal run. + // We only reserve one byte, to allow for streaming writes of literal values. + // The logic makes sure we flush literal runs often enough to not overrun + // the 1 byte. 
+ DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + int32_t indicator_value = (num_groups << 1) | 1; + DCHECK_EQ(indicator_value & 0xFFFFFF00, 0); + *literal_indicator_byte_ = static_cast(indicator_value); + literal_indicator_byte_ = NULL; + literal_count_ = 0; + CheckBufferFull(); + } +} + +inline void RleEncoder::FlushRepeatedRun() { + DCHECK_GT(repeat_count_, 0); + bool result = true; + // The lsb of 0 indicates this is a repeated run + int32_t indicator_value = repeat_count_ << 1 | 0; + result &= bit_writer_.PutVlqInt(static_cast(indicator_value)); + result &= bit_writer_.PutAligned( + current_value_, static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8))); + DCHECK(result); + num_buffered_values_ = 0; + repeat_count_ = 0; + CheckBufferFull(); +} + +/// Flush the values that have been buffered. At this point we decide whether +/// we need to switch between the run types or continue the current one. +inline void RleEncoder::FlushBufferedValues(bool done) { + if (repeat_count_ >= 8) { + // Clear the buffered values. They are part of the repeated run now and we + // don't want to flush them out as literals. + num_buffered_values_ = 0; + if (literal_count_ != 0) { + // There was a current literal run. All the values in it have been flushed + // but we still need to update the indicator byte. + DCHECK_EQ(literal_count_ % 8, 0); + DCHECK_EQ(repeat_count_, 8); + FlushLiteralRun(true); + } + DCHECK_EQ(literal_count_, 0); + return; + } + + literal_count_ += num_buffered_values_; + DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + if (num_groups + 1 >= (1 << 6)) { + // We need to start a new literal run because the indicator byte we've reserved + // cannot store more values. 
+ DCHECK(literal_indicator_byte_ != NULL); + FlushLiteralRun(true); + } else { + FlushLiteralRun(done); + } + repeat_count_ = 0; +} + +inline int RleEncoder::Flush() { + if (literal_count_ > 0 || repeat_count_ > 0 || num_buffered_values_ > 0) { + bool all_repeat = literal_count_ == 0 && (repeat_count_ == num_buffered_values_ || + num_buffered_values_ == 0); + // There is something pending, figure out if it's a repeated or literal run + if (repeat_count_ > 0 && all_repeat) { + FlushRepeatedRun(); + } else { + DCHECK_EQ(literal_count_ % 8, 0); + // Buffer the last group of literals to 8 by padding with 0s. + for (; num_buffered_values_ != 0 && num_buffered_values_ < 8; + ++num_buffered_values_) { + buffered_values_[num_buffered_values_] = 0; + } + literal_count_ += num_buffered_values_; + FlushLiteralRun(true); + repeat_count_ = 0; + } + } + bit_writer_.Flush(); + DCHECK_EQ(num_buffered_values_, 0); + DCHECK_EQ(literal_count_, 0); + DCHECK_EQ(repeat_count_, 0); + + return bit_writer_.bytes_written(); +} + +inline void RleEncoder::CheckBufferFull() { + int bytes_written = bit_writer_.bytes_written(); + if (bytes_written + max_run_byte_size_ > bit_writer_.buffer_len()) { + buffer_full_ = true; + } +} + +inline void RleEncoder::Clear() { + buffer_full_ = false; + current_value_ = 0; + repeat_count_ = 0; + num_buffered_values_ = 0; + literal_count_ = 0; + literal_indicator_byte_ = NULL; + bit_writer_.Clear(); +} + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h new file mode 100644 index 0000000000000000000000000000000000000000..ee9105d5f4beb431f155f8b47b7efdcc72452bc5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifdef _MSC_VER +// MSVC x86_64/arm64 + +#if defined(_M_AMD64) || defined(_M_X64) +#include +#endif + +#else +// gcc/clang (possibly others) + +#if defined(ARROW_HAVE_BMI2) +#include +#endif + +#if defined(ARROW_HAVE_AVX2) || defined(ARROW_HAVE_AVX512) +#include +#elif defined(ARROW_HAVE_SSE4_2) +#include +#endif + +#ifdef ARROW_HAVE_NEON +#include +#endif + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..52e191c4c07846b922a5bd830c2cbbde50538eba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h @@ -0,0 +1,511 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/aligned_storage.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + void destroy() noexcept {} +}; + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + ~StaticVectorStorageBase() noexcept { destroy(); } + + void destroy() noexcept { storage_type::destroy_several(static_data_, size_); } +}; + +template ::value> +struct StaticVectorStorage : public StaticVectorStorageBase { + using Base = StaticVectorStorageBase; + using typename Base::storage_type; + + using Base::size_; + using Base::static_data_; + + StaticVectorStorage() noexcept = default; + + constexpr storage_type* storage_ptr() { return static_data_; } + + constexpr const storage_type* const_storage_ptr() const { return static_data_; } + + // Adjust storage size, but don't initialize any objects + void bump_size(size_t addend) { + assert(size_ + addend <= N); + size_ += addend; + } + + void ensure_capacity(size_t min_capacity) { assert(min_capacity <= N); } + + // Adjust storage size, but don't destroy any objects + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + // Move objects from another storage, but don't destroy any objects currently + // stored in *this. 
+ // You need to call destroy() first if necessary (e.g. in a + // move assignment operator). + void move_construct(StaticVectorStorage&& other) noexcept { + size_ = other.size_; + if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return N; } + + constexpr size_t max_size() const { return N; } + + void reserve(size_t n) {} + + void clear() { + storage_type::destroy_several(static_data_, size_); + size_ = 0; + } +}; + +template +struct SmallVectorStorage { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + storage_type* data_ = static_data_; + size_t dynamic_capacity_ = 0; + + SmallVectorStorage() noexcept = default; + + ~SmallVectorStorage() { destroy(); } + + constexpr storage_type* storage_ptr() { return data_; } + + constexpr const storage_type* const_storage_ptr() const { return data_; } + + void bump_size(size_t addend) { + const size_t new_size = size_ + addend; + ensure_capacity(new_size); + size_ = new_size; + } + + void ensure_capacity(size_t min_capacity) { + if (dynamic_capacity_) { + // Grow dynamic storage if necessary + if (min_capacity > dynamic_capacity_) { + size_t new_capacity = std::max(dynamic_capacity_ * 2, min_capacity); + reallocate_dynamic(new_capacity); + } + } else if (min_capacity > N) { + switch_to_dynamic(min_capacity); + } + } + + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + void destroy() noexcept { + storage_type::destroy_several(data_, size_); + if (dynamic_capacity_) { + delete[] data_; + } + } + + void move_construct(SmallVectorStorage&& other) noexcept { + size_ = other.size_; + dynamic_capacity_ = other.dynamic_capacity_; + if (dynamic_capacity_) { + data_ = other.data_; + other.data_ = other.static_data_; + other.dynamic_capacity_ = 0; + other.size_ = 0; + } else if (size_ != 0) 
{ + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return dynamic_capacity_ ? dynamic_capacity_ : N; } + + constexpr size_t max_size() const { return std::numeric_limits::max(); } + + void reserve(size_t n) { + if (dynamic_capacity_) { + if (n > dynamic_capacity_) { + reallocate_dynamic(n); + } + } else if (n > N) { + switch_to_dynamic(n); + } + } + + void clear() { + storage_type::destroy_several(data_, size_); + size_ = 0; + } + + private: + void switch_to_dynamic(size_t new_capacity) { + dynamic_capacity_ = new_capacity; + data_ = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(static_data_, data_, size_); + } + + void reallocate_dynamic(size_t new_capacity) { + assert(new_capacity >= size_); + auto new_data = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(data_, new_data, size_); + delete[] data_; + dynamic_capacity_ = new_capacity; + data_ = new_data; + } +}; + +template +class StaticVectorImpl { + private: + Storage storage_; + + T* data_ptr() { return storage_.storage_ptr()->get(); } + + constexpr const T* const_data_ptr() const { + return storage_.const_storage_ptr()->get(); + } + + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + constexpr StaticVectorImpl() noexcept = default; + + // Move and copy constructors + StaticVectorImpl(StaticVectorImpl&& other) noexcept { + storage_.move_construct(std::move(other.storage_)); + } + + StaticVectorImpl& operator=(StaticVectorImpl&& other) noexcept { + if 
(ARROW_PREDICT_TRUE(&other != this)) { + // TODO move_assign? + storage_.destroy(); + storage_.move_construct(std::move(other.storage_)); + } + return *this; + } + + StaticVectorImpl(const StaticVectorImpl& other) { + init_by_copying(other.storage_.size_, other.const_data_ptr()); + } + + StaticVectorImpl& operator=(const StaticVectorImpl& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + assign_by_copying(other.storage_.size_, other.data()); + } + return *this; + } + + // Automatic conversion from std::vector, for convenience + StaticVectorImpl(const std::vector& other) { // NOLINT: explicit + init_by_copying(other.size(), other.data()); + } + + StaticVectorImpl(std::vector&& other) noexcept { // NOLINT: explicit + init_by_moving(other.size(), other.data()); + } + + StaticVectorImpl& operator=(const std::vector& other) { + assign_by_copying(other.size(), other.data()); + return *this; + } + + StaticVectorImpl& operator=(std::vector&& other) noexcept { + assign_by_moving(other.size(), other.data()); + return *this; + } + + // Constructing from count and optional initialization value + explicit StaticVectorImpl(size_t count) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(); + } + } + + StaticVectorImpl(size_t count, const T& value) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(value); + } + } + + StaticVectorImpl(std::initializer_list values) { + storage_.bump_size(values.size()); + auto* p = storage_.storage_ptr(); + for (auto&& v : values) { + // Unfortunately, cannot move initializer values + p++->construct(v); + } + } + + // Size inspection + + constexpr bool empty() const { return storage_.size_ == 0; } + + constexpr size_t size() const { return storage_.size_; } + + constexpr size_t capacity() const { return storage_.capacity(); } + + constexpr size_t max_size() const { return storage_.max_size(); 
} + + // Data access + + T& operator[](size_t i) { return data_ptr()[i]; } + + constexpr const T& operator[](size_t i) const { return const_data_ptr()[i]; } + + T& front() { return data_ptr()[0]; } + + constexpr const T& front() const { return const_data_ptr()[0]; } + + T& back() { return data_ptr()[storage_.size_ - 1]; } + + constexpr const T& back() const { return const_data_ptr()[storage_.size_ - 1]; } + + T* data() { return data_ptr(); } + + constexpr const T* data() const { return const_data_ptr(); } + + // Iterators + + iterator begin() { return iterator(data_ptr()); } + + constexpr const_iterator begin() const { return const_iterator(const_data_ptr()); } + + constexpr const_iterator cbegin() const { return const_iterator(const_data_ptr()); } + + iterator end() { return iterator(data_ptr() + storage_.size_); } + + constexpr const_iterator end() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + constexpr const_iterator cend() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + constexpr const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + constexpr const_reverse_iterator crbegin() const { + return const_reverse_iterator(end()); + } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + constexpr const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + constexpr const_reverse_iterator crend() const { + return const_reverse_iterator(begin()); + } + + // Mutations + + void reserve(size_t n) { storage_.reserve(n); } + + void clear() { storage_.clear(); } + + void push_back(const T& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(value); + } + + void push_back(T&& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::move(value)); + } + + template + void emplace_back(Args&&... 
args) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::forward(args)...); + } + + template + iterator insert(const_iterator insert_at, InputIt first, InputIt last) { + const size_t n = storage_.size_; + const size_t it_size = static_cast(last - first); // XXX might be O(n)? + const size_t pos = static_cast(insert_at - const_data_ptr()); + storage_.bump_size(it_size); + auto* p = storage_.storage_ptr(); + if (it_size == 0) { + return p[pos].get(); + } + const size_t end_pos = pos + it_size; + + // Move [pos; n) to [end_pos; end_pos + n - pos) + size_t i = n; + size_t j = end_pos + n - pos; + while (j > std::max(n, end_pos)) { + p[--j].move_construct(&p[--i]); + } + while (j > end_pos) { + p[--j].move_assign(&p[--i]); + } + assert(j == end_pos); + // Copy [first; last) to [pos; end_pos) + j = pos; + while (j < std::min(n, end_pos)) { + p[j++].assign(*first++); + } + while (j < end_pos) { + p[j++].construct(*first++); + } + assert(first == last); + return p[pos].get(); + } + + void resize(size_t n) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(T{}); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + void resize(size_t n, const T& value) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(value); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + private: + template + void init_by_copying(size_t n, InputIt src) { + storage_.bump_size(n); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; 
++i, ++src) { + dest[i].construct(*src); + } + } + + template + void init_by_moving(size_t n, InputIt src) { + init_by_copying(n, std::make_move_iterator(src)); + } + + template + void assign_by_copying(size_t n, InputIt src) { + const size_t old_size = storage_.size_; + if (n > old_size) { + storage_.bump_size(n - old_size); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < old_size; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = old_size; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } else { + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = n; i < old_size; ++i) { + dest[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + template + void assign_by_moving(size_t n, InputIt src) { + assign_by_copying(n, std::make_move_iterator(src)); + } +}; + +template +using StaticVector = StaticVectorImpl>; + +template +using SmallVector = StaticVectorImpl>; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h new file mode 100644 index 0000000000000000000000000000000000000000..cdffe0b2317e5ba555c37ec16e5294bc912a49d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
namespace arrow {
namespace internal {

/// \brief Return the indices that would sort `values`.
///
/// The result `r` satisfies: values[r[0]] <= values[r[1]] <= ... under `cmp`.
/// `values` itself is not modified.
///
/// \param[in] values the vector to argsort
/// \param[in] cmp strict-weak-ordering comparator (defaults to std::less<T>)
/// \return vector of indices into `values`, in sorted order
template <typename T, typename Cmp = std::less<T>>
std::vector<int64_t> ArgSort(const std::vector<T>& values, Cmp&& cmp = {}) {
  std::vector<int64_t> indices(values.size());
  std::iota(indices.begin(), indices.end(), 0);
  std::sort(indices.begin(), indices.end(),
            [&](int64_t i, int64_t j) -> bool { return cmp(values[i], values[j]); });
  return indices;
}

/// \brief Apply a permutation to `*values` in place.
///
/// After the call, (*values)[i] holds the element previously at position
/// indices[i] (a "gather"); passing the result of ArgSort sorts `*values`.
/// Uses cycle decomposition, so only O(1) extra element storage is needed.
///
/// \param[in] indices a permutation of [0, values->size())
/// \param[in,out] values the vector to permute
/// \return the number of cycles in the permutation (including trivial ones)
template <typename T>
size_t Permute(const std::vector<int64_t>& indices, std::vector<T>* values) {
  if (indices.size() <= 1) {
    return indices.size();
  }

  // mask indicating which of values are in the correct location
  std::vector<bool> sorted(indices.size(), false);

  size_t cycle_count = 0;

  for (auto cycle_start = sorted.begin(); cycle_start != sorted.end();
       cycle_start = std::find(cycle_start, sorted.end(), false)) {
    ++cycle_count;

    // position in which an element belongs WRT sort
    auto sort_into = static_cast<int64_t>(cycle_start - sorted.begin());

    if (indices[sort_into] == sort_into) {
      // trivial cycle
      sorted[sort_into] = true;
      continue;
    }

    // resolve this cycle
    const auto end = sort_into;
    for (int64_t take_from = indices[sort_into]; take_from != end;
         take_from = indices[sort_into]) {
      std::swap(values->at(sort_into), values->at(take_from));
      sorted[sort_into] = true;
      sort_into = take_from;
    }
    sorted[sort_into] = true;
  }

  return cycle_count;
}

}  // namespace internal
}  // namespace arrow
b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h new file mode 100644 index 0000000000000000000000000000000000000000..8265e1d22ae0e78d7343b2fce6a0de4bc669ccc8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/bit_run_reader.h" + +namespace arrow { +namespace util { +namespace internal { + +/// \brief Compress the buffer to spaced, excluding the null entries. +/// +/// \param[in] src the source buffer +/// \param[in] num_values the size of source buffer +/// \param[in] valid_bits bitmap data indicating position of valid slots +/// \param[in] valid_bits_offset offset into valid_bits +/// \param[out] output the output buffer spaced +/// \return The size of spaced buffer. 
+template +inline int SpacedCompress(const T* src, int num_values, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* output) { + int num_valid_values = 0; + + arrow::internal::SetBitRunReader reader(valid_bits, valid_bits_offset, num_values); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + std::memcpy(output + num_valid_values, src + run.position, run.length * sizeof(T)); + num_valid_values += static_cast(run.length); + } + + return num_valid_values; +} + +/// \brief Relocate values in buffer into positions of non-null values as indicated by +/// a validity bitmap. +/// +/// \param[in, out] buffer the in-place buffer +/// \param[in] num_values total size of buffer including null slots +/// \param[in] null_count number of null slots +/// \param[in] valid_bits bitmap data indicating position of valid slots +/// \param[in] valid_bits_offset offset into valid_bits +/// \return The number of values expanded, including nulls. +template +inline int SpacedExpand(T* buffer, int num_values, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset) { + // Point to end as we add the spacing from the back. 
+ int idx_decode = num_values - null_count; + + // Depending on the number of nulls, some of the value slots in buffer may + // be uninitialized, and this will cause valgrind warnings / potentially UB + std::memset(static_cast(buffer + idx_decode), 0, null_count * sizeof(T)); + if (idx_decode == 0) { + // All nulls, nothing more to do + return num_values; + } + + arrow::internal::ReverseSetBitRunReader reader(valid_bits, valid_bits_offset, + num_values); + while (true) { + const auto run = reader.NextRun(); + if (run.length == 0) { + break; + } + idx_decode -= static_cast(run.length); + assert(idx_decode >= 0); + std::memmove(buffer + run.position, buffer + idx_decode, run.length * sizeof(T)); + } + + // Otherwise caller gave an incorrect null_count + assert(idx_decode == 0); + return num_values; +} + +} // namespace internal +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h new file mode 100644 index 0000000000000000000000000000000000000000..71cf9ed44890a78675e4187e03b4c01bff60ae54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h @@ -0,0 +1,156 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
namespace arrow::util {

template <typename T>
class span;

// This trait is used to check if a type R can be used to construct a span<T>.
// Specifically, it checks if std::data(R) and std::size(R) are valid expressions
// that may be passed to the span(T*, size_t) constructor. The reason this trait
// is needed rather than expressing this directly in the relevant span constructor
// is that this check requires instantiating span<T>, which would violate the
// C++ standard if written directly in the constructor's enable_if clause
// because span<T> is an incomplete type at that point. By defining this trait
// instead, we add an extra level of indirection that lets us delay the
// evaluation of the template until the first time the associated constructor
// is actually called, at which point span<T> is a complete type.
//
// Note that most compilers do support the noncompliant construct, but nvcc
// does not. See https://github.com/apache/arrow/issues/40252
template <typename, typename, typename = void>
struct ConstructibleFromDataAndSize : std::false_type {};

template <typename T, typename R>
struct ConstructibleFromDataAndSize<
    span<T>, R,
    std::void_t<decltype(span<T>{std::data(std::declval<R>()),
                                 std::size(std::declval<R>())})>> : std::true_type {};

/// std::span polyfill.
///
/// Does not support static extents.
template <typename T>
class span {
  static_assert(sizeof(T),
                R"(
std::span allows contiguous_iterators instead of just pointers, the enforcement
of which requires T to be a complete type. arrow::util::span does not support
contiguous_iterators, but T is still required to be a complete type to prevent
writing code which would break when it is replaced by std::span.)");

 public:
  using element_type = T;
  using value_type = std::remove_cv_t<T>;
  using iterator = T*;
  using const_iterator = T const*;

  span() = default;
  span(const span&) = default;
  span& operator=(const span&) = default;

  // Implicit conversion from span<T> to span<const T>.
  template <typename M, typename = std::enable_if_t<std::is_same_v<M, value_type>>>
  // NOLINTNEXTLINE runtime/explicit
  constexpr span(span<M> mut) : span{mut.data(), mut.size()} {}

  constexpr span(T* data, size_t count) : data_{data}, size_{count} {}

  constexpr span(T* begin, T* end)
      : data_{begin}, size_{static_cast<size_t>(end - begin)} {}

  // Construct from any contiguous range exposing std::data/std::size with a
  // compatible element type.
  template <
      typename R,
      std::enable_if_t<ConstructibleFromDataAndSize<span<T>, R>::value, bool> = true,
      typename DisableUnlessSimilarTypes = std::enable_if_t<std::is_same_v<
          std::decay_t<std::remove_pointer_t<decltype(std::data(std::declval<R>()))>>,
          std::decay_t<T>>>>
  // NOLINTNEXTLINE runtime/explicit, non-const reference
  constexpr span(R&& range) : span{std::data(range), std::size(range)} {}

  constexpr T* begin() const { return data_; }
  constexpr T* end() const { return data_ + size_; }
  constexpr T* data() const { return data_; }

  constexpr size_t size() const { return size_; }
  constexpr size_t size_bytes() const { return size_ * sizeof(T); }
  constexpr bool empty() const { return size_ == 0; }

  constexpr T& operator[](size_t i) { return data_[i]; }
  constexpr const T& operator[](size_t i) const { return data_[i]; }

  // Out-of-range offset yields an empty span rather than UB.
  constexpr span subspan(size_t offset) const {
    if (offset > size_) return {data_, data_};
    return {data_ + offset, size_ - offset};
  }

  constexpr span subspan(size_t offset, size_t count) const {
    auto out = subspan(offset);
    if (count < out.size_) {
      out.size_ = count;
    }
    return out;
  }

  constexpr bool operator==(span const& other) const {
    if (size_ != other.size_) return false;

    if constexpr (std::is_integral_v<T>) {
      if (size_ == 0) {
        return true;  // memcmp does not handle null pointers, even if size_ == 0
      }
      return std::memcmp(data_, other.data_, size_bytes()) == 0;
    } else {
      T* ptr = data_;
      for (T const& e : other) {
        if (*ptr++ != e) return false;
      }
      return true;
    }
  }
  constexpr bool operator!=(span const& other) const { return !(*this == other); }

 private:
  T* data_{};
  size_t size_{};
};

template <typename R>
span(R& range) -> span<std::remove_pointer_t<decltype(std::data(range))>>;

template <typename T>
span(T*, size_t) -> span<T>;

template <typename T>
constexpr span<std::byte const> as_bytes(span<T> s) {
  return {reinterpret_cast<std::byte const*>(s.data()), s.size_bytes()};
}

template <typename T>
constexpr span<std::byte> as_writable_bytes(span<T> s) {
  return {reinterpret_cast<std::byte*>(s.data()), s.size_bytes()};
}

}  // namespace arrow::util
namespace arrow {
namespace internal {

/// \brief A simple stopwatch returning elapsed time in nanoseconds.
class StopWatch {
  // steady_clock is monotonic: elapsed intervals are immune to system clock
  // adjustments (a true "wall clock" such as system_clock is not).
  using ClockType = std::chrono::steady_clock;

 public:
  StopWatch() {}

  /// Record the starting instant.
  void Start() { start_ = ClockType::now(); }

  /// \return nanoseconds elapsed since the last call to Start().
  uint64_t Stop() {
    auto stop = ClockType::now();
    std::chrono::nanoseconds d = stop - start_;
    assert(d.count() >= 0);
    return static_cast<uint64_t>(d.count());
  }

 private:
  std::chrono::time_point<ClockType> start_;
};

}  // namespace internal
}  // namespace arrow
template + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +namespace detail { + +class ARROW_EXPORT StringStreamWrapper { + public: + StringStreamWrapper(); + ~StringStreamWrapper(); + + std::ostream& stream() { return ostream_; } + std::string str(); + + protected: + std::unique_ptr sstream_; + std::ostream& ostream_; +}; + +} // namespace detail + +template +void StringBuilderRecursive(std::ostream& stream, Head&& head) { + stream << head; +} + +template +void StringBuilderRecursive(std::ostream& stream, Head&& head, Tail&&... tail) { + StringBuilderRecursive(stream, std::forward(head)); + StringBuilderRecursive(stream, std::forward(tail)...); +} + +template +std::string StringBuilder(Args&&... args) { + detail::StringStreamWrapper ss; + StringBuilderRecursive(ss.stream(), std::forward(args)...); + return ss.str(); +} + +/// CRTP helper for declaring string representation. Defines operator<< +template +class ToStringOstreamable { + public: + ~ToStringOstreamable() { + static_assert( + std::is_same().ToString()), std::string>::value, + "ToStringOstreamable depends on the method T::ToString() const"); + } + + private: + const T& cast() const { return static_cast(*this); } + + friend inline std::ostream& operator<<(std::ostream& os, const ToStringOstreamable& t) { + return os << t.cast().ToString(); + } +}; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h new file mode 100644 index 0000000000000000000000000000000000000000..308df468840eb299ac35f1e308a643df4b8e0e4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// approximate quantiles from arbitrary length dataset with O(1) space +// based on 'Computing Extremely Accurate Quantiles Using t-Digests' from Dunning & Ertl +// - https://arxiv.org/abs/1902.04023 +// - https://github.com/tdunning/t-digest + +#pragma once + +#include +#include +#include + +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +namespace internal { + +class ARROW_EXPORT TDigest { + public: + explicit TDigest(uint32_t delta = 100, uint32_t buffer_size = 500); + ~TDigest(); + TDigest(TDigest&&); + TDigest& operator=(TDigest&&); + + // reset and re-use this tdigest + void Reset(); + + // validate data integrity + Status Validate() const; + + // dump internal data, only for debug + void Dump() const; + + // buffer a single data point, consume internal buffer if full + // this function is intensively called and performance critical + // call it only if you are sure no NAN exists in input data + void Add(double value) { + DCHECK(!std::isnan(value)) << "cannot add NAN"; + if (ARROW_PREDICT_FALSE(input_.size() == input_.capacity())) { + MergeInput(); + } + input_.push_back(value); + } + + // skip NAN on adding + template + typename std::enable_if::value>::type NanAdd(T value) 
{ + if (!std::isnan(value)) Add(value); + } + + template + typename std::enable_if::value>::type NanAdd(T value) { + Add(static_cast(value)); + } + + // merge with other t-digests, called infrequently + void Merge(const std::vector& others); + void Merge(const TDigest& other); + + // calculate quantile + double Quantile(double q) const; + + double Min() const { return Quantile(0); } + double Max() const { return Quantile(1); } + double Mean() const; + + // check if this tdigest contains no valid data points + bool is_empty() const; + + private: + // merge input data with current tdigest + void MergeInput() const; + + // input buffer, size = buffer_size * sizeof(double) + mutable std::vector input_; + + // hide other members with pimpl + class TDigestImpl; + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h new file mode 100644 index 0000000000000000000000000000000000000000..7815d4d1ecc1d66ba20c45eddb6c626833aa54e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h @@ -0,0 +1,243 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +// A non-zero-terminated small string class. +// std::string usually has a small string optimization +// (see review at https://shaharmike.com/cpp/std-string/) +// but this one allows tight control and optimization of memory layout. +template +class SmallString { + public: + SmallString() : length_(0) {} + + template + SmallString(const T& v) { // NOLINT implicit constructor + *this = std::string_view(v); + } + + SmallString& operator=(const std::string_view s) { +#ifndef NDEBUG + CheckSize(s.size()); +#endif + length_ = static_cast(s.size()); + std::memcpy(data_, s.data(), length_); + return *this; + } + + SmallString& operator=(const std::string& s) { + *this = std::string_view(s); + return *this; + } + + SmallString& operator=(const char* s) { + *this = std::string_view(s); + return *this; + } + + explicit operator std::string_view() const { return std::string_view(data_, length_); } + + const char* data() const { return data_; } + size_t length() const { return length_; } + bool empty() const { return length_ == 0; } + char operator[](size_t pos) const { +#ifdef NDEBUG + assert(pos <= length_); +#endif + return data_[pos]; + } + + SmallString substr(size_t pos) const { + return SmallString(std::string_view(*this).substr(pos)); + } + + SmallString substr(size_t pos, size_t count) const { + return SmallString(std::string_view(*this).substr(pos, count)); + } + + template + bool operator==(T&& other) const { + return std::string_view(*this) == std::string_view(std::forward(other)); + } + + template + bool operator!=(T&& other) const { + return std::string_view(*this) != std::string_view(std::forward(other)); + } + 
+ protected: + uint8_t length_; + char data_[N]; + + void CheckSize(size_t n) { assert(n <= N); } +}; + +template +std::ostream& operator<<(std::ostream& os, const SmallString& str) { + return os << std::string_view(str); +} + +// A trie class for byte strings, optimized for small sets of short strings. +// This class is immutable by design, use a TrieBuilder to construct it. +class ARROW_EXPORT Trie { + using index_type = int16_t; + using fast_index_type = int_fast16_t; + static constexpr auto kMaxIndex = std::numeric_limits::max(); + + public: + Trie() : size_(0) {} + Trie(Trie&&) = default; + Trie& operator=(Trie&&) = default; + + int32_t Find(std::string_view s) const { + const Node* node = &nodes_[0]; + fast_index_type pos = 0; + if (s.length() > static_cast(kMaxIndex)) { + return -1; + } + fast_index_type remaining = static_cast(s.length()); + + while (remaining > 0) { + auto substring_length = node->substring_length(); + if (substring_length > 0) { + auto substring_data = node->substring_data(); + if (remaining < substring_length) { + // Input too short + return -1; + } + for (fast_index_type i = 0; i < substring_length; ++i) { + if (s[pos++] != substring_data[i]) { + // Mismatching substring + return -1; + } + --remaining; + } + if (remaining == 0) { + // Matched node exactly + return node->found_index_; + } + } + // Lookup child using next input character + if (node->child_lookup_ == -1) { + // Input too long + return -1; + } + auto c = static_cast(s[pos++]); + --remaining; + auto child_index = lookup_table_[node->child_lookup_ * 256 + c]; + if (child_index == -1) { + // Child not found + return -1; + } + node = &nodes_[child_index]; + } + + // Input exhausted + if (node->substring_.empty()) { + // Matched node exactly + return node->found_index_; + } else { + return -1; + } + } + + Status Validate() const; + + void Dump() const; + + protected: + static constexpr size_t kNodeSize = 16; + static constexpr auto kMaxSubstringLength = + kNodeSize - 2 * 
sizeof(index_type) - sizeof(int8_t); + + struct Node { + // If this node is a valid end of string, index of found string, otherwise -1 + index_type found_index_; + // Base index for child lookup in lookup_table_ (-1 if no child nodes) + index_type child_lookup_; + // The substring for this node. + SmallString substring_; + + fast_index_type substring_length() const { + return static_cast(substring_.length()); + } + const char* substring_data() const { return substring_.data(); } + }; + + static_assert(sizeof(Node) == kNodeSize, "Unexpected node size"); + + ARROW_DISALLOW_COPY_AND_ASSIGN(Trie); + + void Dump(const Node* node, const std::string& indent) const; + + // Node table: entry 0 is the root node + std::vector nodes_; + + // Indexed lookup structure: gives index in node table, or -1 if not found + std::vector lookup_table_; + + // Number of entries + index_type size_; + + friend class TrieBuilder; +}; + +class ARROW_EXPORT TrieBuilder { + using index_type = Trie::index_type; + using fast_index_type = Trie::fast_index_type; + + public: + TrieBuilder(); + Status Append(std::string_view s, bool allow_duplicate = false); + Trie Finish(); + + protected: + // Extend the lookup table by 256 entries, return the index of the new span + Status ExtendLookupTable(index_type* out_lookup_index); + // Split the node given by the index at the substring index `split_at` + Status SplitNode(fast_index_type node_index, fast_index_type split_at); + // Append an already constructed child node to the parent + Status AppendChildNode(Trie::Node* parent, uint8_t ch, Trie::Node&& node); + // Create a matching child node from this parent + Status CreateChildNode(Trie::Node* parent, uint8_t ch, std::string_view substring); + Status CreateChildNode(Trie::Node* parent, char ch, std::string_view substring); + + Trie trie_; + + static constexpr auto kMaxIndex = std::numeric_limits::max(); +}; + +} // namespace internal +} // namespace arrow diff --git 
a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..3174881f4d018c6193ff5c12a7d308e39ed75561 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +namespace arrow { + +namespace internal { +struct Empty; +} // namespace internal + +template +class WeakFuture; +class FutureWaiter; + +class TimestampParser; + +namespace internal { + +class Executor; +class TaskGroup; +class ThreadPool; +class CpuInfo; + +namespace tracing { + +struct Scope; + +} // namespace tracing +} // namespace internal + +struct Compression { + /// \brief Compression algorithm + enum type { + UNCOMPRESSED, + SNAPPY, + GZIP, + BROTLI, + ZSTD, + LZ4, + LZ4_FRAME, + LZO, + BZ2, + LZ4_HADOOP + }; +}; + +namespace util { +class AsyncTaskScheduler; +class Compressor; +class Decompressor; +class Codec; +class Uri; +} // namespace util + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..c1906152423c97e11ef9f577f46c7f4d4d124597 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +namespace arrow { +namespace internal { + +/// \brief Metafunction to allow checking if a type matches any of another set of types +template +struct IsOneOf : std::false_type {}; /// Base case: nothing has matched + +template +struct IsOneOf { + /// Recursive case: T == U or T matches any other types provided (not including U). + static constexpr bool value = std::is_same::value || IsOneOf::value; +}; + +/// \brief Shorthand for using IsOneOf + std::enable_if +template +using EnableIfIsOneOf = typename std::enable_if::value, T>::type; + +/// \brief is_null_pointer from C++17 +template +struct is_null_pointer : std::is_same::type> { +}; + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h new file mode 100644 index 0000000000000000000000000000000000000000..900d8011dfd69506ec7ee546f6f32109c448e5f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Contains utilities for making UBSan happy. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/util/macros.h" + +namespace arrow { +namespace util { + +namespace internal { + +constexpr uint8_t kNonNullFiller = 0; + +} // namespace internal + +/// \brief Returns maybe_null if not null or a non-null pointer to an arbitrary memory +/// that shouldn't be dereferenced. +/// +/// Memset/Memcpy are undefined when a nullptr is passed as an argument use this utility +/// method to wrap locations where this could happen. +/// +/// Note: Flatbuffers has UBSan warnings if a zero length vector is passed. +/// https://github.com/google/flatbuffers/pull/5355 is trying to resolve +/// them. +template +inline T* MakeNonNull(T* maybe_null = NULLPTR) { + if (ARROW_PREDICT_TRUE(maybe_null != NULLPTR)) { + return maybe_null; + } + + return const_cast(reinterpret_cast(&internal::kNonNullFiller)); +} + +template +inline std::enable_if_t, T> SafeLoadAs( + const uint8_t* unaligned) { + std::remove_const_t ret; + std::memcpy(&ret, unaligned, sizeof(T)); + return ret; +} + +template +inline std::enable_if_t, T> SafeLoad(const T* unaligned) { + std::remove_const_t ret; + std::memcpy(&ret, unaligned, sizeof(T)); + return ret; +} + +template +inline std::enable_if_t && + std::is_trivially_copyable_v && sizeof(T) == sizeof(U), + U> +SafeCopy(T value) { + std::remove_const_t ret; + std::memcpy(&ret, &value, sizeof(T)); + return ret; +} + +template +inline std::enable_if_t, void> SafeStore(void* unaligned, + T value) { + std::memcpy(unaligned, &value, sizeof(T)); +} + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h new file mode 100644 index 0000000000000000000000000000000000000000..74dbe924ff23740fb603c558e87fc54253392030 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h @@ -0,0 +1,119 @@ +// Licensed to the Apache Software 
Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow::util { + +/// \brief A parsed URI +class ARROW_EXPORT Uri { + public: + Uri(); + ~Uri(); + Uri(Uri&&); + Uri& operator=(Uri&&); + + // XXX Should we use std::string_view instead? These functions are + // not performance-critical. + + /// The URI scheme, such as "http", or the empty string if the URI has no + /// explicit scheme. + std::string scheme() const; + + /// Convenience function that returns true if the scheme() is "file" + bool is_file_scheme() const; + + /// Whether the URI has an explicit host name. This may return true if + /// the URI has an empty host (e.g. "file:///tmp/foo"), while it returns + /// false is the URI has not host component at all (e.g. "file:/tmp/foo"). + bool has_host() const; + /// The URI host name, such as "localhost", "127.0.0.1" or "::1", or the empty + /// string is the URI does not have a host component. + std::string host() const; + + /// The URI port number, as a string such as "80", or the empty string is the URI + /// does not have a port number component. 
+ std::string port_text() const; + /// The URI port parsed as an integer, or -1 if the URI does not have a port + /// number component. + int32_t port() const; + + /// The username specified in the URI. + std::string username() const; + /// The password specified in the URI. + std::string password() const; + + /// The URI path component. + std::string path() const; + + /// The URI query string + std::string query_string() const; + + /// The URI query items + /// + /// Note this API doesn't allow differentiating between an empty value + /// and a missing value, such in "a&b=1" vs. "a=&b=1". + Result>> query_items() const; + + /// Get the string representation of this URI. + const std::string& ToString() const; + + /// Factory function to parse a URI from its string representation. + Status Parse(const std::string& uri_string); + + /// Factory function to parse a URI from its string representation. + static Result FromString(const std::string& uri_string); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +/// Percent-encode the input string, for use e.g. as a URI query parameter. +/// +/// This will escape directory separators, making this function unsuitable +/// for encoding URI paths directly. See UriFromAbsolutePath() instead. +ARROW_EXPORT +std::string UriEscape(std::string_view s); + +ARROW_EXPORT +std::string UriUnescape(std::string_view s); + +/// Encode a host for use within a URI, such as "localhost", +/// "127.0.0.1", or "[::1]". +ARROW_EXPORT +std::string UriEncodeHost(std::string_view host); + +/// Whether the string is a syntactically valid URI scheme according to RFC 3986. 
+ARROW_EXPORT +bool IsValidUriScheme(std::string_view s); + +/// Create a file uri from a given absolute path +ARROW_EXPORT +Result UriFromAbsolutePath(std::string_view path); + +} // namespace arrow::util diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h new file mode 100644 index 0000000000000000000000000000000000000000..ca93fab5b9f4e1f43d451689f0e75cb5572ce983 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +// Convert a UTF8 string to a wstring (either UTF16 or UTF32, depending +// on the wchar_t width). +ARROW_EXPORT Result UTF8ToWideString(std::string_view source); + +// Similarly, convert a wstring to a UTF8 string. +ARROW_EXPORT Result WideStringToUTF8(const std::wstring& source); + +// Convert UTF8 string to a UTF16 string. 
+ARROW_EXPORT Result UTF8StringToUTF16(std::string_view source); + +// Convert UTF16 string to a UTF8 string. +ARROW_EXPORT Result UTF16StringToUTF8(std::u16string_view source); + +// This function needs to be called before doing UTF8 validation. +ARROW_EXPORT void InitializeUTF8(); + +ARROW_EXPORT bool ValidateUTF8(const uint8_t* data, int64_t size); + +ARROW_EXPORT bool ValidateUTF8(std::string_view str); + +// Skip UTF8 byte order mark, if any. +ARROW_EXPORT +Result SkipUTF8BOM(const uint8_t* data, int64_t size); + +static constexpr uint32_t kMaxUnicodeCodepoint = 0x110000; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h new file mode 100644 index 0000000000000000000000000000000000000000..609906052cd20714de07ad81824ba81bb30f9b5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h @@ -0,0 +1,945 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// This is a private header for string-to-number parsing utilities + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/config.h" +#include "arrow/util/macros.h" +#include "arrow/util/time.h" +#include "arrow/util/visibility.h" +#include "arrow/vendored/datetime.h" +#include "arrow/vendored/strptime.h" + +namespace arrow { + +/// \brief A virtual string to timestamp parser +class ARROW_EXPORT TimestampParser { + public: + virtual ~TimestampParser() = default; + + virtual bool operator()(const char* s, size_t length, TimeUnit::type out_unit, + int64_t* out, + bool* out_zone_offset_present = NULLPTR) const = 0; + + virtual const char* kind() const = 0; + + virtual const char* format() const; + + /// \brief Create a TimestampParser that recognizes strptime-like format strings + static std::shared_ptr MakeStrptime(std::string format); + + /// \brief Create a TimestampParser that recognizes (locale-agnostic) ISO8601 + /// timestamps + static std::shared_ptr MakeISO8601(); +}; + +namespace internal { + +/// \brief The entry point for conversion from strings. +/// +/// Specializations of StringConverter for `ARROW_TYPE` must define: +/// - A default constructible member type `value_type` which will be yielded on a +/// successful parse. +/// - The static member function `Convert`, callable with signature +/// `(const ARROW_TYPE& t, const char* s, size_t length, value_type* out)`. +/// `Convert` returns truthy for successful parses and assigns the parsed values to +/// `*out`. Parameters required for parsing (for example a timestamp's TimeUnit) +/// are acquired from the type parameter `t`. 
+template +struct StringConverter; + +template +struct is_parseable { + template ::value_type> + static std::true_type Test(U*); + + template + static std::false_type Test(...); + + static constexpr bool value = decltype(Test(NULLPTR))::value; +}; + +template +using enable_if_parseable = enable_if_t::value, R>; + +template <> +struct StringConverter { + using value_type = bool; + + bool Convert(const BooleanType&, const char* s, size_t length, value_type* out) { + if (length == 1) { + // "0" or "1"? + if (s[0] == '0') { + *out = false; + return true; + } + if (s[0] == '1') { + *out = true; + return true; + } + return false; + } + if (length == 4) { + // "true"? + *out = true; + return ((s[0] == 't' || s[0] == 'T') && (s[1] == 'r' || s[1] == 'R') && + (s[2] == 'u' || s[2] == 'U') && (s[3] == 'e' || s[3] == 'E')); + } + if (length == 5) { + // "false"? + *out = false; + return ((s[0] == 'f' || s[0] == 'F') && (s[1] == 'a' || s[1] == 'A') && + (s[2] == 'l' || s[2] == 'L') && (s[3] == 's' || s[3] == 'S') && + (s[4] == 'e' || s[4] == 'E')); + } + return false; + } +}; + +// Ideas for faster float parsing: +// - http://rapidjson.org/md_doc_internals.html#ParsingDouble +// - https://github.com/google/double-conversion [used here] +// - https://github.com/achan001/dtoa-fast + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, float* out); + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, double* out); + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, uint16_t* out); + +template <> +struct StringConverter { + using value_type = float; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const FloatType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +template <> +struct StringConverter { + 
using value_type = double; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const DoubleType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +template <> +struct StringConverter { + using value_type = uint16_t; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const HalfFloatType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +// NOTE: HalfFloatType would require a half<->float conversion library + +inline uint8_t ParseDecimalDigit(char c) { return static_cast(c - '0'); } + +#define PARSE_UNSIGNED_ITERATION(C_TYPE) \ + if (length > 0) { \ + uint8_t digit = ParseDecimalDigit(*s++); \ + result = static_cast(result * 10U); \ + length--; \ + if (ARROW_PREDICT_FALSE(digit > 9U)) { \ + /* Non-digit */ \ + return false; \ + } \ + result = static_cast(result + digit); \ + } else { \ + break; \ + } + +#define PARSE_UNSIGNED_ITERATION_LAST(C_TYPE) \ + if (length > 0) { \ + if (ARROW_PREDICT_FALSE(result > std::numeric_limits::max() / 10U)) { \ + /* Overflow */ \ + return false; \ + } \ + uint8_t digit = ParseDecimalDigit(*s++); \ + result = static_cast(result * 10U); \ + C_TYPE new_result = static_cast(result + digit); \ + if (ARROW_PREDICT_FALSE(--length > 0)) { \ + /* Too many digits */ \ + return false; \ + } \ + if (ARROW_PREDICT_FALSE(digit > 9U)) { \ + /* Non-digit */ \ + return false; \ + } \ + if (ARROW_PREDICT_FALSE(new_result < result)) { \ + /* Overflow */ \ + return false; \ + } \ + result = new_result; \ + } + +inline bool ParseUnsigned(const char* s, size_t length, uint8_t* out) { + uint8_t result = 0; + + do { + PARSE_UNSIGNED_ITERATION(uint8_t); + PARSE_UNSIGNED_ITERATION(uint8_t); + 
PARSE_UNSIGNED_ITERATION_LAST(uint8_t); + } while (false); + *out = result; + return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint16_t* out) { + uint16_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION_LAST(uint16_t); + } while (false); + *out = result; + return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint32_t* out) { + uint32_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + + PARSE_UNSIGNED_ITERATION_LAST(uint32_t); + } while (false); + *out = result; + return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint64_t* out) { + uint64_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION_LAST(uint64_t); + } while (false); + *out = result; + return true; +} + +#undef PARSE_UNSIGNED_ITERATION +#undef 
PARSE_UNSIGNED_ITERATION_LAST + +template +bool ParseHex(const char* s, size_t length, T* out) { + // lets make sure that the length of the string is not too big + if (!ARROW_PREDICT_TRUE(sizeof(T) * 2 >= length && length > 0)) { + return false; + } + T result = 0; + for (size_t i = 0; i < length; i++) { + result = static_cast(result << 4); + if (s[i] >= '0' && s[i] <= '9') { + result = static_cast(result | (s[i] - '0')); + } else if (s[i] >= 'A' && s[i] <= 'F') { + result = static_cast(result | (s[i] - 'A' + 10)); + } else if (s[i] >= 'a' && s[i] <= 'f') { + result = static_cast(result | (s[i] - 'a' + 10)); + } else { + /* Non-digit */ + return false; + } + } + *out = result; + return true; +} + +template +struct StringToUnsignedIntConverterMixin { + using value_type = typename ARROW_TYPE::c_type; + + bool Convert(const ARROW_TYPE&, const char* s, size_t length, value_type* out) { + if (ARROW_PREDICT_FALSE(length == 0)) { + return false; + } + // If it starts with 0x then its hex + if (length > 2 && s[0] == '0' && ((s[1] == 'x') || (s[1] == 'X'))) { + length -= 2; + s += 2; + + return ARROW_PREDICT_TRUE(ParseHex(s, length, out)); + } + // Skip leading zeros + while (length > 0 && *s == '0') { + length--; + s++; + } + return ParseUnsigned(s, length, out); + } +}; + +template <> +struct StringConverter : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template +struct 
StringToSignedIntConverterMixin { + using value_type = typename ARROW_TYPE::c_type; + using unsigned_type = typename std::make_unsigned::type; + + bool Convert(const ARROW_TYPE&, const char* s, size_t length, value_type* out) { + static constexpr auto max_positive = + static_cast(std::numeric_limits::max()); + // Assuming two's complement + static constexpr unsigned_type max_negative = max_positive + 1; + bool negative = false; + unsigned_type unsigned_value = 0; + + if (ARROW_PREDICT_FALSE(length == 0)) { + return false; + } + // If it starts with 0x then its hex + if (length > 2 && s[0] == '0' && ((s[1] == 'x') || (s[1] == 'X'))) { + length -= 2; + s += 2; + + if (!ARROW_PREDICT_TRUE(ParseHex(s, length, &unsigned_value))) { + return false; + } + *out = static_cast(unsigned_value); + return true; + } + + if (*s == '-') { + negative = true; + s++; + if (--length == 0) { + return false; + } + } + // Skip leading zeros + while (length > 0 && *s == '0') { + length--; + s++; + } + if (!ARROW_PREDICT_TRUE(ParseUnsigned(s, length, &unsigned_value))) { + return false; + } + if (negative) { + if (ARROW_PREDICT_FALSE(unsigned_value > max_negative)) { + return false; + } + // To avoid both compiler warnings (with unsigned negation) + // and undefined behaviour (with signed negation overflow), + // use the expanded formula for 2's complement negation. 
+ *out = static_cast(~unsigned_value + 1); + } else { + if (ARROW_PREDICT_FALSE(unsigned_value > max_positive)) { + return false; + } + *out = static_cast(unsigned_value); + } + return true; + } +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +namespace detail { + +// Inline-able ISO-8601 parser + +using ts_type = TimestampType::c_type; + +template +static inline bool ParseHH(const char* s, Duration* out) { + uint8_t hours = 0; + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours)); + return true; +} + +template +static inline bool ParseHH_MM(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + if (ARROW_PREDICT_FALSE(s[2] != ':')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 3, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if (ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes)); + return true; +} + +template +static inline bool ParseHHMM(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + if 
(ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 2, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if (ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes)); + return true; +} + +template +static inline bool ParseHH_MM_SS(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + uint8_t seconds = 0; + if (ARROW_PREDICT_FALSE(s[2] != ':') || ARROW_PREDICT_FALSE(s[5] != ':')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 3, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 6, 2, &seconds))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if (ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + if (ARROW_PREDICT_FALSE(seconds >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes) + + std::chrono::seconds(seconds)); + return true; +} + +static inline bool ParseSubSeconds(const char* s, size_t length, TimeUnit::type unit, + uint32_t* out) { + // The decimal point has been peeled off at this point + + // Fail if number of decimal places provided exceeds what the unit can hold. + // Calculate how many trailing decimal places are omitted for the unit + // e.g. 
if 4 decimal places are provided and unit is MICRO, 2 are missing + size_t omitted = 0; + switch (unit) { + case TimeUnit::MILLI: + if (ARROW_PREDICT_FALSE(length > 3)) { + return false; + } + if (length < 3) { + omitted = 3 - length; + } + break; + case TimeUnit::MICRO: + if (ARROW_PREDICT_FALSE(length > 6)) { + return false; + } + if (length < 6) { + omitted = 6 - length; + } + break; + case TimeUnit::NANO: + if (ARROW_PREDICT_FALSE(length > 9)) { + return false; + } + if (length < 9) { + omitted = 9 - length; + } + break; + default: + return false; + } + + if (ARROW_PREDICT_TRUE(omitted == 0)) { + return ParseUnsigned(s, length, out); + } else { + uint32_t subseconds = 0; + bool success = ParseUnsigned(s, length, &subseconds); + if (ARROW_PREDICT_TRUE(success)) { + switch (omitted) { + case 1: + *out = subseconds * 10; + break; + case 2: + *out = subseconds * 100; + break; + case 3: + *out = subseconds * 1000; + break; + case 4: + *out = subseconds * 10000; + break; + case 5: + *out = subseconds * 100000; + break; + case 6: + *out = subseconds * 1000000; + break; + case 7: + *out = subseconds * 10000000; + break; + case 8: + *out = subseconds * 100000000; + break; + default: + // Impossible case + break; + } + return true; + } else { + return false; + } + } +} + +} // namespace detail + +template +static inline bool ParseYYYY_MM_DD(const char* s, Duration* since_epoch) { + uint16_t year = 0; + uint8_t month = 0; + uint8_t day = 0; + if (ARROW_PREDICT_FALSE(s[4] != '-') || ARROW_PREDICT_FALSE(s[7] != '-')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 4, &year))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 5, 2, &month))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 8, 2, &day))) { + return false; + } + arrow_vendored::date::year_month_day ymd{arrow_vendored::date::year{year}, + arrow_vendored::date::month{month}, + arrow_vendored::date::day{day}}; + if (ARROW_PREDICT_FALSE(!ymd.ok())) return 
false; + + *since_epoch = std::chrono::duration_cast( + arrow_vendored::date::sys_days{ymd}.time_since_epoch()); + return true; +} + +static inline bool ParseTimestampISO8601(const char* s, size_t length, + TimeUnit::type unit, TimestampType::c_type* out, + bool* out_zone_offset_present = NULLPTR) { + using seconds_type = std::chrono::duration; + + // We allow the following zone offset formats: + // - (none) + // - Z + // - [+-]HH(:?MM)? + // + // We allow the following formats for all units: + // - "YYYY-MM-DD" + // - "YYYY-MM-DD[ T]hhZ?" + // - "YYYY-MM-DD[ T]hh:mmZ?" + // - "YYYY-MM-DD[ T]hh:mm:ssZ?" + // + // We allow the following formats for unit == MILLI, MICRO, or NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{1,3}Z?" + // + // We allow the following formats for unit == MICRO, or NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{4,6}Z?" + // + // We allow the following formats for unit == NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{7,9}Z?" + // + // UTC is always assumed, and the DataType's timezone is ignored. 
+ // + + if (ARROW_PREDICT_FALSE(length < 10)) return false; + + seconds_type seconds_since_epoch; + if (ARROW_PREDICT_FALSE(!ParseYYYY_MM_DD(s, &seconds_since_epoch))) { + return false; + } + + if (length == 10) { + *out = util::CastSecondsToUnit(unit, seconds_since_epoch.count()); + return true; + } + + if (ARROW_PREDICT_FALSE(s[10] != ' ') && ARROW_PREDICT_FALSE(s[10] != 'T')) { + return false; + } + + if (out_zone_offset_present) { + *out_zone_offset_present = false; + } + + seconds_type zone_offset(0); + if (s[length - 1] == 'Z') { + --length; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if (s[length - 3] == '+' || s[length - 3] == '-') { + // [+-]HH + length -= 3; + if (ARROW_PREDICT_FALSE(!detail::ParseHH(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if (s[length - 5] == '+' || s[length - 5] == '-') { + // [+-]HHMM + length -= 5; + if (ARROW_PREDICT_FALSE(!detail::ParseHHMM(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if ((s[length - 6] == '+' || s[length - 6] == '-') && (s[length - 3] == ':')) { + // [+-]HH:MM + length -= 6; + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } + + seconds_type seconds_since_midnight; + switch (length) { + case 13: // YYYY-MM-DD[ T]hh + if (ARROW_PREDICT_FALSE(!detail::ParseHH(s + 11, &seconds_since_midnight))) { + return false; + } + break; + case 16: // YYYY-MM-DD[ T]hh:mm + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s + 11, &seconds_since_midnight))) { + return false; + } + break; + case 19: // YYYY-MM-DD[ T]hh:mm:ss + case 21: // YYYY-MM-DD[ T]hh:mm:ss.s + case 22: // YYYY-MM-DD[ T]hh:mm:ss.ss 
+ case 23: // YYYY-MM-DD[ T]hh:mm:ss.sss + case 24: // YYYY-MM-DD[ T]hh:mm:ss.ssss + case 25: // YYYY-MM-DD[ T]hh:mm:ss.sssss + case 26: // YYYY-MM-DD[ T]hh:mm:ss.ssssss + case 27: // YYYY-MM-DD[ T]hh:mm:ss.sssssss + case 28: // YYYY-MM-DD[ T]hh:mm:ss.ssssssss + case 29: // YYYY-MM-DD[ T]hh:mm:ss.sssssssss + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM_SS(s + 11, &seconds_since_midnight))) { + return false; + } + break; + default: + return false; + } + + seconds_since_epoch += seconds_since_midnight; + seconds_since_epoch += zone_offset; + + if (length <= 19) { + *out = util::CastSecondsToUnit(unit, seconds_since_epoch.count()); + return true; + } + + if (ARROW_PREDICT_FALSE(s[19] != '.')) { + return false; + } + + uint32_t subseconds = 0; + if (ARROW_PREDICT_FALSE( + !detail::ParseSubSeconds(s + 20, length - 20, unit, &subseconds))) { + return false; + } + + *out = util::CastSecondsToUnit(unit, seconds_since_epoch.count()) + subseconds; + return true; +} + +#if defined(_WIN32) || defined(ARROW_WITH_MUSL) +static constexpr bool kStrptimeSupportsZone = false; +#else +static constexpr bool kStrptimeSupportsZone = true; +#endif + +/// \brief Returns time since the UNIX epoch in the requested unit +static inline bool ParseTimestampStrptime(const char* buf, size_t length, + const char* format, bool ignore_time_in_day, + bool allow_trailing_chars, TimeUnit::type unit, + int64_t* out) { + // NOTE: strptime() is more than 10x faster than arrow_vendored::date::parse(). 
+ // The buffer may not be nul-terminated + std::string clean_copy(buf, length); + struct tm result; + memset(&result, 0, sizeof(struct tm)); +#ifdef _WIN32 + char* ret = arrow_strptime(clean_copy.c_str(), format, &result); +#else + char* ret = strptime(clean_copy.c_str(), format, &result); +#endif + if (ret == NULLPTR) { + return false; + } + if (!allow_trailing_chars && static_cast(ret - clean_copy.c_str()) != length) { + return false; + } + // ignore the time part + arrow_vendored::date::sys_seconds secs = + arrow_vendored::date::sys_days(arrow_vendored::date::year(result.tm_year + 1900) / + (result.tm_mon + 1) / std::max(result.tm_mday, 1)); + if (!ignore_time_in_day) { + secs += (std::chrono::hours(result.tm_hour) + std::chrono::minutes(result.tm_min) + + std::chrono::seconds(result.tm_sec)); +#ifndef _WIN32 + secs -= std::chrono::seconds(result.tm_gmtoff); +#endif + } + *out = util::CastSecondsToUnit(unit, secs.time_since_epoch().count()); + return true; +} + +template <> +struct StringConverter { + using value_type = int64_t; + + bool Convert(const TimestampType& type, const char* s, size_t length, value_type* out) { + return ParseTimestampISO8601(s, length, type.unit(), out); + } +}; + +template <> +struct StringConverter + : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template +struct StringConverter> { + using value_type = typename DATE_TYPE::c_type; + + using duration_type = + typename std::conditional::value, + arrow_vendored::date::days, + std::chrono::milliseconds>::type; + + bool Convert(const DATE_TYPE& type, const char* s, size_t length, value_type* out) { + if (ARROW_PREDICT_FALSE(length != 10)) { + return false; + } + + duration_type since_epoch; + if (ARROW_PREDICT_FALSE(!ParseYYYY_MM_DD(s, &since_epoch))) { + return false; + } + + *out = static_cast(since_epoch.count()); + return true; + } +}; + +template +struct StringConverter> { + using value_type = typename 
TIME_TYPE::c_type; + + // We allow the following formats for all units: + // - "hh:mm" + // - "hh:mm:ss" + // + // We allow the following formats for unit == MILLI, MICRO, or NANO: + // - "hh:mm:ss.s{1,3}" + // + // We allow the following formats for unit == MICRO, or NANO: + // - "hh:mm:ss.s{4,6}" + // + // We allow the following formats for unit == NANO: + // - "hh:mm:ss.s{7,9}" + + bool Convert(const TIME_TYPE& type, const char* s, size_t length, value_type* out) { + const auto unit = type.unit(); + std::chrono::seconds since_midnight; + + if (length == 5) { + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s, &since_midnight))) { + return false; + } + *out = + static_cast(util::CastSecondsToUnit(unit, since_midnight.count())); + return true; + } + + if (ARROW_PREDICT_FALSE(length < 8)) { + return false; + } + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM_SS(s, &since_midnight))) { + return false; + } + + *out = static_cast(util::CastSecondsToUnit(unit, since_midnight.count())); + + if (length == 8) { + return true; + } + + if (ARROW_PREDICT_FALSE(s[8] != '.')) { + return false; + } + + uint32_t subseconds_count = 0; + if (ARROW_PREDICT_FALSE( + !detail::ParseSubSeconds(s + 9, length - 9, unit, &subseconds_count))) { + return false; + } + + *out += subseconds_count; + return true; + } +}; + +/// \brief Convenience wrappers around internal::StringConverter. 
+template +bool ParseValue(const T& type, const char* s, size_t length, + typename StringConverter::value_type* out) { + return StringConverter{}.Convert(type, s, length, out); +} + +template +enable_if_parameter_free ParseValue( + const char* s, size_t length, typename StringConverter::value_type* out) { + static T type; + return StringConverter{}.Convert(type, s, length, out); +} + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/vector.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/vector.h new file mode 100644 index 0000000000000000000000000000000000000000..e3c0a67cf46c4ef403e87b5df08686ea4f2d1ba7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/vector.h @@ -0,0 +1,172 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/util/algorithm.h" +#include "arrow/util/functional.h" +#include "arrow/util/logging.h" + +namespace arrow { +namespace internal { + +template +std::vector DeleteVectorElement(const std::vector& values, size_t index) { + DCHECK(!values.empty()); + DCHECK_LT(index, values.size()); + std::vector out; + out.reserve(values.size() - 1); + for (size_t i = 0; i < index; ++i) { + out.push_back(values[i]); + } + for (size_t i = index + 1; i < values.size(); ++i) { + out.push_back(values[i]); + } + return out; +} + +template +std::vector AddVectorElement(const std::vector& values, size_t index, + T new_element) { + DCHECK_LE(index, values.size()); + std::vector out; + out.reserve(values.size() + 1); + for (size_t i = 0; i < index; ++i) { + out.push_back(values[i]); + } + out.emplace_back(std::move(new_element)); + for (size_t i = index; i < values.size(); ++i) { + out.push_back(values[i]); + } + return out; +} + +template +std::vector ReplaceVectorElement(const std::vector& values, size_t index, + T new_element) { + DCHECK_LE(index, values.size()); + std::vector out; + out.reserve(values.size()); + for (size_t i = 0; i < index; ++i) { + out.push_back(values[i]); + } + out.emplace_back(std::move(new_element)); + for (size_t i = index + 1; i < values.size(); ++i) { + out.push_back(values[i]); + } + return out; +} + +template +std::vector FilterVector(std::vector values, Predicate&& predicate) { + auto new_end = std::remove_if(values.begin(), values.end(), + [&](const T& value) { return !predicate(value); }); + values.erase(new_end, values.end()); + return values; +} + +template ()(std::declval()))> +std::vector MapVector(Fn&& map, const std::vector& source) { + std::vector out; + out.reserve(source.size()); + std::transform(source.begin(), source.end(), std::back_inserter(out), + std::forward(map)); + return out; +} + +template ()(std::declval()))> +std::vector MapVector(Fn&& map, 
std::vector&& source) { + std::vector out; + out.reserve(source.size()); + std::transform(std::make_move_iterator(source.begin()), + std::make_move_iterator(source.end()), std::back_inserter(out), + std::forward(map)); + return out; +} + +/// \brief Like MapVector, but where the function can fail. +template , + typename To = typename internal::call_traits::return_type::ValueType> +Result> MaybeMapVector(Fn&& map, const std::vector& source) { + std::vector out; + out.reserve(source.size()); + ARROW_RETURN_NOT_OK(MaybeTransform(source.begin(), source.end(), + std::back_inserter(out), std::forward(map))); + return std::move(out); +} + +template , + typename To = typename internal::call_traits::return_type::ValueType> +Result> MaybeMapVector(Fn&& map, std::vector&& source) { + std::vector out; + out.reserve(source.size()); + ARROW_RETURN_NOT_OK(MaybeTransform(std::make_move_iterator(source.begin()), + std::make_move_iterator(source.end()), + std::back_inserter(out), std::forward(map))); + return std::move(out); +} + +template +std::vector FlattenVectors(const std::vector>& vecs) { + std::size_t sum = 0; + for (const auto& vec : vecs) { + sum += vec.size(); + } + std::vector out; + out.reserve(sum); + for (const auto& vec : vecs) { + out.insert(out.end(), vec.begin(), vec.end()); + } + return out; +} + +template +Result> UnwrapOrRaise(std::vector>&& results) { + std::vector out; + out.reserve(results.size()); + auto end = std::make_move_iterator(results.end()); + for (auto it = std::make_move_iterator(results.begin()); it != end; it++) { + if (!it->ok()) { + return it->status(); + } + out.push_back(it->MoveValueUnsafe()); + } + return std::move(out); +} + +template +Result> UnwrapOrRaise(const std::vector>& results) { + std::vector out; + out.reserve(results.size()); + for (const auto& result : results) { + if (!result.ok()) { + return result.status(); + } + out.push_back(result.ValueUnsafe()); + } + return std::move(out); +} + +} // namespace internal +} // namespace 
arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..1498d2085a03d8555305823b29945d5dafda3770 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +// Windows + +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#if defined(__cplusplus) && defined(__GNUC__) && !defined(__clang__) +// Use C++ attribute syntax where possible to avoid GCC parser bug +// (https://stackoverflow.com/questions/57993818/gcc-how-to-combine-attribute-dllexport-and-nodiscard-in-a-struct-de) +#define ARROW_DLLEXPORT [[gnu::dllexport]] +#define ARROW_DLLIMPORT [[gnu::dllimport]] +#else +#define ARROW_DLLEXPORT __declspec(dllexport) +#define ARROW_DLLIMPORT __declspec(dllimport) +#endif + +// _declspec(dllexport) even when the #included by a non-arrow source +#define ARROW_FORCE_EXPORT ARROW_DLLEXPORT + +#ifdef ARROW_STATIC +#define ARROW_EXPORT +#define ARROW_FRIEND_EXPORT +#define ARROW_TEMPLATE_EXPORT +#elif defined(ARROW_EXPORTING) +#define ARROW_EXPORT ARROW_DLLEXPORT +// For some reason [[gnu::dllexport]] doesn't work well with friend declarations +#define ARROW_FRIEND_EXPORT __declspec(dllexport) +#define ARROW_TEMPLATE_EXPORT ARROW_DLLEXPORT +#else +#define ARROW_EXPORT ARROW_DLLIMPORT +#define ARROW_FRIEND_EXPORT __declspec(dllimport) +#define ARROW_TEMPLATE_EXPORT ARROW_DLLIMPORT +#endif + +#define ARROW_NO_EXPORT + +#else + +// Non-Windows + +#if defined(__cplusplus) && (defined(__GNUC__) || defined(__clang__)) +#ifndef ARROW_EXPORT +#define ARROW_EXPORT [[gnu::visibility("default")]] +#endif +#ifndef ARROW_NO_EXPORT +#define ARROW_NO_EXPORT [[gnu::visibility("hidden")]] +#endif +#else +// Not C++, or not gcc/clang +#ifndef ARROW_EXPORT +#define ARROW_EXPORT +#endif +#ifndef ARROW_NO_EXPORT +#define ARROW_NO_EXPORT +#endif +#endif + +#define ARROW_FRIEND_EXPORT +#define ARROW_TEMPLATE_EXPORT + +// [[gnu::visibility("default")]] even when #included by a non-arrow source +#define ARROW_FORCE_EXPORT [[gnu::visibility("default")]] + +#endif // Non-Windows diff --git 
a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h new file mode 100644 index 0000000000000000000000000000000000000000..c97b2f3b76a7cca4edc4411071cf7106e3296601 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#ifdef _WIN32 + +// Windows defines min and max macros that mess up std::min/max +#ifndef NOMINMAX +#define NOMINMAX +#endif + +#define WIN32_LEAN_AND_MEAN + +// Set Windows 7 as a conservative minimum for Apache Arrow +#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x601 +#undef _WIN32_WINNT +#endif +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x601 +#endif + +#include + +#include "arrow/util/windows_fixup.h" + +#endif // _WIN32 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h new file mode 100644 index 0000000000000000000000000000000000000000..2949ac4ab768890d866be6133babbe6f92459ab3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This header needs to be included multiple times. + +#ifdef _WIN32 + +#ifdef max +#undef max +#endif +#ifdef min +#undef min +#endif + +// The Windows API defines macros from *File resolving to either +// *FileA or *FileW. Need to undo them. 
+#ifdef CopyFile +#undef CopyFile +#endif +#ifdef CreateFile +#undef CreateFile +#endif +#ifdef DeleteFile +#undef DeleteFile +#endif + +// Other annoying Windows macro definitions... +#ifdef IN +#undef IN +#endif +#ifdef OUT +#undef OUT +#endif + +// Note that we can't undefine OPTIONAL, because it can be used in other +// Windows headers... + +#endif // _WIN32 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..83f750d35235e606449842db9ee4f484fa7344f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bce8ab2cb4dd8ce9d849f9ab25a0a469043e16928545398d21a6afbb127a05d +size 4528352 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..6a340f09d2d7430d72680058a834b37278e1d5f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a87b3e5ac91904fc9ace8650052d704711dd74563afb9caf98ca589b71995492 +size 5332496