diff --git a/.gitattributes b/.gitattributes
index c52c9b5a435bfb59b08f02c0a2b7dd9070529090..f83429917b7f21487e162946f582ef1be42b4af6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -176,3 +176,8 @@ env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310
 env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs diff=lfs merge=lfs -text
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..42232ff8949bd9326b93fcfc0ad6792f33971a6e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67b17e479891bf5d919a8ca7ce7ca0535b8858e8e58bcb1fbd154778451aa7cd
+size 1369152
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f38a04fd2c98ddee2b615b2bc207c895e9c29001
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3370617840e9ef04c74cd6f7e140d291a6709ba4e68e9c677b0306fbf92b431
+size 1328288
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c94e13032307a7a954ce800fca99ca5a53fd15f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include "arrow/engine/substrait/api.h"
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ddb4c120f2a877ffb794b8443f8af1f7707d2cf6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Often-used headers, for precompiling.
+// If updating this header, please make sure you check compilation speed
+// before checking in. Adding headers which are not used extremely often
+// may incur a slowdown, since it makes the precompiled header heavier to load.
+
+#include "arrow/pch.h"
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h
new file mode 100644
index 0000000000000000000000000000000000000000..8161f21712974ad6bb6a58ed451807e5a2e8e829
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include "arrow/engine/substrait/extension_set.h"
+#include "arrow/engine/substrait/extension_types.h"
+#include "arrow/engine/substrait/options.h"
+#include "arrow/engine/substrait/relation.h"
+#include "arrow/engine/substrait/serde.h"
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a502960447e64cb0529ed53a47df4602fefbf19
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h
@@ -0,0 +1,477 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "arrow/compute/api_aggregate.h"
+#include "arrow/compute/expression.h"
+#include "arrow/engine/substrait/type_fwd.h"
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace engine {
+
+constexpr const char* kSubstraitArithmeticFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_arithmetic.yaml";
+constexpr const char* kSubstraitBooleanFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_boolean.yaml";
+constexpr const char* kSubstraitComparisonFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_comparison.yaml";
+constexpr const char* kSubstraitDatetimeFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_datetime.yaml";
+constexpr const char* kSubstraitLogarithmicFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_logarithmic.yaml";
+constexpr const char* kSubstraitRoundingFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_rounding.yaml";
+constexpr const char* kSubstraitStringFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_string.yaml";
+constexpr const char* kSubstraitAggregateGenericFunctionsUri =
+    "https://github.com/substrait-io/substrait/blob/main/extensions/"
+    "functions_aggregate_generic.yaml";
+
+/// If a function call contains this URI then the function is looked up
+/// in the registry directly, all arguments are mapped as value arguments,
+/// and any options are ignored.
+constexpr const char* kArrowSimpleExtensionFunctionsUri =
+    "urn:arrow:substrait_simple_extension_function";
+
+struct ARROW_ENGINE_EXPORT Id {
+  std::string_view uri, name;
+  bool empty() const { return uri.empty() && name.empty(); }
+  std::string ToString() const;
+};
+struct ARROW_ENGINE_EXPORT IdHashEq {
+  size_t operator()(Id id) const;
+  bool operator()(Id l, Id r) const;
+};
+
+/// \brief Owning storage for ids
+///
+/// Substrait plans may reuse URIs and names in many places. For convenience
+/// and performance Substrait ids are typically passed around as views. As we
+/// convert a plan from Substrait to Arrow we need to copy these strings out of
+/// the Substrait buffer and into owned storage. This class serves as that owned
+/// storage.
+class ARROW_ENGINE_EXPORT IdStorage {
+ public:
+  virtual ~IdStorage() = default;
+  /// \brief Get an equivalent id pointing into this storage
+  ///
+  /// This operation will copy the ids into storage if they do not already exist
+  virtual Id Emplace(Id id) = 0;
+  /// \brief Get an equivalent view pointing into this storage for a URI
+  ///
+  /// If no URI is found then the uri will be copied into storage
+  virtual std::string_view EmplaceUri(std::string_view uri) = 0;
+  /// \brief Get an equivalent id pointing into this storage
+  ///
+  /// If no id is found then nullopt will be returned
+  virtual std::optional<Id> Find(Id id) const = 0;
+  /// \brief Get an equivalent view pointing into this storage for a URI
+  ///
+  /// If no URI is found then nullopt will be returned
+  virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+
+  static std::unique_ptr<IdStorage> Make();
+};
+
+/// \brief Describes a Substrait call
+///
+/// Substrait call expressions contain a list of arguments which can either
+/// be enum arguments (which are serialized as strings), value arguments (which
+/// are Arrow expressions), or type arguments (not yet implemented)
+class ARROW_ENGINE_EXPORT SubstraitCall {
+ public:
+  SubstraitCall(Id id, std::shared_ptr<DataType> output_type, bool output_nullable,
+                bool is_hash = false)
+      : id_(id),
+        output_type_(std::move(output_type)),
+        output_nullable_(output_nullable),
+        is_hash_(is_hash) {}
+
+  const Id& id() const { return id_; }
+  const std::shared_ptr<DataType>& output_type() const { return output_type_; }
+  bool output_nullable() const { return output_nullable_; }
+  bool is_hash() const { return is_hash_; }
+  const std::unordered_map<std::string, std::vector<std::string>>& options() const {
+    return options_;
+  }
+
+  bool HasEnumArg(int index) const;
+  Result<std::string_view> GetEnumArg(int index) const;
+  void SetEnumArg(int index, std::string enum_arg);
+  Result<compute::Expression> GetValueArg(int index) const;
+  bool HasValueArg(int index) const;
+  void SetValueArg(int index, compute::Expression value_arg);
+  std::optional<std::vector<std::string> const*> GetOption(
+      std::string_view option_name) const;
+  void SetOption(std::string_view option_name,
+                 const std::vector<std::string_view>& option_preferences);
+  bool HasOptions() const;
+  int size() const { return size_; }
+
+ private:
+  Id id_;
+  std::shared_ptr<DataType> output_type_;
+  bool output_nullable_;
+  // Only needed when converting from Substrait -> Arrow aggregates. The
+  // Arrow function name depends on whether or not there are any groups
+  bool is_hash_;
+  std::unordered_map<int, std::string> enum_args_;
+  std::unordered_map<int, compute::Expression> value_args_;
+  std::unordered_map<std::string, std::vector<std::string>> options_;
+  int size_ = 0;
+};
+
+/// Substrait identifies functions and custom data types using a (uri, name) pair.
+///
+/// This registry is a bidirectional mapping between Substrait IDs and their
+/// corresponding Arrow counterparts (arrow::DataType and function names in a function
+/// registry)
+///
+/// Substrait extension types and variations must be registered with their
+/// corresponding arrow::DataType before they can be used!
+///
+/// Conceptually this can be thought of as two pairs of `unordered_map`s. One pair to
+/// go back and forth between Substrait ID and arrow::DataType and another pair to go
+/// back and forth between Substrait ID and Arrow function names.
+///
+/// Unlike an ExtensionSet this registry is not created automatically when consuming
+/// Substrait plans and must be configured ahead of time (although there is a default
+/// instance).
+class ARROW_ENGINE_EXPORT ExtensionIdRegistry {
+ public:
+  using ArrowToSubstraitCall =
+      std::function<Result<SubstraitCall>(const arrow::compute::Expression::Call&)>;
+  using SubstraitCallToArrow =
+      std::function<Result<arrow::compute::Expression>(const SubstraitCall&)>;
+  using ArrowToSubstraitAggregate =
+      std::function<Result<SubstraitCall>(const arrow::compute::Aggregate&)>;
+  using SubstraitAggregateToArrow =
+      std::function<Result<arrow::compute::Aggregate>(const SubstraitCall&)>;
+
+  /// \brief A mapping between a Substrait ID and an arrow::DataType
+  struct TypeRecord {
+    Id id;
+    const std::shared_ptr<DataType>& type;
+  };
+
+  /// \brief Return a uri view owned by this registry
+  ///
+  /// If the URI has never been emplaced it will return nullopt
+  virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+  /// \brief Return an id view owned by this registry
+  ///
+  /// If the id has never been emplaced it will return nullopt
+  virtual std::optional<Id> FindId(Id id) const = 0;
+  virtual std::optional<TypeRecord> GetType(const DataType&) const = 0;
+  virtual std::optional<TypeRecord> GetType(Id) const = 0;
+  virtual Status CanRegisterType(Id, const std::shared_ptr<DataType>& type) const = 0;
+  virtual Status RegisterType(Id, std::shared_ptr<DataType>) = 0;
+  /// \brief Register a converter that converts an Arrow call to a Substrait call
+  ///
+  /// Note that there may not be 1:1 parity between ArrowToSubstraitCall and
+  /// SubstraitCallToArrow because some standard functions (e.g. add) may map to
+  /// multiple Arrow functions (e.g. add, add_checked)
+  virtual Status AddArrowToSubstraitCall(std::string arrow_function_name,
+                                         ArrowToSubstraitCall conversion_func) = 0;
+  /// \brief Check to see if a converter can be registered
+  ///
+  /// \return Status::OK if there are no conflicts, otherwise an error is returned
+  virtual Status CanAddArrowToSubstraitCall(
+      const std::string& arrow_function_name) const = 0;
+
+  /// \brief Register a converter that converts an Arrow aggregate to a Substrait
+  /// aggregate
+  virtual Status AddArrowToSubstraitAggregate(
+      std::string arrow_function_name, ArrowToSubstraitAggregate conversion_func) = 0;
+  /// \brief Check to see if a converter can be registered
+  ///
+  /// \return Status::OK if there are no conflicts, otherwise an error is returned
+  virtual Status CanAddArrowToSubstraitAggregate(
+      const std::string& arrow_function_name) const = 0;
+
+  /// \brief Register a converter that converts a Substrait call to an Arrow call
+  virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+                                         SubstraitCallToArrow conversion_func) = 0;
+  /// \brief Check to see if a converter can be registered
+  ///
+  /// \return Status::OK if there are no conflicts, otherwise an error is returned
+  virtual Status CanAddSubstraitCallToArrow(Id substrait_function_id) const = 0;
+  /// \brief Register a simple mapping function
+  ///
+  /// All calls to the function must pass only value arguments. The arguments
+  /// will be converted to expressions and passed to the Arrow function
+  virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+                                         std::string arrow_function_name) = 0;
+
+  /// \brief Register a converter that converts a Substrait aggregate to an Arrow
+  /// aggregate
+  virtual Status AddSubstraitAggregateToArrow(
+      Id substrait_function_id, SubstraitAggregateToArrow conversion_func) = 0;
+  /// \brief Check to see if a converter can be registered
+  ///
+  /// \return Status::OK if there are no conflicts, otherwise an error is returned
+  virtual Status CanAddSubstraitAggregateToArrow(Id substrait_function_id) const = 0;
+
+  /// \brief Return a list of Substrait functions that have a converter
+  ///
+  /// The function ids are encoded as strings using the pattern {uri}#{name}
+  virtual std::vector<std::string> GetSupportedSubstraitFunctions() const = 0;
+
+  /// \brief Find a converter to map Arrow calls to Substrait calls
+  /// \return A converter function or an invalid status if no converter is registered
+  virtual Result<ArrowToSubstraitCall> GetArrowToSubstraitCall(
+      const std::string& arrow_function_name) const = 0;
+
+  /// \brief Find a converter to map Arrow aggregates to Substrait aggregates
+  /// \return A converter function or an invalid status if no converter is registered
+  virtual Result<ArrowToSubstraitAggregate> GetArrowToSubstraitAggregate(
+      const std::string& arrow_function_name) const = 0;
+
+  /// \brief Find a converter to map a Substrait aggregate to an Arrow aggregate
+  /// \return A converter function or an invalid status if no converter is registered
+  virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrow(
+      Id substrait_function_id) const = 0;
+
+  /// \brief Find a converter to map a Substrait call to an Arrow call
+  /// \return A converter function or an invalid status if no converter is registered
+  virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrow(
+      Id substrait_function_id) const = 0;
+
+  /// \brief Similar to \see GetSubstraitCallToArrow but only uses the name
+  ///
+  /// There may be multiple functions with the same name and this will return
+  /// the first. This is slower than GetSubstraitCallToArrow and should only
+  /// be used when the plan does not include a URI (or the URI is "/")
+  virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrowFallback(
+      std::string_view function_name) const = 0;
+
+  /// \brief Similar to \see GetSubstraitAggregateToArrow but only uses the name
+  ///
+  /// \see GetSubstraitCallToArrowFallback for details on the fallback behavior
+  virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrowFallback(
+      std::string_view function_name) const = 0;
+};
+
+constexpr std::string_view kArrowExtTypesUri =
+    "https://github.com/apache/arrow/blob/main/format/substrait/"
+    "extension_types.yaml";
+
+/// A default registry with all supported functions and data types registered
+///
+/// Note: Function support is currently very minimal, see ARROW-15538
+ARROW_ENGINE_EXPORT ExtensionIdRegistry* default_extension_id_registry();
+
+/// \brief Make a nested registry with a given parent.
+///
+/// A nested registry supports registering types and functions in addition to those
+/// already registered in its parent registry. No conflicts in IDs and names used for
+/// lookup are allowed. Normally, the given parent is the default registry.
+///
+/// One use case for a nested registry is for dynamic registration of functions defined
+/// within a Substrait plan while keeping these registrations specific to the plan. When
+/// the Substrait plan is disposed of, normally after its execution, the nested registry
+/// can be disposed of as well.
+ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> nested_extension_id_registry(
+    const ExtensionIdRegistry* parent);
+
+/// \brief A set of extensions used within a plan
+///
+/// Each time an extension is used within a Substrait plan the extension
+/// must be included in an extension set that is defined at the root of the
+/// plan.
+///
+/// The plan refers to a specific extension using an "anchor" which is an
+/// arbitrary integer invented by the producer that has no meaning beyond a
+/// plan but which should be consistent within a plan.
+///
+/// To support serialization and deserialization this type serves as a
+/// bidirectional map between Substrait ID and "anchor"s.
+///
+/// When deserializing a Substrait plan the extension set should be extracted
+/// after the plan has been converted from Protobuf and before the plan
+/// is converted to an execution plan.
+///
+/// The extension set can be kept and reused during serialization if a perfect
+/// round trip is required. If serialization is not needed or round tripping
+/// is not required then the extension set can be safely discarded after the
+/// plan has been converted into an execution plan.
+///
+/// When converting an execution plan into a Substrait plan an extension set
+/// can be automatically generated or a previously generated extension set can
+/// be used.
+///
+/// ExtensionSet does not own strings; it only refers to strings in an
+/// ExtensionIdRegistry.
+class ARROW_ENGINE_EXPORT ExtensionSet {
+ public:
+  struct FunctionRecord {
+    Id id;
+    std::string_view name;
+  };
+
+  struct TypeRecord {
+    Id id;
+    std::shared_ptr<DataType> type;
+  };
+
+  /// Construct an empty ExtensionSet to be populated during serialization.
+  explicit ExtensionSet(const ExtensionIdRegistry* = default_extension_id_registry());
+  ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionSet);
+
+  /// Construct an ExtensionSet with explicit extension ids for efficient referencing
+  /// during deserialization. Note that input vectors need not be densely packed; an
+  /// empty (default constructed) Id may be used as a placeholder to indicate an unused
+  /// _anchor/_reference. This factory will be used to wrap the extensions declared in a
+  /// substrait::Plan before deserializing the plan's relations.
+  ///
+  /// Views will be replaced with equivalent views pointing to memory owned by the
+  /// registry.
+  ///
+  /// Note: This is an advanced operation. The order of the ids, types, and functions
+  /// must match the anchor numbers chosen for a plan.
+  ///
+  /// An extension set should instead be created using
+  /// arrow::engine::GetExtensionSetFromPlan
+  static Result<ExtensionSet> Make(
+      std::unordered_map<uint32_t, std::string_view> uris,
+      std::unordered_map<uint32_t, Id> type_ids,
+      std::unordered_map<uint32_t, Id> function_ids,
+      const ConversionOptions& conversion_options,
+      const ExtensionIdRegistry* = default_extension_id_registry());
+
+  const std::unordered_map<uint32_t, std::string_view>& uris() const { return uris_; }
+
+  /// \brief Returns a data type given an anchor
+  ///
+  /// This is used when converting a Substrait plan to an Arrow execution plan.
+  ///
+  /// If the anchor does not exist in this extension set an error will be returned.
+  Result<TypeRecord> DecodeType(uint32_t anchor) const;
+
+  /// \brief Returns the number of custom type records in this extension set
+  ///
+  /// Note: the types are currently stored as a sparse vector, so this may return a
+  /// value larger than the actual number of types. This behavior may change in the
+  /// future; see ARROW-15583.
+  std::size_t num_types() const { return types_.size(); }
+
+  /// \brief Lookup the anchor for a given type
+  ///
+  /// This operation is used when converting an Arrow execution plan to a Substrait
+  /// plan. If the type has been previously encoded then the same anchor value will be
+  /// returned.
+  ///
+  /// If the type has not been previously encoded then a new anchor value will be
+  /// created.
+  ///
+  /// If the type does not exist in the extension id registry then an error will be
+  /// returned.
+  ///
+  /// \return An anchor that can be used to refer to the type within a plan
+  Result<uint32_t> EncodeType(const DataType& type);
+
+  /// \brief Return a function id given an anchor
+  ///
+  /// This is used when converting a Substrait plan to an Arrow execution plan.
+  ///
+  /// If the anchor does not exist in this extension set an error will be returned.
+  Result<Id> DecodeFunction(uint32_t anchor) const;
+
+  /// \brief Lookup the anchor for a given function
+  ///
+  /// This operation is used when converting an Arrow execution plan to a Substrait
+  /// plan. If the function has been previously encoded then the same anchor value will
+  /// be returned.
+  ///
+  /// If the function has not been previously encoded then a new anchor value will be
+  /// created.
+  ///
+  /// If the function name is not in the extension id registry then an error will be
+  /// returned.
+  ///
+  /// \return An anchor that can be used to refer to the function within a plan
+  Result<uint32_t> EncodeFunction(Id function_id);
+
+  /// \brief Stores a plan-specific id that is not known to the registry
+  ///
+  /// This is used when converting an Arrow execution plan to a Substrait plan.
+  ///
+  /// If the function is a UDF, something that wasn't known to the registry,
+  /// then we need long term storage of the function name (the ids are just
+  /// views)
+  Id RegisterPlanSpecificId(Id id);
+
+  /// \brief Return the number of custom functions in this extension set
+  std::size_t num_functions() const { return functions_.size(); }
+
+  const ExtensionIdRegistry* registry() const { return registry_; }
+
+ private:
+  const ExtensionIdRegistry* registry_;
+  // If the registry is not aware of an id then we probably can't do anything
+  // with it. However, in some cases, these may represent extensions or features
+  // that we can safely ignore. For example, we can usually safely ignore
+  // extension type variations if we assume the plan is valid. These ignorable
+  // ids are stored here.
+  std::unique_ptr<IdStorage> plan_specific_ids_ = IdStorage::Make();
+
+  // Map from anchor values to URI values referenced by this extension set
+  std::unordered_map<uint32_t, std::string_view> uris_;
+  // Map from anchor values to type definitions, used during Substrait->Arrow
+  // and populated from the Substrait extension set
+  std::unordered_map<uint32_t, TypeRecord> types_;
+  // Map from anchor values to function ids, used during Substrait->Arrow
+  // and populated from the Substrait extension set
+  std::unordered_map<uint32_t, Id> functions_;
+  // Map from type names to anchor values. Used during Arrow->Substrait
+  // and built as the plan is created.
+  std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> types_map_;
+  // Map from function names to anchor values. Used during Arrow->Substrait
+  // and built as the plan is created.
+  std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> functions_map_;
+
+  Status CheckHasUri(std::string_view uri);
+  void AddUri(std::pair<uint32_t, std::string_view> uri);
+  Status AddUri(Id id);
+};
+
+} // namespace engine
+} // namespace arrow
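For orientation, the registry API above composes like this. The following is a minimal, illustrative sketch only (it is not part of the vendored headers); the URI and function name are hypothetical, and it relies on the simple-mapping overload of `AddSubstraitCallToArrow` declared above:

```cpp
// Sketch: expose an Arrow compute kernel to Substrait plans under a custom
// (uri, name) id, keeping the registration plan-local via a nested registry.
#include <memory>

#include "arrow/engine/substrait/extension_set.h"
#include "arrow/result.h"
#include "arrow/status.h"

namespace eng = arrow::engine;

arrow::Result<std::shared_ptr<eng::ExtensionIdRegistry>> MakePlanLocalRegistry() {
  // Wrap the process-wide default registry; lookups fall through to the
  // parent while new registrations stay local to the nested registry.
  std::shared_ptr<eng::ExtensionIdRegistry> registry =
      eng::nested_extension_id_registry(eng::default_extension_id_registry());

  // Hypothetical id, matching whatever (uri, name) a producer emits.
  eng::Id my_add{"urn:example:my_functions", "my_add"};

  // Simple mapping: every Substrait argument is treated as a value argument
  // and passed straight through to the Arrow "add" function.
  ARROW_RETURN_NOT_OK(registry->AddSubstraitCallToArrow(my_add, "add"));
  return registry;
}
```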
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..28a4898a878d72061ce5af7ab16337c6b975170f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace engine {
+
+// arrow::ExtensionTypes are provided to wrap uuid, fixed_char, varchar, interval_year,
+// and interval_day which are first-class types in substrait but do not appear in
+// the arrow type system.
+//
+// Note that these are not automatically registered with arrow::RegisterExtensionType(),
+// which means among other things that serialization of these types to IPC would fail.
+
+/// fixed_size_binary(16) for storing Universally Unique IDentifiers
+ARROW_ENGINE_EXPORT
+std::shared_ptr<DataType> uuid();
+
+/// fixed_size_binary(length) constrained to contain only valid UTF-8
+ARROW_ENGINE_EXPORT
+std::shared_ptr<DataType> fixed_char(int32_t length);
+
+/// utf8() constrained to be shorter than `length`
+ARROW_ENGINE_EXPORT
+std::shared_ptr<DataType> varchar(int32_t length);
+
+/// fixed_size_list(int32(), 2) storing a number of [years, months]
+ARROW_ENGINE_EXPORT
+std::shared_ptr<DataType> interval_year();
+
+/// fixed_size_list(int32(), 2) storing a number of [days, seconds]
+ARROW_ENGINE_EXPORT
+std::shared_ptr<DataType> interval_day();
+
+/// Return true if t is Uuid, otherwise false
+ARROW_ENGINE_EXPORT
+bool UnwrapUuid(const DataType&);
+
+/// Return FixedChar length if t is FixedChar, otherwise nullopt
+ARROW_ENGINE_EXPORT
+std::optional<int32_t> UnwrapFixedChar(const DataType&);
+
+/// Return Varchar (max) length if t is VarChar, otherwise nullopt
+ARROW_ENGINE_EXPORT
+std::optional<int32_t> UnwrapVarChar(const DataType& t);
+
+/// Return true if t is IntervalYear, otherwise false
+ARROW_ENGINE_EXPORT
+bool UnwrapIntervalYear(const DataType&);
+
+/// Return true if t is IntervalDay, otherwise false
+ARROW_ENGINE_EXPORT
+bool UnwrapIntervalDay(const DataType&);
+
+} // namespace engine
+} // namespace arrow
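A quick sketch of how these wrapper types behave in practice (illustrative only, not part of this diff):

```cpp
// Sketch: the Unwrap* helpers recover the Substrait-specific parameters from
// the extension types declared above, and return nullopt/false otherwise.
#include <cassert>
#include <memory>
#include <optional>

#include "arrow/engine/substrait/extension_types.h"

void ProbeSubstraitTypes() {
  std::shared_ptr<arrow::DataType> fc = arrow::engine::fixed_char(10);
  std::shared_ptr<arrow::DataType> vc = arrow::engine::varchar(32);

  // UnwrapFixedChar yields the declared length only for fixed_char types.
  assert(arrow::engine::UnwrapFixedChar(*fc) == std::optional<int32_t>(10));
  assert(!arrow::engine::UnwrapFixedChar(*vc).has_value());
  assert(arrow::engine::UnwrapVarChar(*vc) == std::optional<int32_t>(32));
  assert(arrow::engine::UnwrapUuid(*arrow::engine::uuid()));
}
```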
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e6f6efb2c751a97e3f0cd9de3eb55c0bb87772c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/options.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/engine/substrait/type_fwd.h"
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace engine {
+
+/// How strictly to adhere to the input structure when converting between Substrait and
+/// Acero representations of a plan. This allows the user to trade conversion accuracy
+/// for performance and lenience.
+enum class ARROW_ENGINE_EXPORT ConversionStrictness {
+  /// When a primitive is used at the input that doesn't have an exact match at the
+  /// output, reject the conversion. This effectively asserts that there is no (known)
+  /// information loss in the conversion, and that plans should either round-trip back
+  /// and forth exactly or not at all. This option is primarily intended for testing and
+  /// debugging.
+  EXACT_ROUNDTRIP,
+
+  /// When a primitive is used at the input that doesn't have an exact match at the
+  /// output, attempt to model it with some collection of primitives at the output. This
+  /// means that even if the incoming plan is completely optimal by some metric, the
+  /// returned plan is fairly likely to not be optimal anymore, and round-trips back and
+  /// forth may make the plan increasingly suboptimal. However, every primitive at the
+  /// output can be (manually) traced back to exactly one primitive at the input, which
+  /// may be useful when debugging.
+  PRESERVE_STRUCTURE,
+
+  /// Behaves like PRESERVE_STRUCTURE, but prefers performance over structural accuracy.
+  /// Basic optimizations *may* be applied, in order to attempt to not regress in terms
+  /// of plan performance: if the incoming plan was already aggressively optimized, the
+  /// goal is for the output plan to not be less performant. In practical use cases,
+  /// this is probably the option you want.
+  ///
+  /// Note that no guarantees are made on top of PRESERVE_STRUCTURE. Past and future
+  /// versions of Arrow may even ignore this option entirely and treat it exactly like
+  /// PRESERVE_STRUCTURE.
+  BEST_EFFORT,
+};
+
+using NamedTableProvider = std::function<Result<acero::Declaration>(
+    const std::vector<std::string>&, const Schema&)>;
+static NamedTableProvider kDefaultNamedTableProvider;
+
+using NamedTapProvider = std::function<Result<acero::Declaration>(
+    const std::string&, std::vector<acero::Declaration::Input>, const std::string&,
+    std::shared_ptr<Schema>)>;
+
+class ARROW_ENGINE_EXPORT ExtensionDetails {
+ public:
+  virtual ~ExtensionDetails() = default;
+};
+
+class ARROW_ENGINE_EXPORT ExtensionProvider {
+ public:
+  virtual ~ExtensionProvider() = default;
+  virtual Result<DeclarationInfo> MakeRel(const ConversionOptions& conv_opts,
+                                          const std::vector<DeclarationInfo>& inputs,
+                                          const ExtensionDetails& ext_details,
+                                          const ExtensionSet& ext_set) = 0;
+};
+
+/// \brief Get the default extension provider
+ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionProvider> default_extension_provider();
+/// \brief Set the default extension provider
+///
+/// \param[in] provider the new provider to be set as default
+ARROW_ENGINE_EXPORT void set_default_extension_provider(
+    const std::shared_ptr<ExtensionProvider>& provider);
+
+ARROW_ENGINE_EXPORT NamedTapProvider default_named_tap_provider();
+
+ARROW_ENGINE_EXPORT void set_default_named_tap_provider(NamedTapProvider provider);
+
+/// Options that control the conversion between Substrait and Acero representations of
+/// a plan.
+struct ARROW_ENGINE_EXPORT ConversionOptions {
+  ConversionOptions()
+      : strictness(ConversionStrictness::BEST_EFFORT),
+        named_table_provider(kDefaultNamedTableProvider),
+        named_tap_provider(default_named_tap_provider()),
+        extension_provider(default_extension_provider()),
+        allow_arrow_extensions(false) {}
+
+  /// \brief How strictly the converter should adhere to the structure of the input.
+  ConversionStrictness strictness;
+  /// \brief A custom strategy to be used for providing named tables
+  ///
+  /// The default behavior will return an invalid status if the plan has any
+  /// named table relations.
+  NamedTableProvider named_table_provider;
+  /// \brief A custom strategy to be used for obtaining a tap declaration
+  ///
+  /// The default provider returns an error
+  NamedTapProvider named_tap_provider;
+  /// \brief A custom strategy to be used for providing relation infos.
+  ///
+  /// The default behavior will provide for relations known to Arrow.
+  std::shared_ptr<ExtensionProvider> extension_provider;
+  /// \brief If true then Arrow-specific types and functions will be allowed
+  ///
+  /// Set to false to create plans that are more likely to be compatible with non-Arrow
+  /// engines
+  bool allow_arrow_extensions;
+};
+
+} // namespace engine
+} // namespace arrow
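Because the default `named_table_provider` only returns an error, consumers executing plans that contain named tables must install their own provider. A minimal sketch (not part of this diff; the in-memory table map is illustrative, and `TableSourceNodeOptions` comes from Acero rather than these headers):

```cpp
// Sketch: resolve Substrait named tables from an in-memory map by feeding the
// matching arrow::Table into the plan as a "table_source" node.
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "arrow/acero/options.h"
#include "arrow/engine/substrait/options.h"
#include "arrow/table.h"

arrow::engine::ConversionOptions MakeConversionOptions(
    std::map<std::string, std::shared_ptr<arrow::Table>> tables) {
  arrow::engine::ConversionOptions conversion_options;
  conversion_options.named_table_provider =
      [tables = std::move(tables)](const std::vector<std::string>& names,
                                   const arrow::Schema&)
      -> arrow::Result<arrow::acero::Declaration> {
    if (names.empty()) return arrow::Status::Invalid("empty named table path");
    auto it = tables.find(names.back());  // match on the last path segment
    if (it == tables.end()) return arrow::Status::KeyError("unknown named table");
    return arrow::acero::Declaration(
        "table_source", arrow::acero::TableSourceNodeOptions(it->second));
  };
  return conversion_options;
}
```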
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0913b9ae029bf790fe1d348eb82911f8a912079
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/compute/api_aggregate.h"
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace engine {
+
+/// Execution information resulting from converting a Substrait relation.
+struct ARROW_ENGINE_EXPORT DeclarationInfo {
+  /// The compute declaration produced thus far.
+  acero::Declaration declaration;
+
+  std::shared_ptr<Schema> output_schema;
+};
+
+/// Information resulting from converting a Substrait plan
+struct ARROW_ENGINE_EXPORT PlanInfo {
+  /// The root declaration.
+  ///
+  /// Only plans containing a single top-level relation are supported and so this will
+  /// represent that relation.
+  ///
+  /// This should technically be a RelRoot but some producers use a simple Rel here and
+  /// so Acero currently supports that case.
+  DeclarationInfo root;
+  /// The names of the output fields
+  ///
+  /// If `root` was created from a simple Rel then this will be empty
+  std::vector<std::string> names;
+};
+
+/// An expression whose output has a name
+struct ARROW_ENGINE_EXPORT NamedExpression {
+  /// An expression
+  compute::Expression expression;
+  // An optional name to assign to the output, may be the empty string
+  std::string name;
+};
+
+/// A collection of expressions bound to a common schema
+struct ARROW_ENGINE_EXPORT BoundExpressions {
+  /// The expressions
+  std::vector<NamedExpression> named_expressions;
+  /// The schema that all the expressions are bound to
+  std::shared_ptr<Schema> schema;
+};
+
+} // namespace engine
+} // namespace arrow
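`BoundExpressions` is the input type of `SerializeExpressions` in serde.h, added below. A minimal sketch of building one (illustrative only; the schema and field names are assumptions for the example):

```cpp
// Sketch: two named expressions over a schema with an int32 column "x".
#include "arrow/api.h"
#include "arrow/compute/expression.h"
#include "arrow/engine/substrait/relation.h"

arrow::engine::BoundExpressions MakeBoundExpressions() {
  namespace cp = arrow::compute;
  arrow::engine::BoundExpressions bound;
  bound.schema = arrow::schema({arrow::field("x", arrow::int32())});
  bound.named_expressions = {
      {cp::field_ref("x"), "x"},  // pass-through projection of the column
      {cp::call("add", {cp::field_ref("x"), cp::literal(1)}), "x_plus_one"}};
  return bound;
}
```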
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab749f4a64b0513a1838c8e049c2abcd24181016
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h
@@ -0,0 +1,331 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "arrow/compute/type_fwd.h"
+#include "arrow/dataset/type_fwd.h"
+#include "arrow/engine/substrait/options.h"
+#include "arrow/engine/substrait/relation.h"
+#include "arrow/engine/substrait/type_fwd.h"
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace engine {
+
+/// \brief Serialize an Acero Plan to a binary protobuf Substrait message
+///
+/// \param[in] declaration the Acero declaration to serialize.
+/// This declaration is the sink relation of the Acero plan.
+/// \param[in,out] ext_set the extension mapping to use; may be updated to add
+/// mappings for functions used in the plan
+/// \param[in] conversion_options options to control how the conversion is done
+///
+/// \return a buffer containing the protobuf serialization of the Acero relation
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SerializePlan(
+    const acero::Declaration& declaration, ExtensionSet* ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Serialize expressions to a Substrait message
+///
+/// \param[in] bound_expressions the expressions to serialize.
+/// \param[in] conversion_options options to control how the conversion is done
+/// \param[in,out] ext_set the extension mapping to use, optional, only needed
+/// if you want to control the value of function anchors to mirror a previous
+/// serialization / deserialization. Will be updated if new functions are
+/// encountered
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SerializeExpressions(
+    const BoundExpressions& bound_expressions,
+    const ConversionOptions& conversion_options = {}, ExtensionSet* ext_set = NULLPTR);
+
+/// Factory function type for generating the node that consumes the batches produced
+/// by each toplevel Substrait relation when deserializing a Substrait Plan.
+using ConsumerFactory = std::function<std::shared_ptr<acero::SinkNodeConsumer>()>;
+
+/// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+///
+/// The output of each top-level Substrait relation will be sent to a caller supplied
+/// consumer function provided by consumer_factory
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+/// message
+/// \param[in] consumer_factory factory function for generating the node that consumes
+/// the batches produced by each toplevel Substrait relation
+/// \param[in] registry an extension-id-registry to use, or null for the default one.
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// Plan is returned here.
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return a vector of ExecNode declarations, one for each toplevel relation in the
+/// Substrait Plan
+ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+    const Buffer& buf, const ConsumerFactory& consumer_factory,
+    const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+///
+/// The output of each top-level Substrait relation will be sent to a caller supplied
+/// consumer function provided by consumer_factory
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+/// message
+/// \param[in] consumer node that consumes the batches produced by each toplevel
+/// Substrait relation
+/// \param[in] registry an extension-id-registry to use, or null for the default one.
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// Plan is returned here.
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return an ExecPlan for the Substrait Plan
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+    const Buffer& buf, const std::shared_ptr<acero::SinkNodeConsumer>& consumer,
+    const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+    const ConversionOptions& conversion_options = {});
+
+/// Factory function type for generating the write options of a node consuming the
+/// batches produced by each toplevel Substrait relation when deserializing a Substrait
+/// Plan.
+using WriteOptionsFactory = std::function<std::shared_ptr<dataset::WriteNodeOptions>()>;
+
+/// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+///
+/// The output of each top-level Substrait relation will be written to a filesystem.
+/// `write_options_factory` can be used to control write behavior.
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+/// message
+/// \param[in] write_options_factory factory function for generating the write options
+/// of a node consuming the batches produced by each toplevel Substrait relation
+/// \param[in] registry an extension-id-registry to use, or null for the default one.
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// Plan is returned here.
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return a vector of ExecNode declarations, one for each toplevel relation in the
+/// Substrait Plan
+ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+    const Buffer& buf, const WriteOptionsFactory& write_options_factory,
+    const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+///
+/// The output of the single Substrait relation will be written to a filesystem.
+/// `write_options` can be used to control write behavior.
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+/// message
+/// \param[in] write_options write options of a node consuming the batches produced by
+/// each toplevel Substrait relation
+/// \param[in] registry an extension-id-registry to use, or null for the default one.
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// Plan is returned here.
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return an ExecPlan for the Substrait Plan
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+    const Buffer& buf, const std::shared_ptr<dataset::WriteNodeOptions>& write_options,
+    const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a Substrait Plan message to a Declaration
+///
+/// The plan will not contain any sink nodes and will be suitable for use in any
+/// of the arrow::compute::DeclarationToXyz methods.
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+/// message
+/// \param[in] registry an extension-id-registry to use, or null for the default one.
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// Plan is returned here.
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return A declaration representing the Substrait plan
+ARROW_ENGINE_EXPORT Result<acero::Declaration> DeserializePlan(
+    const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+    ExtensionSet* ext_set_out = NULLPTR,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserialize a Substrait ExtendedExpression message to the corresponding
+/// Arrow type
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a collection of
+/// bound expressions
+/// \param[in] registry an extension-id-registry to use, or null for the default one
+/// \param[in] conversion_options options to control how the conversion is done
+/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+/// message is returned here.
+/// \return A collection of expressions and a common input schema they are bound to
+ARROW_ENGINE_EXPORT Result<BoundExpressions> DeserializeExpressions(
+    const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+    const ConversionOptions& conversion_options = {},
+    ExtensionSet* ext_set_out = NULLPTR);
+
+/// \brief Deserializes a Substrait Type message to the corresponding Arrow type
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Type
+/// message
+/// \param[in] ext_set the extension mapping to use, normally provided by the
+/// surrounding Plan message
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return the corresponding Arrow data type
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<DataType>> DeserializeType(
+    const Buffer& buf, const ExtensionSet& ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Serializes an Arrow type to a Substrait Type message
+///
+/// \param[in] type the Arrow data type to serialize
+/// \param[in,out] ext_set the extension mapping to use; may be updated to add a
+/// mapping for the given type
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return a buffer containing the protobuf serialization of the corresponding
+/// Substrait Type message
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SerializeType(
+    const DataType& type, ExtensionSet* ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a Substrait NamedStruct message to an Arrow schema
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+/// NamedStruct message
+/// \param[in] ext_set the extension mapping to use, normally provided by the
+/// surrounding Plan message
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return the corresponding Arrow schema
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Schema>> DeserializeSchema(
+    const Buffer& buf, const ExtensionSet& ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Serializes an Arrow schema to a Substrait NamedStruct message
+///
+/// \param[in] schema the Arrow schema to serialize
+/// \param[in,out] ext_set the extension mapping to use; may be updated to add
+/// mappings for the types used in the schema
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return a buffer containing the protobuf serialization of the corresponding
+/// Substrait NamedStruct message
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SerializeSchema(
+    const Schema& schema, ExtensionSet* ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a Substrait Expression message to a compute expression
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+/// Expression message
+/// \param[in] ext_set the extension mapping to use, normally provided by the
+/// surrounding Plan message
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return the corresponding Arrow compute expression
+ARROW_ENGINE_EXPORT
+Result<compute::Expression> DeserializeExpression(
+    const Buffer& buf, const ExtensionSet& ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Serializes an Arrow compute expression to a Substrait Expression message
+///
+/// \param[in] expr the Arrow compute expression to serialize
+/// \param[in,out] ext_set the extension mapping to use; may be updated to add
+/// mappings for the types used in the expression
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return a buffer containing the protobuf serialization of the corresponding
+/// Substrait Expression message
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SerializeExpression(
+    const compute::Expression& expr, ExtensionSet* ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Serialize an Acero Declaration to a binary protobuf Substrait message
+///
+/// \param[in] declaration the Acero declaration to serialize
+/// \param[in,out] ext_set the extension mapping to use; may be updated to add
+/// mappings for functions used in the declaration
+/// \param[in] conversion_options options to control how the conversion is done
+///
+/// \return a buffer containing the protobuf serialization of the Acero relation
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeRelation(
+    const acero::Declaration& declaration, ExtensionSet* ext_set,
+    const ConversionOptions& conversion_options = {});
+
+/// \brief Deserializes a Substrait Rel (relation) message to an ExecNode declaration
+///
+/// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+/// Rel message
+/// \param[in] ext_set the extension mapping to use, normally provided by the
+/// surrounding Plan message
+/// \param[in] conversion_options options to control how the conversion is to be done.
+/// \return the corresponding ExecNode declaration
+ARROW_ENGINE_EXPORT Result<DeclarationInfo> DeserializeRelation(
+    const Buffer& buf, const ExtensionSet& ext_set,
+    const ConversionOptions& conversion_options = {});
+
+namespace internal {
+
+/// \brief Checks whether two protobuf serializations of a particular Substrait message
+/// type are equivalent
+///
+/// Note that a binary comparison of the two buffers is insufficient. One reason for
+/// this is that the fields of a message can be specified in any order in the
+/// serialization.
+///
+/// \param[in] message_name the name of the Substrait message type to check
+/// \param[in] l_buf buffer containing the first protobuf serialization to compare
+/// \param[in] r_buf buffer containing the second protobuf serialization to compare
+/// \return success if equivalent, failure if not
+ARROW_ENGINE_EXPORT
+Status CheckMessagesEquivalent(std::string_view message_name, const Buffer& l_buf,
+                               const Buffer& r_buf);
+
+/// \brief Utility function to convert a JSON serialization of a Substrait message to
+/// its binary serialization
+///
+/// \param[in] type_name the name of the Substrait message type to convert
+/// \param[in] json the JSON string to convert
+/// \param[in] ignore_unknown_fields if true then unknown fields will be ignored and
+/// will not cause an error
+///
+/// This should generally be true to allow consumption of plans from newer
+/// producers but setting to false can be useful if you are testing
+/// conformance to a specific Substrait version
+/// \return a buffer filled with the binary protobuf serialization of message
+ARROW_ENGINE_EXPORT
+Result<std::shared_ptr<Buffer>> SubstraitFromJSON(std::string_view type_name,
+                                                  std::string_view json,
+                                                  bool ignore_unknown_fields = true);
+
+/// \brief Utility function to convert a binary protobuf serialization of a Substrait
+/// message to JSON
+///
+/// \param[in] type_name the name of the Substrait message type to convert
+/// \param[in] buf the buffer containing the binary protobuf serialization of the
+/// message
+/// \return a JSON string representing the message
+ARROW_ENGINE_EXPORT
+Result<std::string> SubstraitToJSON(std::string_view type_name, const Buffer& buf);
+
+} // namespace internal
+} // namespace engine
+} // namespace arrow
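Taken together, `SerializePlan` and the `Declaration`-returning overload of `DeserializePlan` form a round trip. A sketch (not part of this diff; assumes `declaration` is a complete Acero plan ending in a sink relation, as the `SerializePlan` doc requires):

```cpp
// Sketch: Acero declaration -> Substrait protobuf buffer -> Acero declaration.
#include <memory>

#include "arrow/acero/exec_plan.h"
#include "arrow/buffer.h"
#include "arrow/engine/substrait/extension_set.h"
#include "arrow/engine/substrait/serde.h"

arrow::Result<arrow::acero::Declaration> RoundTripThroughSubstrait(
    const arrow::acero::Declaration& declaration) {
  // The extension set records which extension functions/types the plan used;
  // keep it alive if anchors must stay stable across another round trip.
  arrow::engine::ExtensionSet ext_set;
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> substrait,
                        arrow::engine::SerializePlan(declaration, &ext_set));
  // Returns a sink-less declaration usable with DeclarationToTable and friends.
  return arrow::engine::DeserializePlan(*substrait);
}
```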
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace engine {
+
+struct Id;
+
+namespace internal {
+
+/// \brief Create a scan->project->sink plan for tests
+///
+/// The plan will project one additional column using the function
+/// defined by `function_id`, `arguments`, and `data_types`. `arguments`
+/// and `data_types` should have the same length but only one of each
+/// should be defined at each index.
+///
+/// If `data_types` is defined at an index then the plan will create a
+/// direct reference (starting at index 0 and increasing by 1 for each
+/// argument of this type).
+///
+/// If `arguments` is defined at an index then the plan will create an
+/// enum argument with that value.
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanProjectSubstrait(
+    Id function_id, const std::shared_ptr<Table>& input_table,
+    const std::vector<std::string>& arguments,
+    const std::unordered_map<std::string, std::vector<std::string>>& options,
+    const std::vector<std::shared_ptr<DataType>>& data_types,
+    const DataType& output_type);
+
+/// \brief Create a scan->aggregate->sink plan for tests
+///
+/// The plan will create an aggregate with one grouping set (defined by
+/// key_idxs) and one measure. The measure will be a function
+/// defined by `function_id` and direct references to `arg_idxs`.
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanAggSubstrait(
+    Id function_id, const std::shared_ptr<Table>& input_table,
+    const std::vector<int>& key_idxs, const std::vector<int>& arg_idxs,
+    const DataType& output_type);
+
+}  // namespace internal
+}  // namespace engine
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1db4b255ed8ee6a0ae7bb4a7a57f5a1aadb27cf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/testing/gtest_util.h"
+#include "arrow/util/vector.h"
+
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/compute/exec.h"
+#include "arrow/compute/kernel.h"
+#include "arrow/testing/visibility.h"
+#include "arrow/util/async_generator.h"
+#include "arrow/util/pcg_random.h"
+
+namespace arrow {
+namespace engine {
+
+Result<std::shared_ptr<Table>> SortTableOnAllFields(const std::shared_ptr<Table>& tab);
+
+void AssertTablesEqualIgnoringOrder(const std::shared_ptr<Table>& exp,
+                                    const std::shared_ptr<Table>& act);
+
+}  // namespace engine
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..6089d3f747a82cdc68b738b9ce6abbbb60e6811c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+namespace arrow {
+namespace engine {
+
+class ExtensionIdRegistry;
+class ExtensionSet;
+
+struct ConversionOptions;
+struct DeclarationInfo;
+
+}  // namespace engine
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..5128ec44bff77c120b28bd5e20761d68233b75ba
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/compute/type_fwd.h"
+#include "arrow/engine/substrait/options.h"
+#include "arrow/engine/substrait/type_fwd.h"
+#include "arrow/engine/substrait/visibility.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/iterator.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+
+namespace engine {
+
+using PythonTableProvider =
+    std::function<Result<std::shared_ptr<Table>>(const std::vector<std::string>&)>;
+
+/// \brief Utility method to run a Substrait plan
+/// \param substrait_buffer The plan to run, must be in binary protobuf format
+/// \param registry A registry of extension functions to make available to the plan
+/// If null then the default registry will be used.
+/// \param memory_pool The memory pool the plan should use to make allocations.
+/// \param func_registry A registry of functions used for execution expressions.
+/// `registry` maps from Substrait function IDs to "names". These
+/// names will be provided to `func_registry` to get the actual
+/// kernel.
+/// \param conversion_options Options to control plan deserialization
+/// \param use_threads If True then the CPU thread pool will be used for CPU work. If
+/// False then all work will be done on the calling thread.
+/// \return A record batch reader that will read out the results
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<RecordBatchReader>> ExecuteSerializedPlan(
+    const Buffer& substrait_buffer, const ExtensionIdRegistry* registry = NULLPTR,
+    compute::FunctionRegistry* func_registry = NULLPTR,
+    const ConversionOptions& conversion_options = {}, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool());
+
+/// \brief Get a Serialized Plan from a Substrait JSON plan.
+/// This is a helper method for Python tests.
+ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeJsonPlan(
+    const std::string& substrait_json);
+
+/// \brief Make a nested registry with the default registry as parent.
+/// See arrow::engine::nested_extension_id_registry for details.
+ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> MakeExtensionIdRegistry();
+
+ARROW_ENGINE_EXPORT const std::string& default_extension_types_uri();
+
+// TODO(ARROW-18145) Populate these from cmake files
+constexpr uint32_t kSubstraitMajorVersion = 0;
+constexpr uint32_t kSubstraitMinorVersion = 27;
+constexpr uint32_t kSubstraitPatchVersion = 0;
+
+constexpr uint32_t kSubstraitMinimumMajorVersion = 0;
+constexpr uint32_t kSubstraitMinimumMinorVersion = 20;
+
+Status CheckVersion(uint32_t major_version, uint32_t minor_version);
+
+}  // namespace engine
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..d81d202ee65673b1540836063d6aa5f88da9fe9c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// TODO(westonpace): Once we have a proper engine module this file
+// should be renamed arrow/engine/visibility.h
+// This API is EXPERIMENTAL.
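// A usage sketch for SerializeJsonPlan/ExecuteSerializedPlan declared above
// (not part of the vendored headers); the JSON content is left to the caller
// and all registries and options use their defaults.
arrow::Result<std::shared_ptr<arrow::Table>> RunSubstraitJson(
    const std::string& substrait_json) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> plan_buf,
                        arrow::engine::SerializeJsonPlan(substrait_json));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatchReader> reader,
                        arrow::engine::ExecuteSerializedPlan(*plan_buf));
  // Drain the reader into a single in-memory table.
  return reader->ToTable();
}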
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#ifdef ARROW_ENGINE_STATIC
+#define ARROW_ENGINE_EXPORT
+#elif defined(ARROW_ENGINE_EXPORTING)
+#define ARROW_ENGINE_EXPORT __declspec(dllexport)
+#else
+#define ARROW_ENGINE_EXPORT __declspec(dllimport)
+#endif
+
+#define ARROW_ENGINE_NO_EXPORT
+#else  // Not Windows
+#ifndef ARROW_ENGINE_EXPORT
+#define ARROW_ENGINE_EXPORT __attribute__((visibility("default")))
+#endif
+#ifndef ARROW_ENGINE_NO_EXPORT
+#define ARROW_ENGINE_NO_EXPORT __attribute__((visibility("hidden")))
+#endif
+#endif  // Non-Windows
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h
new file mode 100644
index 0000000000000000000000000000000000000000..82e0a600513d4abd9bb956053a2a7e94a1033f39
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h
@@ -0,0 +1,146 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between pandas's NumPy-based data representation
+// and Arrow data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "arrow/memory_pool.h"
+#include "arrow/python/visibility.h"
+
+namespace arrow {
+
+class Array;
+class ChunkedArray;
+class Column;
+class DataType;
+class MemoryPool;
+class Status;
+class Table;
+
+namespace py {
+
+enum class MapConversionType {
+  DEFAULT,  // convert arrow maps to assoc lists (list of key-value tuples) in Pandas
+  LOSSY,    // report warnings when lossiness is encountered due to duplicate keys
+  STRICT_,  // raise a Python exception when lossiness is encountered due to duplicate
+            // keys
+};
+
+struct PandasOptions {
+  /// arrow::MemoryPool to use for memory allocations
+  MemoryPool* pool = default_memory_pool();
+
+  /// If true, we will convert all string columns to categoricals
+  bool strings_to_categorical = false;
+  bool zero_copy_only = false;
+  bool integer_object_nulls = false;
+  bool date_as_object = false;
+  bool timestamp_as_object = false;
+  bool use_threads = false;
+
+  /// Coerce all date and timestamp to datetime64[ns]
+  bool coerce_temporal_nanoseconds = false;
+
+  /// Used to maintain backwards compatibility for
+  /// timezone bugs (see ARROW-9528). Should be removed
+  /// after Arrow 2.0 release.
+  bool ignore_timezone = false;
+
+  /// \brief If true, do not create duplicate PyObject versions of equal
+  /// objects.
This only applies to immutable objects like strings or datetime
+  /// objects
+  bool deduplicate_objects = false;
+
+  /// \brief For certain data types, a cast is needed in order to store the
+  /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
+  /// as nanoseconds in pandas). This option controls whether it is a safe
+  /// cast or not.
+  bool safe_cast = true;
+
+  /// \brief If true, create one block per column rather than consolidated
+  /// blocks (1 per data type). Do zero-copy wrapping when there are no
+  /// nulls. pandas currently will consolidate the blocks on its own, causing
+  /// increased memory use, so keep this in mind if you are working in a
+  /// memory-constrained situation.
+  bool split_blocks = false;
+
+  /// \brief If true, allow non-writable zero-copy views to be created for
+  /// single column blocks. This option is also used to provide zero copy for
+  /// Series data
+  bool allow_zero_copy_blocks = false;
+
+  /// \brief If true, attempt to deallocate buffers in passed Arrow object if
+  /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
+  /// original context for this feature. Only currently implemented for Table
+  /// conversions
+  bool self_destruct = false;
+
+  /// \brief The default behavior (DEFAULT) is to convert Arrow Map arrays to
+  /// Python association lists (list-of-tuples) in the same order as the Arrow
+  /// Map, as in [(key1, value1), (key2, value2), ...]
+  /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
+  /// This can change the ordering of (key, value) pairs, and will deduplicate
+  /// multiple keys, resulting in a possible loss of data.
+  /// If 'lossy', this key deduplication results in a warning printed
+  /// when detected. If 'strict', this instead results in an exception
+  /// being raised when detected.
+  MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
+
+  // Used internally for nested arrays.
+  bool decode_dictionaries = false;
+
+  // Columns that should be cast to categorical
+  std::unordered_set<std::string> categorical_columns;
+
+  // Columns that should be passed through to be converted to
+  // ExtensionArray/Block
+  std::unordered_set<std::string> extension_columns;
+
+  // Used internally to decipher between to_numpy() and to_pandas() when
+  // the expected output differs
+  bool to_numpy = false;
+};
+
+ARROW_PYTHON_EXPORT
+Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
+                            PyObject* py_ref, PyObject** out);
+
+ARROW_PYTHON_EXPORT
+Status ConvertChunkedArrayToPandas(const PandasOptions& options,
+                                   std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
+                                   PyObject** out);
+
+// Convert a whole table as efficiently as possible to a pandas.DataFrame.
+//
+// The returned Python object is a list of tuples consisting of the exact 2D
+// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
+//
+// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
+ARROW_PYTHON_EXPORT
+Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
+                            PyObject** out);
+
+}  // namespace py
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..1187037aed29e2cc5910e156c260fc9d9d81bff5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+class Decimal128;
+class Decimal256;
+
+namespace py {
+
+class OwnedRef;
+
+//
+// Python Decimal support
+//
+
+namespace internal {
+
+// \brief Import the Python Decimal type
+ARROW_PYTHON_EXPORT
+Status ImportDecimalType(OwnedRef* decimal_type);
+
+// \brief Convert a Python Decimal object to a C++ string
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[out] out The string representation of the Python Decimal instance
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
+
+// \brief Convert a C++ std::string to a Python Decimal instance
+// \param[in] decimal_constructor The decimal type object
+// \param[in] decimal_string A decimal string
+// \return An instance of decimal.Decimal
+ARROW_PYTHON_EXPORT
+PyObject* DecimalFromString(PyObject* decimal_constructor,
+                            const std::string& decimal_string);
+
+// \brief Convert a Python decimal to an Arrow Decimal128 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal128* out);
+
+// \brief Convert a Python object to an Arrow Decimal128 object
+// \param[in] python_decimal A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal128
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out);
+
+// \brief Convert a Python decimal to an Arrow Decimal256 object
+// \param[in] python_decimal A Python decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
+                                Decimal256* out);
+
+// \brief Convert a Python object to an Arrow Decimal256 object
+// \param[in] python_decimal A Python int or decimal.Decimal instance
+// \param[in] arrow_type An instance of arrow::DecimalType
+// \param[out] out A pointer to a Decimal256
+// \return The status of the operation
+ARROW_PYTHON_EXPORT
+Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
+
+// \brief Check whether obj is an instance of Decimal
+ARROW_PYTHON_EXPORT
+bool PyDecimal_Check(PyObject* obj);
+
+// \brief Check whether obj is nan. This function will abort the program if the argument
+// is not a Decimal instance
+ARROW_PYTHON_EXPORT
+bool PyDecimal_ISNAN(PyObject* obj);
+
+// \brief Helper class to track and update the precision and scale of a decimal
+class ARROW_PYTHON_EXPORT DecimalMetadata {
+ public:
+  DecimalMetadata();
+  DecimalMetadata(int32_t precision, int32_t scale);
+
+  // \brief Adjust the precision and scale of a decimal type given a new precision and a
+  // new scale
+  // \param[in] suggested_precision A candidate precision
+  // \param[in] suggested_scale A candidate scale
+  // \return The status of the operation
+  Status Update(int32_t suggested_precision, int32_t suggested_scale);
+
+  // \brief A convenient interface for updating the precision and scale based on a Python
+  // Decimal object
+  // \param object A Python Decimal object
+  // \return The status of the operation
+  Status Update(PyObject* object);
+
+  int32_t precision() const { return precision_; }
+  int32_t scale() const { return scale_; }
+
+ private:
+  int32_t precision_;
+  int32_t scale_;
+};
+
+}  // namespace internal
+}  // namespace py
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..e433d9aca7081cf3ec7e919650e8d07de7d3d92a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h
@@ -0,0 +1,85 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
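// A small sketch (not part of the vendored headers) of how the DecimalMetadata
// helper declared above can track the precision/scale needed for a column; the
// concrete candidate values are arbitrary.
arrow::Status InferDecimalMetadata() {
  arrow::py::internal::DecimalMetadata metadata;
  // Observe a value needing precision 10 / scale 2 (e.g. 12345678.90) ...
  ARROW_RETURN_NOT_OK(metadata.Update(/*suggested_precision=*/10,
                                      /*suggested_scale=*/2));
  // ... then a wider one; the tracked precision/scale are adjusted to fit both.
  ARROW_RETURN_NOT_OK(metadata.Update(/*suggested_precision=*/12,
                                      /*suggested_scale=*/4));
  // metadata.precision() and metadata.scale() now describe a suitable type.
  return arrow::Status::OK();
}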
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/extension_type.h"
+#include "arrow/python/common.h"
+#include "arrow/python/visibility.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace py {
+
+class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
+ public:
+  // Implement ExtensionType API
+  std::string extension_name() const override { return extension_name_; }
+
+  std::string ToString() const override;
+
+  bool ExtensionEquals(const ExtensionType& other) const override;
+
+  std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
+
+  Result<std::shared_ptr<DataType>> Deserialize(
+      std::shared_ptr<DataType> storage_type,
+      const std::string& serialized) const override;
+
+  std::string Serialize() const override;
+
+  // For use from Cython
+  // Assumes that `typ` is borrowed
+  static Status FromClass(const std::shared_ptr<DataType> storage_type,
+                          const std::string extension_name, PyObject* typ,
+                          std::shared_ptr<DataType>* out);
+
+  // Return new ref
+  PyObject* GetInstance() const;
+  Status SetInstance(PyObject*) const;
+
+ protected:
+  PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
+                  PyObject* inst = NULLPTR);
+  PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
+                  PyObject* typ, PyObject* inst = NULLPTR);
+
+  std::string extension_name_;
+
+  // These fields are mutable because of two-step initialization.
+  mutable OwnedRefNoGIL type_class_;
+  // A weakref or null. Storing a strong reference to the Python extension type
+  // instance would create an unreclaimable reference cycle between Python and C++
+  // (the Python instance has to keep a strong reference to the C++ ExtensionType
+  // in the other direction). Instead, we store a weakref to the instance.
+  // If the weakref is dead, we reconstruct the instance from its serialized form.
+  mutable OwnedRefNoGIL type_instance_;
+  // Empty if type_instance_ is null
+  mutable std::string serialized_;
+};
+
+ARROW_PYTHON_EXPORT std::string PyExtensionName();
+
+ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
+
+ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
+
+}  // namespace py
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e6c954862bd92af369baf04bf10a76e0c076fb5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
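// A usage sketch for the arrow_init_numpy() entry point declared just below
// (not part of the vendored headers). Calling it once, with the GIL held,
// before any NumPy conversion routine is the expected pattern; the
// 0-on-success return convention is an assumption for illustration.
extern "C" int arrow_init_numpy();

bool InitArrowNumPyOnce() {
  static const bool ok = (arrow_init_numpy() == 0);  // assumed: 0 means success
  return ok;
}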
+
+#pragma once
+
+#include "arrow/python/platform.h"
+#include "arrow/python/visibility.h"
+
+extern "C" {
+ARROW_PYTHON_EXPORT
+int arrow_init_numpy();
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_convert.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_convert.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d1086e13552885f09431848fabf0829e670d681
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_convert.h
@@ -0,0 +1,122 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between pandas's NumPy-based data representation
+// and Arrow data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/buffer.h"
+#include "arrow/python/visibility.h"
+#include "arrow/sparse_tensor.h"
+
+namespace arrow {
+
+class DataType;
+class MemoryPool;
+class Status;
+class Tensor;
+
+namespace py {
+
+class ARROW_PYTHON_EXPORT NumPyBuffer : public Buffer {
+ public:
+  explicit NumPyBuffer(PyObject* arr);
+  virtual ~NumPyBuffer();
+
+ private:
+  PyObject* arr_;
+};
+
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<DataType>> NumPyDtypeToArrow(PyObject* dtype);
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<DataType>> NumPyDtypeToArrow(PyArray_Descr* descr);
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<DataType>> NumPyScalarToArrowDataType(PyObject* scalar);
+
+ARROW_PYTHON_EXPORT Status NdarrayToTensor(MemoryPool* pool, PyObject* ao,
+                                           const std::vector<std::string>& dim_names,
+                                           std::shared_ptr<Tensor>* out);
+
+ARROW_PYTHON_EXPORT Status TensorToNdarray(const std::shared_ptr<Tensor>& tensor,
+                                           PyObject* base, PyObject** out);
+
+ARROW_PYTHON_EXPORT Status
+SparseCOOTensorToNdarray(const std::shared_ptr<SparseCOOTensor>& sparse_tensor,
+                         PyObject* base, PyObject** out_data, PyObject** out_coords);
+
+Status SparseCSXMatrixToNdarray(const std::shared_ptr<SparseTensor>& sparse_tensor,
+                                PyObject* base, PyObject** out_data,
+                                PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSRMatrixToNdarray(
+    const std::shared_ptr<SparseCSRMatrix>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSCMatrixToNdarray(
+    const std::shared_ptr<SparseCSCMatrix>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status SparseCSFTensorToNdarray(
+    const std::shared_ptr<SparseCSFTensor>& sparse_tensor, PyObject* base,
+    PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCOOTensor(
+    MemoryPool* pool, PyObject* data_ao, PyObject* coords_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCOOTensor>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSRMatrix(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCSRMatrix>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSCMatrix(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
+    std::shared_ptr<SparseCSCMatrix>* out);
+
+ARROW_PYTHON_EXPORT Status NdarraysToSparseCSFTensor(
+    MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
+    const std::vector<int64_t>& shape, const std::vector<int64_t>& axis_order,
+    const std::vector<std::string>& dim_names, std::shared_ptr<SparseCSFTensor>* out);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCOOTensor(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCOOTensor>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSRMatrix(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSRMatrix>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSCMatrix(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSCMatrix>* csparse_tensor);
+
+ARROW_PYTHON_EXPORT Status
+TensorToSparseCSFTensor(const std::shared_ptr<Tensor>& tensor,
+                        std::shared_ptr<SparseCSFTensor>* csparse_tensor);
+
+}  // namespace py
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h
new file mode 100644
index 0000000000000000000000000000000000000000..d167996ba8da6796ac62da0fa0186419a3211930
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for converting between CPython built-in data structures and Arrow
+// data structures
+
+#pragma once
+
+#include "arrow/python/platform.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/python/visibility.h"
+#include "arrow/type.h"
+#include "arrow/util/macros.h"
+
+#include "arrow/python/common.h"
+
+namespace arrow {
+
+class Array;
+class Status;
+
+namespace py {
+
+struct PyConversionOptions {
+  PyConversionOptions() = default;
+
+  PyConversionOptions(const std::shared_ptr<DataType>& type, int64_t size,
+                      MemoryPool* pool, bool from_pandas)
+      : type(type), size(size), from_pandas(from_pandas) {}
+
+  // Set to null if to be inferred
+  std::shared_ptr<DataType> type;
+
+  // Default is -1, which indicates the size should be the same as the input sequence
+  int64_t size = -1;
+
+  bool from_pandas = false;
+
+  /// Used to maintain backwards compatibility for
+  /// timezone bugs (see ARROW-9528). Should be removed
+  /// after Arrow 2.0 release.
+  bool ignore_timezone = false;
+
+  bool strict = false;
+};
+
+/// \brief Convert sequence (list, generator, NumPy array with dtype object) of
+/// Python objects.
+/// \param[in] obj the sequence to convert
+/// \param[in] mask a NumPy array of true/false values to indicate whether
+/// values in the sequence are null (true) or not null (false). This parameter
+/// may be null
+/// \param[in] options various conversion options
+/// \param[in] pool MemoryPool to use for allocations
+/// \return Result ChunkedArray
+ARROW_PYTHON_EXPORT
+Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(
+    PyObject* obj, PyObject* mask, PyConversionOptions options,
+    MemoryPool* pool = default_memory_pool());
+
+}  // namespace py
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd43b32fd43ff46e195d0057cf3198b926b9fdd0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h
@@ -0,0 +1,39 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__)  // Windows
+#if defined(_MSC_VER)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#ifdef ARROW_PYTHON_STATIC
+#define ARROW_PYTHON_EXPORT
+#elif defined(ARROW_PYTHON_EXPORTING)
+#define ARROW_PYTHON_EXPORT __declspec(dllexport)
+#else
+#define ARROW_PYTHON_EXPORT __declspec(dllimport)
+#endif
+
+#else  // Not Windows
+#ifndef ARROW_PYTHON_EXPORT
+#define ARROW_PYTHON_EXPORT __attribute__((visibility("default")))
+#endif
+#endif  // Non-Windows
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a0e6ba709d974daebf81cf9e6cdb7aa8b947cc8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
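// A usage sketch for ConvertPySequence declared above (not part of the
// vendored headers): convert a Python sequence, with the GIL held, into an
// Arrow ChunkedArray. Requesting int64 is an arbitrary choice; leaving
// `options.type` null would let the type be inferred instead.
arrow::Result<std::shared_ptr<arrow::ChunkedArray>> Int64FromPySequence(PyObject* obj) {
  arrow::py::PyConversionOptions options;
  options.type = arrow::int64();
  return arrow::py::ConvertPySequence(obj, /*mask=*/nullptr, options);
}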
+
+#pragma once
+
+#include "arrow/result.h"
+
+namespace arrow {
+
+template <class InputIterator, class OutputIterator, class UnaryOperation>
+Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
+                      UnaryOperation unary_op) {
+  for (; first != last; ++first, (void)++out) {
+    ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
+  }
+  return Status::OK();
+}
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..71920e49f4aa2b1d92312b4aabaffafe35d323c7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "arrow/memory_pool.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/bit_util.h"
+
+namespace arrow {
+namespace internal {
+
+struct BitmapWordAlignParams {
+  int64_t leading_bits;
+  int64_t trailing_bits;
+  int64_t trailing_bit_offset;
+  const uint8_t* aligned_start;
+  int64_t aligned_bits;
+  int64_t aligned_words;
+};
+
+// Compute parameters for accessing a bitmap using aligned word instructions.
+// The returned parameters describe:
+// - a leading area of size `leading_bits` before the aligned words
+// - a word-aligned area of size `aligned_bits`
+// - a trailing area of size `trailing_bits` after the aligned words
+template <uint64_t ALIGN_IN_BYTES>
+inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
+                                             int64_t length) {
+  static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
+                "ALIGN_IN_BYTES should be a positive power of two");
+  constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;
+
+  BitmapWordAlignParams p;
+
+  // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
+  // We don't care about losing the upper bits since we are only interested in the
+  // difference between both addresses.
+  const uint64_t bit_addr =
+      reinterpret_cast<uintptr_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
+  const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);
+
+  p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
+  p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
+  p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
+  p.trailing_bits = length - p.leading_bits - p.aligned_bits;
+  p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;
+
+  p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
+  return p;
+}
+}  // namespace internal
+
+namespace util {
+
+// Functions to check if the provided Arrow object is aligned by the specified alignment
+
+/// \brief Special alignment value to use data type-specific alignment
+///
+/// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
+/// functions, then the function will ensure each buffer is suitably aligned
+/// for the data type of the array. For example, given an int32 buffer the values
+/// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
+/// buffer's address must be a multiple of 8.
+constexpr int64_t kValueAlignment = -3;
+
+/// \brief Calculate if the buffer's address is a multiple of `alignment`
+///
+/// If `alignment` is less than or equal to 0 then this method will always return true
+/// \param buffer the buffer to check
+/// \param alignment the alignment (in bytes) to check for
+ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
+/// \brief Calculate if all buffers in the array data are aligned
+///
+/// This will also check the buffers in the dictionary and any children
+/// \param array the array data to check
+/// \param alignment the alignment (in bytes) to check for
+ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
+/// \brief Calculate if all buffers in the array are aligned
+///
+/// This will also check the buffers in the dictionary and any children
+/// \param array the array to check
+/// \param alignment the alignment (in bytes) to check for
+ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);
+
+// Following functions require an additional boolean vector which stores the
+// alignment check bits of the constituent objects.
+// For example, needs_alignment vector for a ChunkedArray will contain the
+// check bits of the constituent Arrays.
+// The boolean vector check was introduced to minimize the repetitive checks
+// of the constituent objects during the EnsureAlignment function where certain
+// objects can be ignored for further checking if we already know that they are
+// completely aligned.
+
+/// \brief Calculate which (if any) chunks in a chunked array are unaligned
+/// \param array the array to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param needs_alignment an output vector that will store the results of the check
+/// it must be set to a valid vector. Extra elements will be added to the end
+/// of the vector for each chunk that is checked. `true` will be stored if
+/// the chunk is unaligned.
+/// \param offset the index of the chunk to start checking
+/// \return true if all chunks (starting at `offset`) are aligned, false otherwise
+ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
+                                 std::vector<bool>* needs_alignment, int offset = 0);
+
+/// \brief calculate which (if any) columns in a record batch are unaligned
+/// \param batch the batch to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param needs_alignment an output vector that will store the results of the
+/// check. It must be set to a valid vector. Extra elements will be added
+/// to the end of the vector for each column that is checked. `true` will be
+/// stored if the column is unaligned.
+ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
+                                 std::vector<bool>* needs_alignment);
+
+/// \brief calculate which (if any) columns in a table are unaligned
+/// \param table the table to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param needs_alignment an output vector that will store the results of the
+/// check. It must be set to a valid vector. Extra elements will be added
+/// to the end of the vector for each column that is checked. `true` will be
+/// stored if the column is unaligned.
+ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
+                                 std::vector<bool>* needs_alignment);
+
+/// \brief return a buffer that has the given alignment and the same data as the input
+/// buffer
+///
+/// If the input buffer is already aligned then this method will return the input buffer
+/// If the input buffer is not already aligned then this method will allocate a new
+/// buffer. The alignment of the new buffer will have at least
+/// max(kDefaultBufferAlignment, alignment) bytes of alignment.
+///
+/// \param buffer the buffer to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate a new buffer if the
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
+    std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);
+
+/// \brief return an array data where all buffers are aligned by the given alignment
+///
+/// If any input buffer is already aligned then this method will reuse that same input
+/// buffer.
+///
+/// \param array_data the array data to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate new buffers if any
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
+    std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);
+
+/// \brief return an array where all buffers are aligned by the given alignment
+///
+/// If any input buffer is already aligned then this method will reuse that same input
+/// buffer.
+///
+/// \param array the array to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate new buffers if any
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
+                                                            int64_t alignment,
+                                                            MemoryPool* memory_pool);
+
+/// \brief return a chunked array where all buffers are aligned by the given alignment
+///
+/// If any input buffer is already aligned then this method will reuse that same input
+/// buffer.
+///
+/// \param array the chunked array to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate new buffers if any
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
+    std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);
+
+/// \brief return a record batch where all buffers are aligned by the given alignment
+///
+/// If any input buffer is already aligned then this method will reuse that same input
+/// buffer.
+///
+/// \param batch the batch to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate new buffers if any
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
+    std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);
+
+/// \brief return a table where all buffers are aligned by the given alignment
+///
+/// If any input buffer is already aligned then this method will reuse that same input
+/// buffer.
+///
+/// \param table the table to check
+/// \param alignment the alignment (in bytes) to check for
+/// \param memory_pool a memory pool that will be used to allocate new buffers if any
+/// input buffer is not sufficiently aligned
+ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
+                                                            int64_t alignment,
+                                                            MemoryPool* memory_pool);
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3c5bf9ef6f52b0a0737348c2a5bdc524e62c251
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+
+template <typename T>
+using AsyncGenerator = std::function<Future<T>()>;
+
+template <typename T, typename V>
+class MappingGenerator;
+
+template <typename T, typename ComesAfter, typename IsNext>
+class SequencingGenerator;
+
+template <typename T, typename V>
+class TransformingGenerator;
+
+template <typename T>
+class SerialReadaheadGenerator;
+
+template <typename T>
+class ReadaheadGenerator;
+
+template <typename T>
+class PushGenerator;
+
+template <typename T>
+class MergedGenerator;
+
+template <typename T>
+struct Enumerated;
+
+template <typename T>
+class EnumeratingGenerator;
+
+template <typename T>
+class TransferringGenerator;
+
+template <typename T>
+class BackgroundGenerator;
+
+template <typename T>
+class GeneratorIterator;
+
+template <typename T>
+struct CancellableGenerator;
+
+template <typename T>
+class DefaultIfEmptyGenerator;
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b80e19d896b746ccc4318bb2f8ce250c7892e66
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
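// A round-trip sketch for the base64 helpers declared below (not part of the
// vendored headers): encode and decode are inverses for arbitrary byte strings.
#include <cassert>
#include <string>

void Base64RoundTrip() {
  const std::string original = "hello, arrow";
  const std::string encoded = arrow::util::base64_encode(original);  // "aGVsbG8sIGFycm93"
  const std::string decoded = arrow::util::base64_decode(encoded);
  assert(decoded == original);
}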
+
+#pragma once
+
+#include <string>
+#include <string_view>
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+ARROW_EXPORT
+std::string base64_encode(std::string_view s);
+
+ARROW_EXPORT
+std::string base64_decode(std::string_view s);
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..75639ac11ae41acb5e23e3eaa91901f41472fdc6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h
@@ -0,0 +1,211 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include
+#include
+#include
+
+#include "benchmark/benchmark.h"
+
+#include "arrow/memory_pool.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/cpu_info.h"
+#include "arrow/util/logging.h"  // IWYU pragma: keep
+
+namespace arrow {
+
+// Benchmark changed its parameter type between releases from
+// int to int64_t. As it doesn't have version macros, we need
+// to apply C++ template magic.
+
+template <typename Func>
+struct BenchmarkArgsType;
+
+// Pattern matching that extracts the vector element type of Benchmark::Args()
+template <typename Values>
+struct BenchmarkArgsType<benchmark::internal::Benchmark* (
+    benchmark::internal::Benchmark::*)(const std::vector<Values>&)> {
+  using type = Values;
+};
+
+using ArgsType =
+    typename BenchmarkArgsType<decltype(&benchmark::internal::Benchmark::Args)>::type;
+
+using internal::CpuInfo;
+
+static const CpuInfo* cpu_info = CpuInfo::GetInstance();
+
+static const int64_t kL1Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L1);
+static const int64_t kL2Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L2);
+static const int64_t kL3Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L3);
+static const int64_t kCantFitInL3Size = kL3Size * 4;
+static const std::vector<int64_t> kMemorySizes = {kL1Size, kL2Size, kL3Size,
+                                                  kCantFitInL3Size};
+// 0 is treated as "no nulls"
+static const std::vector<ArgsType> kInverseNullProportions = {10000, 100, 10, 2, 1, 0};
+
+struct GenericItemsArgs {
+  // number of items processed per iteration
+  const int64_t size;
+
+  // proportion of nulls in generated arrays
+  double null_proportion;
+
+  explicit GenericItemsArgs(benchmark::State& state)
+      : size(state.range(0)), state_(state) {
+    if (state.range(1) == 0) {
+      this->null_proportion = 0.0;
+    } else {
+      this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+    }
+  }
+
+  ~GenericItemsArgs() {
+    state_.counters["size"] = static_cast<double>(size);
+    state_.counters["null_percent"] = null_proportion * 100;
+    state_.SetItemsProcessed(state_.iterations() * size);
+  }
+
+ private:
+  benchmark::State& state_;
+};
+
+void BenchmarkSetArgsWithSizes(benchmark::internal::Benchmark* bench,
+                               const std::vector<int64_t>& sizes = kMemorySizes) {
+  bench->Unit(benchmark::kMicrosecond);
+
+  for (const auto size : sizes) {
+    for (const auto inverse_null_proportion : kInverseNullProportions) {
+      bench->Args({static_cast<ArgsType>(size), inverse_null_proportion});
+    }
+  }
+}
+
+void BenchmarkSetArgs(benchmark::internal::Benchmark* bench) {
+  BenchmarkSetArgsWithSizes(bench, kMemorySizes);
+}
+
+void RegressionSetArgs(benchmark::internal::Benchmark* bench) {
+  // Regression benchmarks do not need to account for cache hierarchy, thus
+  // optimize for the best case.
+  BenchmarkSetArgsWithSizes(bench, {kL1Size});
+}
+
+// RAII struct to handle some of the boilerplate in regression benchmarks
+struct RegressionArgs {
+  // size of memory tested (per iteration) in bytes
+  int64_t size;
+
+  // proportion of nulls in generated arrays
+  double null_proportion;
+
+  // If size_is_bytes is true, then it's a number of bytes, otherwise it's the
+  // number of items processed (for reporting)
+  explicit RegressionArgs(benchmark::State& state, bool size_is_bytes = true)
+      : size(state.range(0)), state_(state), size_is_bytes_(size_is_bytes) {
+    if (state.range(1) == 0) {
+      this->null_proportion = 0.0;
+    } else {
+      this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+    }
+  }
+
+  ~RegressionArgs() {
+    state_.counters["size"] = static_cast<double>(size);
+    state_.counters["null_percent"] = null_proportion * 100;
+    if (size_is_bytes_) {
+      state_.SetBytesProcessed(state_.iterations() * size);
+    } else {
+      state_.SetItemsProcessed(state_.iterations() * size);
+    }
+  }
+
+ private:
+  benchmark::State& state_;
+  bool size_is_bytes_;
+};
+
+class MemoryPoolMemoryManager : public benchmark::MemoryManager {
+  void Start() override {
+    memory_pool = std::make_shared<ProxyMemoryPool>(default_memory_pool());
+
+    MemoryPool* default_pool = default_memory_pool();
+    global_allocations_start = default_pool->num_allocations();
+  }
+
+// BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark
+// 1.8.0. We can remove this Stop(Result*) when we require Google
+// Benchmark 1.8.0 or later.
+#ifndef BENCHMARK_DONT_OPTIMIZE
+  void Stop(Result* result) override { Stop(*result); }
+#endif
+
+  void Stop(benchmark::MemoryManager::Result& result) override {
+    // If num_allocations is still zero, we assume that the memory pool wasn't passed down
+    // so we should record them.
+    MemoryPool* default_pool = default_memory_pool();
+    int64_t new_default_allocations =
+        default_pool->num_allocations() - global_allocations_start;
+
+    // Only record metrics if (1) there were allocations and (2) we
+    // recorded at least one.
+    if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) {
+      if (new_default_allocations > memory_pool->num_allocations()) {
+        // If we missed some, let's report that.
+
+class MemoryPoolMemoryManager : public benchmark::MemoryManager {
+  void Start() override {
+    memory_pool = std::make_shared<ProxyMemoryPool>(default_memory_pool());
+
+    MemoryPool* default_pool = default_memory_pool();
+    global_allocations_start = default_pool->num_allocations();
+  }
+
+// BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark
+// 1.8.0. We can remove this Stop(Result*) when we require Google
+// Benchmark 1.8.0 or later.
+#ifndef BENCHMARK_DONT_OPTIMIZE
+  void Stop(Result* result) override { Stop(*result); }
+#endif
+
+  void Stop(benchmark::MemoryManager::Result& result) override {
+    // If num_allocations is still zero, we assume that the memory pool wasn't
+    // passed down, so there is nothing for us to record.
+    MemoryPool* default_pool = default_memory_pool();
+    int64_t new_default_allocations =
+        default_pool->num_allocations() - global_allocations_start;
+
+    // Only record metrics if (1) there were allocations and (2) we
+    // recorded at least one.
+    if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) {
+      if (new_default_allocations > memory_pool->num_allocations()) {
+        // If we missed some, let's report that.
+        int64_t missed_allocations =
+            new_default_allocations - memory_pool->num_allocations();
+        ARROW_LOG(WARNING) << "BenchmarkMemoryTracker recorded some allocations "
+                           << "for a benchmark, but missed " << missed_allocations
+                           << " allocations.\n";
+      }
+
+      result.max_bytes_used = memory_pool->max_memory();
+      result.total_allocated_bytes = memory_pool->total_bytes_allocated();
+      result.num_allocs = memory_pool->num_allocations();
+    }
+  }
+
+ public:
+  std::shared_ptr<::arrow::ProxyMemoryPool> memory_pool;
+
+ protected:
+  int64_t global_allocations_start;
+};
+
+/// \brief Track memory pool allocations in benchmarks.
+///
+/// Instantiate as a global variable to register the hooks into Google Benchmark
+/// to collect memory metrics. Before each benchmark, a new ProxyMemoryPool is
+/// created. It can then be accessed with memory_pool(). Once the benchmark is
+/// complete, the hook will record the maximum memory used, the total bytes
+/// allocated, and the total number of allocations. If no allocations were seen
+/// (for example, if you forgot to pass down the memory pool), then these
+/// metrics will not be saved.
+///
+/// Since this is used as one global variable, this will not work if multiple
+/// benchmarks are run concurrently or for multi-threaded benchmarks (ones
+/// that use `->ThreadRange(...)`).
+class BenchmarkMemoryTracker {
+ public:
+  BenchmarkMemoryTracker() : manager_() { ::benchmark::RegisterMemoryManager(&manager_); }
+  ::arrow::MemoryPool* memory_pool() const { return manager_.memory_pool.get(); }
+
+ protected:
+  ::arrow::MemoryPoolMemoryManager manager_;
+};
+
+}  // namespace arrow
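+
+// Usage sketch (illustrative; the global and the benchmark are hypothetical):
+//
+//   static arrow::BenchmarkMemoryTracker memory_tracker;
+//
+//   static void BM_Alloc(benchmark::State& state) {
+//     for (auto _ : state) {
+//       auto buffer = *arrow::AllocateBuffer(1024, memory_tracker.memory_pool());
+//     }
+//   }
+//   BENCHMARK(BM_Alloc);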
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..94f7a5bdfa667a97bd00a91404a1dd9f64dfd2dd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstring>
+#include <string_view>
+
+#include "arrow/type.h"
+#include "arrow/util/span.h"
+
+namespace arrow::util {
+
+inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
+  // Small string: inlined. Bytes beyond size are zeroed
+  BinaryViewType::c_type out;
+  out.inlined = {size, {}};
+  memcpy(&out.inlined.data, data, size);
+  return out;
+}
+
+inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
+  return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
+}
+
+inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
+                                           int32_t buffer_index, int32_t offset) {
+  if (size <= BinaryViewType::kInlineSize) {
+    return ToInlineBinaryView(data, size);
+  }
+
+  // Large string: store index/offset.
+  BinaryViewType::c_type out;
+  out.ref = {size, {}, buffer_index, offset};
+  memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
+  return out;
+}
+
+inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
+                                           int32_t offset) {
+  return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
+}
+
+template <typename BufferPtr>
+std::string_view FromBinaryView(const BinaryViewType::c_type& v,
+                                const BufferPtr* data_buffers) {
+  auto* data = v.is_inline() ? v.inlined.data.data()
+                             : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
+  return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
+}
+template <typename BufferPtr>
+std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
+
+template <typename BufferPtr>
+bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
+                     const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
+  int64_t l_size_and_prefix, r_size_and_prefix;
+  memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
+  memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));
+
+  if (l_size_and_prefix != r_size_and_prefix) return false;
+
+  if (l.is_inline()) {
+    // The columnar spec mandates that the inlined part be zero-padded, so we can compare
+    // a word at a time regardless of the exact size.
+    int64_t l_inlined, r_inlined;
+    memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
+    memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
+    return l_inlined == r_inlined;
+  }
+
+  // Sizes are equal and this is not inline, therefore both are out
+  // of line and have kPrefixSize first in common.
+  const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
+  const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
+  return memcmp(l_data + BinaryViewType::kPrefixSize,
+                r_data + BinaryViewType::kPrefixSize,
+                l.size() - BinaryViewType::kPrefixSize) == 0;
+}
+
+}  // namespace arrow::util
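+
+// Usage sketch (illustrative; not part of the upstream header):
+//
+//   // A short string fits inline; a longer one references a data buffer.
+//   auto inline_view = arrow::util::ToInlineBinaryView("abc");
+//   std::shared_ptr<arrow::Buffer> buf = /* character data, externally owned */;
+//   auto ref_view = arrow::util::ToBinaryView("a string longer than twelve bytes",
+//                                             /*buffer_index=*/0, /*offset=*/0);
+//   std::shared_ptr<arrow::Buffer> buffers[] = {buf};
+//   std::string_view s = arrow::util::FromBinaryView(ref_view, buffers);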
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..a436a50b86fe14f84699cba679f6cac882514c19
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h
@@ -0,0 +1,515 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <string>
+
+#include "arrow/util/bit_util.h"
+#include "arrow/util/bitmap_reader.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+struct BitRun {
+  int64_t length;
+  // Whether bits are set at this point.
+  bool set;
+
+  std::string ToString() const {
+    return std::string("{Length: ") + std::to_string(length) +
+           ", set=" + std::to_string(set) + "}";
+  }
+};
+
+inline bool operator==(const BitRun& lhs, const BitRun& rhs) {
+  return lhs.length == rhs.length && lhs.set == rhs.set;
+}
+
+inline bool operator!=(const BitRun& lhs, const BitRun& rhs) {
+  return lhs.length != rhs.length || lhs.set != rhs.set;
+}
+
+class BitRunReaderLinear {
+ public:
+  BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : reader_(bitmap, start_offset, length) {}
+
+  BitRun NextRun() {
+    BitRun rl = {/*length=*/0, reader_.IsSet()};
+    // Advance while the values are equal and not at the end of list.
+    while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) {
+      rl.length++;
+      reader_.Next();
+    }
+    return rl;
+  }
+
+ private:
+  BitmapReader reader_;
+};
+
+#if ARROW_LITTLE_ENDIAN
+/// A convenience class for counting the number of contiguous set/unset bits
+/// in a bitmap.
+class ARROW_EXPORT BitRunReader {
+ public:
+  /// \brief Constructs new BitRunReader.
+  ///
+  /// \param[in] bitmap source data
+  /// \param[in] start_offset bit offset into the source data
+  /// \param[in] length number of bits to read
+  BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);
+
+  /// Returns a new BitRun containing the number of contiguous
+  /// bits with the same value.  length == 0 indicates the
+  /// end of the bitmap.
+  BitRun NextRun() {
+    if (ARROW_PREDICT_FALSE(position_ >= length_)) {
+      return {/*length=*/0, false};
+    }
+    // This implementation relies on an efficient implementation of
+    // CountTrailingZeros and assumes that runs are more common than
+    // not.  The logic is to incrementally find the next bit change
+    // from the current position.  This is done by zeroing all
+    // bits in word_ up to position_ and using CountTrailingZeros
+    // to find the index of the next set bit.
+
+    // The runs alternate on each call, so flip the bit.
+    current_run_bit_set_ = !current_run_bit_set_;
+
+    int64_t start_position = position_;
+    int64_t start_bit_offset = start_position & 63;
+    // Invert the word for proper use of CountTrailingZeros and
+    // clear bits so CountTrailingZeros can do its magic.
+    word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);
+
+    // Go forward until the next change from unset to set.
+    int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
+    position_ += new_bits;
+
+    if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+        ARROW_PREDICT_TRUE(position_ < length_)) {
+      // Continue extending position while we can advance an entire word.
+      // (updates position_ accordingly).
+      AdvanceUntilChange();
+    }
+
+    return {/*length=*/position_ - start_position, current_run_bit_set_};
+  }
+
+ private:
+  void AdvanceUntilChange() {
+    int64_t new_bits = 0;
+    do {
+      // Advance the position of the bitmap for loading.
+      bitmap_ += sizeof(uint64_t);
+      LoadNextWord();
+      new_bits = bit_util::CountTrailingZeros(word_);
+      // Continue calculating run length.
+      position_ += new_bits;
+    } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+             ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
+  }
+
+  void LoadNextWord() { return LoadWord(length_ - position_); }
+
+  // Helper method for loading the next word.
+  void LoadWord(int64_t bits_remaining) {
+    word_ = 0;
+    // we need at least an extra byte in this case.
+    if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
+      std::memcpy(&word_, bitmap_, 8);
+    } else {
+      int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
+      auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
+      std::memcpy(word_ptr, bitmap_, bytes_to_load);
+      // Ensure stoppage at last bit in bitmap by reversing the next higher
+      // order bit.
+      bit_util::SetBitTo(word_ptr, bits_remaining,
+                         !bit_util::GetBit(word_ptr, bits_remaining - 1));
+    }
+
+    // Two cases:
+    //   1. For unset, CountTrailingZeros works naturally so we don't
+    //   invert the word.
+    //   2. Otherwise invert so we can use CountTrailingZeros.
+    if (current_run_bit_set_) {
+      word_ = ~word_;
+    }
+  }
+  const uint8_t* bitmap_;
+  int64_t position_;
+  int64_t length_;
+  uint64_t word_;
+  bool current_run_bit_set_;
+};
+#else
+using BitRunReader = BitRunReaderLinear;
+#endif
+
+struct SetBitRun {
+  int64_t position;
+  int64_t length;
+
+  bool AtEnd() const { return length == 0; }
+
+  std::string ToString() const {
+    return std::string("{pos=") + std::to_string(position) +
+           ", len=" + std::to_string(length) + "}";
+  }
+
+  bool operator==(const SetBitRun& other) const {
+    return position == other.position && length == other.length;
+  }
+  bool operator!=(const SetBitRun& other) const {
+    return position != other.position || length != other.length;
+  }
+};
+
+template <bool Reverse>
+class BaseSetBitRunReader {
+ public:
+  /// \brief Constructs new SetBitRunReader.
+  ///
+  /// \param[in] bitmap source data
+  /// \param[in] start_offset bit offset into the source data
+  /// \param[in] length number of bits to read
+  ARROW_NOINLINE
+  BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(util::MakeNonNull(bitmap)),
+        length_(length),
+        remaining_(length_),
+        current_word_(0),
+        current_num_bits_(0) {
+    if (Reverse) {
+      bitmap_ += (start_offset + length) / 8;
+      const int8_t end_bit_offset = static_cast<int8_t>((start_offset + length) % 8);
+      if (length > 0 && end_bit_offset) {
+        // Get LSBs from last byte
+        ++bitmap_;
+        current_num_bits_ =
+            std::min(static_cast<int32_t>(length), static_cast<int32_t>(end_bit_offset));
+        current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_);
+      }
+    } else {
+      bitmap_ += start_offset / 8;
+      const int8_t bit_offset = static_cast<int8_t>(start_offset % 8);
+      if (length > 0 && bit_offset) {
+        // Get MSBs from first byte
+        current_num_bits_ =
+            std::min(static_cast<int32_t>(length), static_cast<int32_t>(8 - bit_offset));
+        current_word_ = LoadPartialWord(bit_offset, current_num_bits_);
+      }
+    }
+  }
+
+  ARROW_NOINLINE
+  SetBitRun NextRun() {
+    int64_t pos = 0;
+    int64_t len = 0;
+    if (current_num_bits_) {
+      const auto run = FindCurrentRun();
+      assert(remaining_ >= 0);
+      if (run.length && current_num_bits_) {
+        // The run ends in current_word_
+        return AdjustRun(run);
+      }
+      pos = run.position;
+      len = run.length;
+    }
+    if (!len) {
+      // We didn't get any ones in current_word_, so we can skip any zeros
+      // in the following words
+      SkipNextZeros();
+      if (remaining_ == 0) {
+        return {0, 0};
+      }
+      assert(current_num_bits_);
+      pos = position();
+    } else if (!current_num_bits_) {
+      if (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+        current_word_ = LoadFullWord();
+        current_num_bits_ = 64;
+      } else if (remaining_ > 0) {
+        current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+        current_num_bits_ = static_cast<int32_t>(remaining_);
+      } else {
+        // No bits remaining, perhaps we found a run?
+        return AdjustRun({pos, len});
+      }
+      // If current word starts with a zero, we got a full run
+      if (!(current_word_ & kFirstBit)) {
+        return AdjustRun({pos, len});
+      }
+    }
+    // Current word should now start with a set bit
+    len += CountNextOnes();
+    return AdjustRun({pos, len});
+  }
+
+ protected:
+  int64_t position() const {
+    if (Reverse) {
+      return remaining_;
+    } else {
+      return length_ - remaining_;
+    }
+  }
+
+  SetBitRun AdjustRun(SetBitRun run) {
+    if (Reverse) {
+      assert(run.position >= run.length);
+      run.position -= run.length;
+    }
+    return run;
+  }
+
+  uint64_t LoadFullWord() {
+    uint64_t word;
+    if (Reverse) {
+      bitmap_ -= 8;
+    }
+    memcpy(&word, bitmap_, 8);
+    if (!Reverse) {
+      bitmap_ += 8;
+    }
+    return bit_util::ToLittleEndian(word);
+  }
+
+  uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
+    assert(num_bits > 0);
+    uint64_t word = 0;
+    const int64_t num_bytes = bit_util::BytesForBits(num_bits);
+    if (Reverse) {
+      // Read in the most significant bytes of the word
+      bitmap_ -= num_bytes;
+      memcpy(reinterpret_cast<char*>(&word) + 8 - num_bytes, bitmap_, num_bytes);
+      // XXX MostSignificantBitmask
+      return (bit_util::ToLittleEndian(word) << bit_offset) &
+             ~bit_util::LeastSignificantBitMask(64 - num_bits);
+    } else {
+      memcpy(&word, bitmap_, num_bytes);
+      bitmap_ += num_bytes;
+      return (bit_util::ToLittleEndian(word) >> bit_offset) &
+             bit_util::LeastSignificantBitMask(num_bits);
+    }
+  }
+
+  void SkipNextZeros() {
+    assert(current_num_bits_ == 0);
+    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+      current_word_ = LoadFullWord();
+      const auto num_zeros = CountFirstZeros(current_word_);
+      if (num_zeros < 64) {
+        // Run of zeros ends here
+        current_word_ = ConsumeBits(current_word_, num_zeros);
+        current_num_bits_ = 64 - num_zeros;
+        remaining_ -= num_zeros;
+        assert(remaining_ >= 0);
+        assert(current_num_bits_ >= 0);
+        return;
+      }
+      remaining_ -= 64;
+    }
+    // Run of zeros continues in last bitmap word
+    if (remaining_ > 0) {
+      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+      current_num_bits_ = static_cast<int32_t>(remaining_);
+      const auto num_zeros =
+          std::min<int32_t>(current_num_bits_, CountFirstZeros(current_word_));
+      current_word_ = ConsumeBits(current_word_, num_zeros);
+      current_num_bits_ -= num_zeros;
+      remaining_ -= num_zeros;
+      assert(remaining_ >= 0);
+      assert(current_num_bits_ >= 0);
+    }
+  }
+
+  int64_t CountNextOnes() {
+    assert(current_word_ & kFirstBit);
+
+    int64_t len;
+    if (~current_word_) {
+      const auto num_ones = CountFirstZeros(~current_word_);
+      assert(num_ones <= current_num_bits_);
+      assert(num_ones <= remaining_);
+      remaining_ -= num_ones;
+      current_word_ = ConsumeBits(current_word_, num_ones);
+      current_num_bits_ -= num_ones;
+      if (current_num_bits_) {
+        // Run of ones ends here
+        return num_ones;
+      }
+      len = num_ones;
+    } else {
+      // current_word_ is all ones
+      remaining_ -= 64;
+      current_num_bits_ = 0;
+      len = 64;
+    }
+
+    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+      current_word_ = LoadFullWord();
+      const auto num_ones = CountFirstZeros(~current_word_);
+      len += num_ones;
+      remaining_ -= num_ones;
+      if (num_ones < 64) {
+        // Run of ones ends here
+        current_word_ = ConsumeBits(current_word_, num_ones);
+        current_num_bits_ = 64 - num_ones;
+        return len;
+      }
+    }
+    // Run of ones continues in last bitmap word
+    if (remaining_ > 0) {
+      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+      current_num_bits_ = static_cast<int32_t>(remaining_);
+      const auto num_ones = CountFirstZeros(~current_word_);
+      assert(num_ones <= current_num_bits_);
+      assert(num_ones <= remaining_);
+      current_word_ = ConsumeBits(current_word_, num_ones);
+      current_num_bits_ -= num_ones;
+      remaining_ -= num_ones;
+      len += num_ones;
+    }
+    return len;
+  }
+
+  SetBitRun FindCurrentRun() {
+    // Skip any pending zeros
+    const auto num_zeros = CountFirstZeros(current_word_);
+    if (num_zeros >= current_num_bits_) {
+      remaining_ -= current_num_bits_;
+      current_word_ = 0;
+      current_num_bits_ = 0;
+      return {0, 0};
+    }
+    assert(num_zeros <= remaining_);
+    current_word_ = ConsumeBits(current_word_, num_zeros);
+    current_num_bits_ -= num_zeros;
+    remaining_ -= num_zeros;
+    const int64_t pos = position();
+    // Count any ones
+    const auto num_ones = CountFirstZeros(~current_word_);
+    assert(num_ones <= current_num_bits_);
+    assert(num_ones <= remaining_);
+    current_word_ = ConsumeBits(current_word_, num_ones);
+    current_num_bits_ -= num_ones;
+    remaining_ -= num_ones;
+    return {pos, num_ones};
+  }
+
+  inline int CountFirstZeros(uint64_t word);
+  inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);
+
+  const uint8_t* bitmap_;
+  const int64_t length_;
+  int64_t remaining_;
+  uint64_t current_word_;
+  int32_t current_num_bits_;
+
+  static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
+};
+
+template <>
+inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
+  return bit_util::CountTrailingZeros(word);
+}
+
+template <>
+inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
+  return bit_util::CountLeadingZeros(word);
+}
+
+template <>
+inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word,
+                                                        int32_t num_bits) {
+  return word >> num_bits;
+}
+
+template <>
+inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word,
+                                                       int32_t num_bits) {
+  return word << num_bits;
+}
+
+using SetBitRunReader = BaseSetBitRunReader<false>;
+using ReverseSetBitRunReader = BaseSetBitRunReader<true>;
+
+// Functional-style bit run visitors.
+
+// XXX: Try to make this function small so the compiler can inline and optimize
+// the `visit` function, which is normally a hot loop with vectorizable code.
+// - don't inline SetBitRunReader constructor, it doesn't hurt performance
+// - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
+template <typename Visit>
+inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
+                              Visit&& visit) {
+  if (bitmap == NULLPTR) {
+    // Assuming all set (as in a null bitmap)
+    return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+  }
+  SetBitRunReader reader(bitmap, offset, length);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    ARROW_RETURN_NOT_OK(visit(run.position, run.length));
+  }
+  return Status::OK();
+}
+
+template <typename Visit>
+inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
+                                Visit&& visit) {
+  if (bitmap == NULLPTR) {
+    // Assuming all set (as in a null bitmap)
+    visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+    return;
+  }
+  SetBitRunReader reader(bitmap, offset, length);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    visit(run.position, run.length);
+  }
+}
+
+template <typename Visit>
+inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+                              int64_t length, Visit&& visit) {
+  return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
+                         std::forward<Visit>(visit));
+}
+
+template <typename Visit>
+inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+                                int64_t length, Visit&& visit) {
+  VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length,
+                      std::forward<Visit>(visit));
+}
+
+}  // namespace internal
+}  // namespace arrow
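+
+// Usage sketch (illustrative; not part of the upstream header):
+//
+//   // 0x3A == 0b00111010 has set-bit runs {pos=1, len=1} and {pos=3, len=3}.
+//   uint8_t bitmap[] = {0x3A};
+//   arrow::internal::VisitSetBitRunsVoid(
+//       bitmap, /*offset=*/0, /*length=*/8,
+//       [](int64_t pos, int64_t len) { /* process bits [pos, pos + len) */ });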
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d3a1dc2459f935e5494743a253a24c5d0b1f197
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h
@@ -0,0 +1,370 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_MSC_VER)
+#if defined(_M_AMD64) || defined(_M_X64)
+#include <intrin.h>  // IWYU pragma: keep
+#include <nmmintrin.h>
+#endif
+
+#pragma intrinsic(_BitScanReverse)
+#pragma intrinsic(_BitScanForward)
+#define ARROW_POPCOUNT64 __popcnt64
+#define ARROW_POPCOUNT32 __popcnt
+#else
+#define ARROW_POPCOUNT64 __builtin_popcountll
+#define ARROW_POPCOUNT32 __builtin_popcount
+#endif
+
+#include <cstdint>
+#include <type_traits>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace detail {
+
+template <typename Integer>
+typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
+  return static_cast<typename std::make_unsigned<Integer>::type>(x);
+}
+
+}  // namespace detail
+
+namespace bit_util {
+
+// The number of set bits in a given unsigned byte value, pre-computed
+//
+// Generated with the following Python code
+// output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};'
+// popcounts = [str(bin(i).count('1')) for i in range(0, 256)]
+// print(output.format(', '.join(popcounts)))
+static constexpr uint8_t kBytePopcount[] = {
+    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3,
+    4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4,
+    4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4,
+    5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,
+    4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2,
+    3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
+    5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
+    5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6,
+    4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
+
+static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); }
+static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); }
+
+//
+// Bit-related computations on integer values
+//
+
+// Returns the ceil of value/divisor
+constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
+  return (value == 0) ? 0 : 1 + (value - 1) / divisor;
+}
+
+// Return the number of bytes needed to fit the given number of bits
+constexpr int64_t BytesForBits(int64_t bits) {
+  // This formula avoids integer overflow on very large `bits`
+  return (bits >> 3) + ((bits & 7) != 0);
+}
+
+constexpr bool IsPowerOf2(int64_t value) {
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+constexpr bool IsPowerOf2(uint64_t value) {
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Returns the smallest power of two that contains n. If n is already a
+// power of two, it is returned as is.
+static inline int64_t NextPower2(int64_t n) {
+  // Taken from
+  // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+  n--;
+  n |= n >> 1;
+  n |= n >> 2;
+  n |= n >> 4;
+  n |= n >> 8;
+  n |= n >> 16;
+  n |= n >> 32;
+  n++;
+  return n;
+}
+
+constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; }
+
+constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; }
+
+// Returns a mask for the bit_index lower order bits.
+// Only valid for bit_index in the range [0, 64).
+constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
+  return (static_cast<uint64_t>(1) << bit_index) - 1;
+}
+
+// Returns 'value' rounded up to the nearest multiple of 'factor'
+constexpr int64_t RoundUp(int64_t value, int64_t factor) {
+  return CeilDiv(value, factor) * factor;
+}
+
+// Returns 'value' rounded down to the nearest multiple of 'factor'
+constexpr int64_t RoundDown(int64_t value, int64_t factor) {
+  return (value / factor) * factor;
+}
+
+// Returns 'value' rounded up to the nearest multiple of 'factor' when factor
+// is a power of two.
+// The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
+// since we cannot return the correct result which would be 2**64.
+constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) {
+  // DCHECK(value >= 0);
+  // DCHECK(IsPowerOf2(factor));
+  return (value + (factor - 1)) & ~(factor - 1);
+}
+
+constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) {
+  // DCHECK(IsPowerOf2(factor));
+  return (value + (factor - 1)) & ~(factor - 1);
+}
+
+constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); }
+
+constexpr int64_t RoundUpToMultipleOf64(int64_t num) {
+  return RoundUpToPowerOf2(num, 64);
+}
+
+// Returns the number of bytes covering a sliced bitmap. Find the length
+// rounded to cover full bytes on both extremities.
+//
+// The following example represents a slice (offset=10, length=9)
+//
+// 0       8       16      24
+// |-------|-------|-------|
+//           [       ]          (slice)
+//         [               ]    (same slice aligned to byte bounds, length=16)
+//
+// The covering bytes is the length (in bytes) of this new aligned slice.
+constexpr int64_t CoveringBytes(int64_t offset, int64_t length) {
+  return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8;
+}
+
+// Returns the 'num_bits' least-significant bits of 'v'.
+static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
+  if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
+  if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
+  int n = 64 - num_bits;
+  return (v << n) >> n;
+}
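+
+// Worked examples for the helpers above (illustrative; not part of the
+// upstream header):
+//
+//   BytesForBits(9)            == 2     // 9 bits need 2 bytes
+//   CeilDiv(7, 3)              == 3
+//   NextPower2(5)              == 8
+//   RoundUpToMultipleOf64(100) == 128
+//   CoveringBytes(10, 9)       == 2     // bits [10, 19) touch bytes 1 and 2
+//   TrailingBits(0xFF, 4)      == 0x0F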
+
+/// \brief Count the number of leading zeros in an unsigned integer.
+static inline int CountLeadingZeros(uint32_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 32;
+  return static_cast<int>(__builtin_clz(value));
+#elif defined(_MSC_VER)
+  unsigned long index;                                               // NOLINT
+  if (_BitScanReverse(&index, static_cast<unsigned long>(value))) {  // NOLINT
+    return 31 - static_cast<int>(index);
+  } else {
+    return 32;
+  }
+#else
+  int bitpos = 0;
+  while (value != 0) {
+    value >>= 1;
+    ++bitpos;
+  }
+  return 32 - bitpos;
+#endif
+}
+
+static inline int CountLeadingZeros(uint64_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 64;
+  return static_cast<int>(__builtin_clzll(value));
+#elif defined(_MSC_VER)
+  unsigned long index;                     // NOLINT
+  if (_BitScanReverse64(&index, value)) {  // NOLINT
+    return 63 - static_cast<int>(index);
+  } else {
+    return 64;
+  }
+#else
+  int bitpos = 0;
+  while (value != 0) {
+    value >>= 1;
+    ++bitpos;
+  }
+  return 64 - bitpos;
+#endif
+}
+
+static inline int CountTrailingZeros(uint32_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 32;
+  return static_cast<int>(__builtin_ctzl(value));
+#elif defined(_MSC_VER)
+  unsigned long index;  // NOLINT
+  if (_BitScanForward(&index, value)) {
+    return static_cast<int>(index);
+  } else {
+    return 32;
+  }
+#else
+  int bitpos = 0;
+  if (value) {
+    while ((value & 1) == 0) {
+      value >>= 1;
+      ++bitpos;
+    }
+  } else {
+    bitpos = 32;
+  }
+  return bitpos;
+#endif
+}
+
+static inline int CountTrailingZeros(uint64_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 64;
+  return static_cast<int>(__builtin_ctzll(value));
+#elif defined(_MSC_VER)
+  unsigned long index;  // NOLINT
+  if (_BitScanForward64(&index, value)) {
+    return static_cast<int>(index);
+  } else {
+    return 64;
+  }
+#else
+  int bitpos = 0;
+  if (value) {
+    while ((value & 1) == 0) {
+      value >>= 1;
+      ++bitpos;
+    }
+  } else {
+    bitpos = 64;
+  }
+  return bitpos;
+#endif
+}
+
+// Returns the minimum number of bits needed to represent an unsigned value
+static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }
+
+// Returns ceil(log2(x)).
+static inline int Log2(uint64_t x) {
+  // DCHECK_GT(x, 0);
+  return NumRequiredBits(x - 1);
+}
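+
+// Worked examples (illustrative; not part of the upstream header):
+//
+//   CountLeadingZeros(uint32_t(1))  == 31
+//   CountTrailingZeros(uint32_t(8)) == 3
+//   NumRequiredBits(5)              == 3   // 5 is 0b101
+//   Log2(8)                         == 3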
+
+//
+// Utilities for reading and writing individual bits by their index
+// in a memory area.
+//
+
+// Bitmask selecting the k-th bit in a byte
+static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};
+
+// the bitwise complement version of kBitmask
+static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};
+
+// Bitmask selecting the (k - 1) preceding bits in a byte
+static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
+static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};
+
+// the bitwise complement version of kPrecedingBitmask
+static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};
+
+static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
+  return (bits[i >> 3] >> (i & 0x07)) & 1;
+}
+
+// Gets the i-th bit from a byte. Should only be used with i <= 7.
+static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) {
+  return byte & kBitmask[i];
+}
+
+static inline void ClearBit(uint8_t* bits, int64_t i) {
+  bits[i / 8] &= kFlippedBitmask[i % 8];
+}
+
+static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; }
+
+static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
+  // https://graphics.stanford.edu/~seander/bithacks.html
+  // "Conditionally set or clear bits without branching"
+  // NOTE: this seems to confuse Valgrind as it reads from potentially
+  // uninitialized memory
+  bits[i / 8] ^= static_cast<uint8_t>(-static_cast<uint8_t>(bit_is_set) ^ bits[i / 8]) &
+                 kBitmask[i % 8];
+}
+
+/// \brief set or clear a range of bits quickly
+ARROW_EXPORT
+void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set);
+
+/// \brief Sets all bits in the bitmap to true
+ARROW_EXPORT
+void SetBitmap(uint8_t* data, int64_t offset, int64_t length);
+
+/// \brief Clears all bits in the bitmap (set to false)
+ARROW_EXPORT
+void ClearBitmap(uint8_t* data, int64_t offset, int64_t length);
+
+/// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be
+/// returned
+/// ex:
+/// ref: https://stackoverflow.com/a/59523400
+template <typename Word>
+constexpr Word PrecedingWordBitmask(unsigned int const i) {
+  return static_cast<Word>(static_cast<Word>(i < sizeof(Word) * 8)
+                           << (i & (sizeof(Word) * 8 - 1))) -
+         1;
+}
+static_assert(PrecedingWordBitmask<uint8_t>(0) == 0x00, "");
+static_assert(PrecedingWordBitmask<uint8_t>(4) == 0x0f, "");
+static_assert(PrecedingWordBitmask<uint8_t>(8) == 0xff, "");
+static_assert(PrecedingWordBitmask<uint16_t>(8) == 0x00ff, "");
+
+/// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits
+/// from `high`.
+/// Word ret
+/// for (i = 0; i < sizeof(Word)*8; i++){
+///   ret[i]= i < n ? low[i]: high[i];
+/// }
+template <typename Word>
+constexpr Word SpliceWord(int n, Word low, Word high) {
+  return (high & ~PrecedingWordBitmask<Word>(n)) | (low & PrecedingWordBitmask<Word>(n));
+}
+
+/// \brief Pack integers into a bitmap in batches of 8
+template <int batch_size>
+void PackBits(const uint32_t* values, uint8_t* out) {
+  for (int i = 0; i < batch_size / 8; ++i) {
+    *out++ = static_cast<uint8_t>(values[0] | values[1] << 1 | values[2] << 2 |
+                                  values[3] << 3 | values[4] << 4 | values[5] << 5 |
+                                  values[6] << 6 | values[7] << 7);
+    values += 8;
+  }
+}
+
+}  // namespace bit_util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h
new file mode 100644
index 0000000000000000000000000000000000000000..4750e697fc7972e8ad57766ffd1134cf3e99fd14
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h
@@ -0,0 +1,466 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <bitset>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+
+#include "arrow/buffer.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/bitmap_ops.h"
+#include "arrow/util/bitmap_reader.h"
+#include "arrow/util/bitmap_writer.h"
+#include "arrow/util/compare.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/functional.h"
+#include "arrow/util/span.h"
+#include "arrow/util/string_builder.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class BooleanArray;
+
+namespace internal {
+
+class ARROW_EXPORT Bitmap : public util::ToStringOstreamable<Bitmap>,
+                            public util::EqualityComparable<Bitmap> {
+ public:
+  Bitmap() = default;
+
+  Bitmap(const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length)
+      : data_(buffer->data()), offset_(offset), length_(length) {
+    if (buffer->is_mutable()) {
+      mutable_data_ = buffer->mutable_data();
+    }
+  }
+
+  Bitmap(const void* data, int64_t offset, int64_t length)
+      : data_(reinterpret_cast<const uint8_t*>(data)),
+        offset_(offset),
+        length_(length) {}
+
+  Bitmap(void* data, int64_t offset, int64_t length)
+      : data_(reinterpret_cast<const uint8_t*>(data)),
+        mutable_data_(reinterpret_cast<uint8_t*>(data)),
+        offset_(offset),
+        length_(length) {}
+
+  Bitmap Slice(int64_t offset) const {
+    if (mutable_data_ != NULLPTR) {
+      return {mutable_data_, offset_ + offset, length_ - offset};
+    } else {
+      return {data_, offset_ + offset, length_ - offset};
+    }
+  }
+
+  Bitmap Slice(int64_t offset, int64_t length) const {
+    if (mutable_data_ != NULLPTR) {
+      return {mutable_data_, offset_ + offset, length};
+    } else {
+      return {data_, offset_ + offset, length};
+    }
+  }
+
+  std::string ToString() const;
+
+  bool Equals(const Bitmap& other) const;
+
+  std::string Diff(const Bitmap& other) const;
+
+  bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); }
+
+  bool operator[](int64_t i) const { return GetBit(i); }
+
+  void SetBitTo(int64_t i, bool v) const {
+    bit_util::SetBitTo(mutable_data_, i + offset_, v);
+  }
+
+  void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); }
+
+  void CopyFrom(const Bitmap& other);
+  void CopyFromInverted(const Bitmap& other);
+
+  /// \brief Visit bits from each bitmap as bitset<N>
+  ///
+  /// All bitmaps must have identical length.
+  template <size_t N, typename Visitor>
+  static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) {
+    int64_t bit_length = BitLength(bitmaps, N);
+    std::bitset<N> bits;
+    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
+      for (size_t i = 0; i < N; ++i) {
+        bits[i] = bitmaps[i].GetBit(bit_i);
+      }
+      visitor(bits);
+    }
+  }
+
+  /// \brief Visit bits from each bitmap as bitset<N>
+  ///
+  /// All bitmaps must have identical length.
+  template <size_t N, typename Visitor>
+  static void VisitBits(const std::array<Bitmap, N>& bitmaps, Visitor&& visitor) {
+    int64_t bit_length = BitLength(bitmaps);
+    std::bitset<N> bits;
+    for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
+      for (size_t i = 0; i < N; ++i) {
+        bits[i] = bitmaps[i].GetBit(bit_i);
+      }
+      visitor(bits);
+    }
+  }
+
+  /// \brief Visit words of bits from each bitmap as array<Word, N>
+  ///
+  /// All bitmaps must have identical length. The first bit in a visited bitmap
+  /// may be offset within the first visited word, but words will otherwise contain
+  /// densely packed bits loaded from the bitmap. That offset within the first word is
+  /// returned.
+ /// + /// TODO(bkietz) allow for early termination + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. + template >::type::value_type> + static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + // local, mutable variables which will be sliced/decremented to represent consumption: + Bitmap bitmaps[N]; + int64_t offsets[N]; + int64_t bit_length = BitLength(bitmaps_arg, N); + util::span words[N]; + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps_arg[i]; + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + + auto consume = [&](int64_t consumed_bits) { + for (size_t i = 0; i < N; ++i) { + bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits); + offsets[i] = bitmaps[i].template word_offset(); + assert(offsets[i] >= 0 && offsets[i] < kBitWidth); + words[i] = bitmaps[i].template words(); + } + bit_length -= consumed_bits; + }; + + std::array visited_words; + visited_words.fill(0); + + if (bit_length <= kBitWidth * 2) { + // bitmaps fit into one or two words so don't bother with optimization + while (bit_length > 0) { + auto leading_bits = std::min(bit_length, kBitWidth); + SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + return 0; + } + + int64_t max_offset = *std::max_element(offsets, offsets + N); + int64_t min_offset = *std::min_element(offsets, offsets + N); + if (max_offset > 0) { + // consume leading bits + auto leading_bits = kBitWidth - min_offset; + SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words); + visitor(visited_words); + consume(leading_bits); + } + assert(*std::min_element(offsets, offsets + N) == 0); + + int64_t whole_word_count = bit_length / kBitWidth; + assert(whole_word_count >= 1); + + if (min_offset == max_offset) { + // all offsets were identical, all leading bits have been consumed + assert( + std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; })); + + for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) { + for (size_t i = 0; i < N; ++i) { + visited_words[i] = words[i][word_i]; + } + visitor(visited_words); + } + consume(whole_word_count * kBitWidth); + } else { + // leading bits from potentially incomplete words have been consumed + + // word_i such that words[i][word_i] and words[i][word_i + 1] are lie entirely + // within the bitmap for all i + for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) { + for (size_t i = 0; i < N; ++i) { + if (offsets[i] == 0) { + visited_words[i] = words[i][word_i]; + } else { + auto words0 = bit_util::ToLittleEndian(words[i][word_i]); + auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]); + visited_words[i] = bit_util::FromLittleEndian( + (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i]))); + } + } + visitor(visited_words); + } + consume((whole_word_count - 1) * kBitWidth); + + SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words); + + visitor(visited_words); + consume(kBitWidth); + } + + // load remaining bits + if (bit_length > 0) { + SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words); + visitor(visited_words); + } + + return min_offset; + } + + 
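// Usage sketch (illustrative; `left` and `right` are existing Bitmaps of
+  // equal length, and consumption of the result is elided):
+  //
+  //   Bitmap bitmaps[2] = {left, right};
+  //   Bitmap::VisitWords(bitmaps, [&](std::array<uint64_t, 2> words) {
+  //     uint64_t conjunction = words[0] & words[1];  // 64 bits at a time
+  //     // ... consume `conjunction` ...
+  //   });
+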
template >::type::value_type> + static void RunVisitWordsAndWriteLoop(int64_t bit_length, + std::array& readers, + std::array& writers, + Visitor&& visitor) { + constexpr int64_t kBitWidth = sizeof(Word) * 8; + + std::array visited_words; + std::array output_words; + + // every reader will have same number of words, since they are same length'ed + // TODO($JIRA) this will be inefficient in some cases. When there are offsets beyond + // Word boundary, every Word would have to be created from 2 adjoining Words + auto n_words = readers[0].words(); + bit_length -= n_words * kBitWidth; + while (n_words--) { + // first collect all words to visited_words array + for (size_t i = 0; i < N; i++) { + visited_words[i] = readers[i].NextWord(); + } + visitor(visited_words, &output_words); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextWord(output_words[i]); + } + } + + // every reader will have same number of trailing bytes, because of the above reason + // tailing portion could be more than one word! (ref: BitmapWordReader constructor) + // remaining full/ partial words to write + + if (bit_length) { + // convert the word visitor lambda to a byte_visitor + auto byte_visitor = [&](const std::array& in, + std::array* out) { + std::array in_words; + std::array out_words; + std::copy(in.begin(), in.end(), in_words.begin()); + visitor(in_words, &out_words); + for (size_t i = 0; i < M; i++) { + out->at(i) = static_cast(out_words[i]); + } + }; + + std::array visited_bytes; + std::array output_bytes; + int n_bytes = readers[0].trailing_bytes(); + while (n_bytes--) { + visited_bytes.fill(0); + output_bytes.fill(0); + int valid_bits; + for (size_t i = 0; i < N; i++) { + visited_bytes[i] = readers[i].NextTrailingByte(valid_bits); + } + byte_visitor(visited_bytes, &output_bytes); + for (size_t i = 0; i < M; i++) { + writers[i].PutNextTrailingByte(output_bytes[i], valid_bits); + } + } + } + } + + /// \brief Visit words of bits from each input bitmap as array and collects + /// outputs to an array, to be written into the output bitmaps accordingly. + /// + /// All bitmaps must have identical length. The first bit in a visited bitmap + /// may be offset within the first visited word, but words will otherwise contain + /// densely packed bits loaded from the bitmap. That offset within the first word is + /// returned. + /// Visitor is expected to have the following signature + /// [](const std::array& in_words, std::array* out_words){...} + /// + // NOTE: this function is efficient on 3+ sufficiently large bitmaps. + // It also has a large prolog / epilog overhead and should be used + // carefully in other cases. + // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid + // and BitmapUInt64Reader. 
+ template >::type::value_type> + static void VisitWordsAndWrite(const std::array& bitmaps_arg, + std::array* out_bitmaps_arg, + Visitor&& visitor) { + int64_t bit_length = BitLength(bitmaps_arg); + assert(bit_length == BitLength(*out_bitmaps_arg)); + + // if both input and output bitmaps have no byte offset, then use special template + if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; }) && + std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(), + [](const Bitmap& b) { return b.offset_ % 8 == 0; })) { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = BitmapWordReader( + in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter( + out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } else { + std::array, N> readers; + for (size_t i = 0; i < N; ++i) { + const Bitmap& in_bitmap = bitmaps_arg[i]; + readers[i] = + BitmapWordReader(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_); + } + + std::array, M> writers; + for (size_t i = 0; i < M; ++i) { + const Bitmap& out_bitmap = out_bitmaps_arg->at(i); + writers[i] = BitmapWordWriter(out_bitmap.mutable_data_, out_bitmap.offset_, + out_bitmap.length_); + } + + RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor); + } + } + + const uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return mutable_data_; } + + /// offset of first bit relative to buffer().data() + int64_t offset() const { return offset_; } + + /// number of bits in this Bitmap + int64_t length() const { return length_; } + + /// span of all bytes which contain any bit in this Bitmap + util::span bytes() const { + auto byte_offset = offset_ / 8; + auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset; + return {data_ + byte_offset, static_cast(byte_count)}; + } + + private: + /// span of all Words which contain any bit in this Bitmap + /// + /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36) + /// words() would span bits [16, 48). + /// + /// 0 16 32 48 64 + /// |-------|-------|------|------| (buffer) + /// [ ] (bitmap) + /// |-------|------| (returned words) + /// + /// \warning The words may contain bytes which lie outside the buffer or are + /// uninitialized. + template + util::span words() const { + auto bytes_addr = reinterpret_cast(bytes().data()); + auto words_addr = bytes_addr - bytes_addr % sizeof(Word); + auto word_byte_count = + bit_util::RoundUpToPowerOf2(static_cast(bytes_addr + bytes().size()), + static_cast(sizeof(Word))) - + words_addr; + return {reinterpret_cast(words_addr), + static_cast(word_byte_count / sizeof(Word))}; + } + + /// offset of first bit relative to words().data() + template + int64_t word_offset() const { + return offset_ + 8 * (reinterpret_cast(data_) - + reinterpret_cast(words().data())); + } + + /// load words from bitmaps bitwise + template + static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset, + int64_t out_length, bool set_trailing_bits, + std::array* out) { + out->fill(0); + + int64_t out_offset = set_trailing_bits ? 
sizeof(Word) * 8 - out_length : 0; + + Bitmap slices[N], out_bitmaps[N]; + for (size_t i = 0; i < N; ++i) { + slices[i] = bitmaps[i].Slice(offset, out_length); + out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length); + } + + int64_t bit_i = 0; + Bitmap::VisitBits(slices, [&](std::bitset bits) { + for (size_t i = 0; i < N; ++i) { + out_bitmaps[i].SetBitTo(bit_i, bits[i]); + } + ++bit_i; + }); + } + + /// assert bitmaps have identical length and return that length + static int64_t BitLength(const Bitmap* bitmaps, size_t N); + + template + static int64_t BitLength(const std::array& bitmaps) { + for (size_t i = 1; i < N; ++i) { + assert(bitmaps[i].length() == bitmaps[0].length()); + } + return bitmaps[0].length(); + } + + const uint8_t* data_ = NULLPTR; + uint8_t* mutable_data_ = NULLPTR; + int64_t offset_ = 0, length_ = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h new file mode 100644 index 0000000000000000000000000000000000000000..5bd2ad44140834487b02d5899d3515e7b7eafefc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Generate Bitmap with all position to `value` except for one found +/// at `straggler_pos`. +ARROW_EXPORT +Result> BitmapAllButOne(MemoryPool* pool, int64_t length, + int64_t straggler_pos, bool value = true); + +/// \brief Convert vector of bytes to bitmap buffer +ARROW_EXPORT +Result> BytesToBits(const std::vector&, + MemoryPool* pool = default_memory_pool()); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h new file mode 100644 index 0000000000000000000000000000000000000000..52a1e228e01f1d6c3c37a5e2d49d843f0a4573f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <type_traits>
+
+#include "arrow/buffer.h"
+#include "arrow/memory_pool.h"
+#include "arrow/result.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+// A std::generate() like function to write sequential bits into a bitmap area.
+// Bits preceding the bitmap area are preserved, bits following the bitmap
+// area may be clobbered.
+
+template <class Generator>
+void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
+  if (length == 0) {
+    return;
+  }
+  uint8_t* cur = bitmap + start_offset / 8;
+  uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
+  uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
+
+  for (int64_t index = 0; index < length; ++index) {
+    const bool bit = g();
+    current_byte = bit ? (current_byte | bit_mask) : current_byte;
+    bit_mask = static_cast<uint8_t>(bit_mask << 1);
+    if (bit_mask == 0) {
+      bit_mask = 1;
+      *cur++ = current_byte;
+      current_byte = 0;
+    }
+  }
+  if (bit_mask != 1) {
+    *cur++ = current_byte;
+  }
+}
+
+// Like GenerateBits(), but unrolls its main loop for higher performance.
+
+template <class Generator>
+void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
+                          Generator&& g) {
+  static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
+                "Functor passed to GenerateBitsUnrolled must return bool");
+
+  if (length == 0) {
+    return;
+  }
+  uint8_t current_byte;
+  uint8_t* cur = bitmap + start_offset / 8;
+  const uint64_t start_bit_offset = start_offset % 8;
+  uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
+  int64_t remaining = length;
+
+  if (bit_mask != 0x01) {
+    current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
+    while (bit_mask != 0 && remaining > 0) {
+      current_byte |= g() * bit_mask;
+      bit_mask = static_cast<uint8_t>(bit_mask << 1);
+      --remaining;
+    }
+    *cur++ = current_byte;
+  }
+
+  int64_t remaining_bytes = remaining / 8;
+  uint8_t out_results[8];
+  while (remaining_bytes-- > 0) {
+    for (int i = 0; i < 8; ++i) {
+      out_results[i] = g();
+    }
+    *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
+                                  out_results[2] << 2 | out_results[3] << 3 |
+                                  out_results[4] << 4 | out_results[5] << 5 |
+                                  out_results[6] << 6 | out_results[7] << 7);
+  }
+
+  int64_t remaining_bits = remaining % 8;
+  if (remaining_bits) {
+    current_byte = 0;
+    bit_mask = 0x01;
+    while (remaining_bits-- > 0) {
+      current_byte |= g() * bit_mask;
+      bit_mask = static_cast<uint8_t>(bit_mask << 1);
+    }
+    *cur++ = current_byte;
+  }
+}
+
+}  // namespace internal
+}  // namespace arrow
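+
+// Usage sketch (illustrative; not part of the upstream header):
+//
+//   // Write 16 alternating bits: bits 0, 2, 4, ... end up set.
+//   uint8_t bitmap[2] = {0, 0};
+//   bool next = false;
+//   arrow::internal::GenerateBits(bitmap, /*start_offset=*/0, /*length=*/16,
+//                                 [&] { return next = !next; });
+//   // bitmap[0] == bitmap[1] == 0x55 afterwards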
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9d900b2588d9d556fd1995de1d60d8583edfca7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h
@@ -0,0 +1,244 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/result.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Buffer;
+class MemoryPool;
+
+namespace internal {
+
+// ----------------------------------------------------------------------
+// Bitmap utilities
+
+/// Copy a bit range of an existing bitmap
+///
+/// \param[in] pool memory pool to allocate memory from
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to copy
+///
+/// \return the copied bits in a new buffer
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> CopyBitmap(MemoryPool* pool, const uint8_t* bitmap,
+                                           int64_t offset, int64_t length);
+
+/// Copy a bit range of an existing bitmap into an existing bitmap
+///
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to copy
+/// \param[in] dest_offset bit offset into the destination
+/// \param[out] dest the destination buffer, must have at least space for
+/// (dest_offset + length) bits
+ARROW_EXPORT
+void CopyBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
+                int64_t dest_offset);
+
+/// Invert a bit range of an existing bitmap into an existing bitmap
+///
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to invert
+/// \param[in] dest_offset bit offset into the destination
+/// \param[out] dest the destination buffer, must have at least space for
+/// (dest_offset + length) bits
+ARROW_EXPORT
+void InvertBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
+                  int64_t dest_offset);
+
+/// Invert a bit range of an existing bitmap
+///
+/// \param[in] pool memory pool to allocate memory from
+/// \param[in] bitmap source data
+/// \param[in] offset bit offset into the source data
+/// \param[in] length number of bits to invert
+///
+/// \return the inverted bits in a new buffer
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> InvertBitmap(MemoryPool* pool, const uint8_t* bitmap,
+                                             int64_t offset, int64_t length);
+ARROW_EXPORT +Result<std::shared_ptr<Buffer>> ReverseBitmap(MemoryPool* pool, const uint8_t* bitmap, + int64_t offset, int64_t length); + +/// Compute the number of 1's in the given data array +/// +/// \param[in] data a packed LSB-ordered bitmap as a byte array +/// \param[in] bit_offset a bitwise offset into the bitmap +/// \param[in] length the number of bits to inspect in the bitmap relative to +/// the offset +/// +/// \return The number of set (1) bits in the range +ARROW_EXPORT +int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length); + +/// Compute the number of 1's in the result of an "and" (&) of two bitmaps +/// +/// \param[in] left_bitmap a packed LSB-ordered bitmap as a byte array +/// \param[in] left_offset a bitwise offset into the left bitmap +/// \param[in] right_bitmap a packed LSB-ordered bitmap as a byte array +/// \param[in] right_offset a bitwise offset into the right bitmap +/// \param[in] length the length of the bitmaps (must be the same) +/// +/// \return The number of set (1) bits in the "and" of the two bitmaps +ARROW_EXPORT +int64_t CountAndSetBits(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length); + +ARROW_EXPORT +bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length); + +// Same as BitmapEquals, but considers a NULL bitmap pointer the same as an +// all-ones bitmap. +ARROW_EXPORT +bool OptionalBitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length); + +ARROW_EXPORT +bool OptionalBitmapEquals(const std::shared_ptr<Buffer>& left, int64_t left_offset, + const std::shared_ptr<Buffer>& right, int64_t right_offset, + int64_t length); + +/// \brief Do a "bitmap and" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> BitmapAnd(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap and" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapAnd(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap or" for the given bit length on right and left buffers +/// starting at their respective bit-offsets and put the results in out_buffer +/// starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> BitmapOr(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap or" for the given bit length on right and left buffers +/// starting at their respective bit-offsets and put the results in out +/// starting at the given bit-offset. 
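+/// +/// A minimal usage sketch (illustrative only; the variable names and values +/// are assumptions for this example, not part of the upstream docs): +/// +/// uint8_t a = 0x0F; +/// uint8_t b = 0x33; +/// uint8_t out = 0; +/// arrow::internal::BitmapOr(&a, /*left_offset=*/0, &b, /*right_offset=*/0, +/// /*length=*/8, /*out_offset=*/0, &out); +/// // out == 0x3F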
+ARROW_EXPORT +void BitmapOr(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap xor" for the given bit-length on right and left +/// buffers starting at their respective bit-offsets and put the results in +/// out_buffer starting at the given bit offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> BitmapXor(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap xor" for the given bit-length on right and left +/// buffers starting at their respective bit-offsets and put the results in +/// out starting at the given bit offset. +ARROW_EXPORT +void BitmapXor(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap and not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> BitmapAndNot(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap and not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapAndNot(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +/// \brief Do a "bitmap or not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out_buffer starting at the given bit-offset. +/// +/// out_buffer will be allocated and initialized to zeros using pool before +/// the operation. +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> BitmapOrNot(MemoryPool* pool, const uint8_t* left, + int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, + int64_t out_offset); + +/// \brief Do a "bitmap or not" on right and left buffers starting at +/// their respective bit-offsets for the given bit-length and put +/// the results in out starting at the given bit-offset. +ARROW_EXPORT +void BitmapOrNot(const uint8_t* left, int64_t left_offset, const uint8_t* right, + int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ce8012f3eb5a65ec91b1321b687bc0d77f7557 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h @@ -0,0 +1,286 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <cstdint> +#include <cstring> + +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +class BitmapWriter { + // A sequential bitwise writer that preserves surrounding bit values. + + public: + BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = bitmap[byte_offset_]; + } else { + current_byte_ = 0; + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; } + + void Next() { + bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + if (ARROW_PREDICT_TRUE(position_ < length_)) { + current_byte_ = bitmap_[byte_offset_]; + } + } + } + + void Finish() { + // Store current byte if we didn't go past bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +class FirstTimeBitmapWriter { + // Like BitmapWriter, but any bit values *following* the bits written + // might be clobbered. It is hence faster than BitmapWriter, and can + // also avoid false positives with Valgrind. + + public: + FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(bitmap), position_(0), length_(length) { + current_byte_ = 0; + byte_offset_ = start_offset / 8; + bit_mask_ = bit_util::kBitmask[start_offset % 8]; + if (length > 0) { + current_byte_ = + bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8]; + } else { + current_byte_ = 0; + } + } + + /// Appends number_of_bits from word to the bitmap, starting at the current + /// bit offset. + /// + /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed + /// to be unset (i.e. 0). + /// \param[in] number_of_bits The number of bits to append from word. + void AppendWord(uint64_t word, int64_t number_of_bits) { + if (ARROW_PREDICT_FALSE(number_of_bits == 0)) { + return; + } + + // Location that the first byte needs to be written to. + uint8_t* append_position = bitmap_ + byte_offset_; + + // Update state variables except for current_byte_ here. + position_ += number_of_bits; + int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_)); + bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8]; + byte_offset_ += (bit_offset + number_of_bits) / 8; + + if (bit_offset != 0) { + // We are in the middle of the byte. 
This code updates the byte and shifts + // bits appropriately within word so it can be memcpy'd below. + int64_t bits_to_carry = 8 - bit_offset; + // Carry over bits from word to current_byte_. We assume any extra bits in word + // unset so no additional accounting is needed for when number_of_bits < + // bits_to_carry. + current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset; + // Check if everything is transferred into current_byte_. + if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) { + return; + } + *append_position = current_byte_; + append_position++; + // Move the carry bits off of word. + word = word >> bits_to_carry; + number_of_bits -= bits_to_carry; + } + word = bit_util::ToLittleEndian(word); + int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits); + std::memcpy(append_position, &word, bytes_for_word); + // At this point, the previous current_byte_ has been written to bitmap_. + // The new current_byte_ is either the last relevant byte in 'word' + // or cleared if the new position is byte aligned (i.e. a fresh byte). + if (bit_mask_ == 0x1) { + current_byte_ = 0; + } else { + current_byte_ = *(append_position + bytes_for_word - 1); + } + } + + void Set() { current_byte_ |= bit_mask_; } + + void Clear() {} + + void Next() { + bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1); + ++position_; + if (bit_mask_ == 0) { + // Finished this byte, need advancing + bit_mask_ = 0x01; + bitmap_[byte_offset_++] = current_byte_; + current_byte_ = 0; + } + } + + void Finish() { + // Store current byte if we didn't go past bitmap storage + if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) { + bitmap_[byte_offset_] = current_byte_; + } + } + + int64_t position() const { return position_; } + + private: + uint8_t* bitmap_; + int64_t position_; + int64_t length_; + + uint8_t current_byte_; + uint8_t bit_mask_; + int64_t byte_offset_; +}; + +template <typename Word, bool may_have_byte_offset = true> +class BitmapWordWriter { + public: + BitmapWordWriter() = default; + BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length) + : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)), + bitmap_(bitmap + offset / 8), + bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)), + mask_((1U << offset_) - 1) { + if (offset_) { + if (length >= static_cast<int64_t>(sizeof(Word) * 8)) { + current_data.word_ = load<Word>(bitmap_); + } else if (length > 0) { + current_data.epi.byte_ = load<uint8_t>(bitmap_); + } + } + } + + void PutNextWord(Word word) { + if (may_have_byte_offset && offset_) { + // split one word into two adjacent words, don't touch unused bits + // |<------ word ----->| + // +-----+-------------+ + // | A | B | + // +-----+-------------+ + // | | + // v v offset + // +-------------+-----+-------------+-----+ + // | --- | A | B | --- | + // +-------------+-----+-------------+-----+ + // |<------ next ----->|<---- current ---->| + word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_)); + Word next_word = load<Word>(bitmap_ + sizeof(Word)); + current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_); + next_word = (next_word & ~mask_) | (word & mask_); + store<Word>(bitmap_, current_data.word_); + store<Word>(bitmap_ + sizeof(Word), next_word); + current_data.word_ = next_word; + } else { + store<Word>(bitmap_, word); + } + bitmap_ += sizeof(Word); + } + + void PutNextTrailingByte(uint8_t byte, int valid_bits) { + if (valid_bits == 8) { + if (may_have_byte_offset && offset_) { + byte = (byte << offset_) | (byte >> (8 - offset_)); + uint8_t next_byte = load<uint8_t>(bitmap_ + 1); + current_data.epi.byte_ 
= (current_data.epi.byte_ & mask_) | (byte & ~mask_); + next_byte = (next_byte & ~mask_) | (byte & mask_); + store<uint8_t>(bitmap_, current_data.epi.byte_); + store<uint8_t>(bitmap_ + 1, next_byte); + current_data.epi.byte_ = next_byte; + } else { + store<uint8_t>(bitmap_, byte); + } + ++bitmap_; + } else { + assert(valid_bits > 0); + assert(valid_bits < 8); + assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_); + internal::BitmapWriter writer(bitmap_, offset_, valid_bits); + for (int i = 0; i < valid_bits; ++i) { + (byte & 0x01) ? writer.Set() : writer.Clear(); + writer.Next(); + byte >>= 1; + } + writer.Finish(); + } + } + + private: + int64_t offset_; + uint8_t* bitmap_; + + const uint8_t* bitmap_end_; + uint64_t mask_; + union { + Word word_; + struct { +#if ARROW_LITTLE_ENDIAN == 0 + uint8_t padding_bytes_[sizeof(Word) - 1]; +#endif + uint8_t byte_; + } epi; + } current_data; + + template <typename DType> + DType load(const uint8_t* bitmap) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap)); + } + + template <typename DType> + void store(uint8_t* bitmap, DType data) { + assert(bitmap + sizeof(DType) <= bitmap_end_); + util::SafeStore(bitmap, bit_util::FromLittleEndian(data)); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h new file mode 100644 index 0000000000000000000000000000000000000000..dd85c1638c7bfcd9cfd4034fb80ce775aaa92ce9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/endian.h" +#include "arrow/util/visibility.h" + +#include <cstdint> + +namespace arrow { +namespace internal { + +ARROW_EXPORT +int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); +ARROW_EXPORT +int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h new file mode 100644 index 0000000000000000000000000000000000000000..4f45619b2a770e3e6589af03012641ceb833b115 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h @@ -0,0 +1,5642 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This file was generated by script which is modified from its original version in +// GitHub. Original source: +// https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/. +// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint8_t* unpack0_64(const uint8_t* in, uint64_t* out) { + for (int k = 0; k < 32; k += 1) { + out[k] = 0; + } + return in; +} + +inline const uint8_t* unpack1_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 1) & mask; + out[2] = (w0 >> 2) & mask; + out[3] = (w0 >> 3) & mask; + out[4] = (w0 >> 4) & mask; + out[5] = (w0 >> 5) & mask; + out[6] = (w0 >> 6) & mask; + out[7] = (w0 >> 7) & mask; + out[8] = (w0 >> 8) & mask; + out[9] = (w0 >> 9) & mask; + out[10] = (w0 >> 10) & mask; + out[11] = (w0 >> 11) & mask; + out[12] = (w0 >> 12) & mask; + out[13] = (w0 >> 13) & mask; + out[14] = (w0 >> 14) & mask; + out[15] = (w0 >> 15) & mask; + out[16] = (w0 >> 16) & mask; + out[17] = (w0 >> 17) & mask; + out[18] = (w0 >> 18) & mask; + out[19] = (w0 >> 19) & mask; + out[20] = (w0 >> 20) & mask; + out[21] = (w0 >> 21) & mask; + out[22] = (w0 >> 22) & mask; + out[23] = (w0 >> 23) & mask; + out[24] = (w0 >> 24) & mask; + out[25] = (w0 >> 25) & mask; + out[26] = (w0 >> 26) & mask; + out[27] = (w0 >> 27) & mask; + out[28] = (w0 >> 28) & mask; + out[29] = (w0 >> 29) & mask; + out[30] = (w0 >> 30) & mask; + out[31] = (w0 >> 31) & mask; + + return in; +} + +inline const uint8_t* unpack2_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 3ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 2) & mask; + out[2] = (w0 >> 4) & mask; + out[3] = (w0 >> 6) & mask; + out[4] = (w0 >> 8) & mask; + out[5] = (w0 >> 10) & mask; + out[6] = (w0 >> 12) & mask; + out[7] = (w0 >> 14) & mask; + out[8] = (w0 >> 16) & mask; + out[9] = (w0 >> 18) & mask; + out[10] = (w0 >> 20) & mask; + out[11] = (w0 >> 22) & mask; + out[12] = (w0 >> 24) & mask; + out[13] = (w0 >> 26) & mask; + out[14] = (w0 >> 28) & mask; + out[15] = (w0 >> 30) & mask; + out[16] = (w0 >> 32) & mask; + out[17] = (w0 >> 34) & mask; + out[18] = (w0 >> 36) & mask; + out[19] = (w0 >> 38) & mask; + out[20] = (w0 >> 40) & mask; + out[21] = (w0 >> 42) & mask; + out[22] = (w0 >> 44) & mask; + out[23] = (w0 >> 46) & mask; + out[24] = (w0 >> 48) & mask; + out[25] = (w0 >> 50) & mask; + out[26] = (w0 >> 52) & mask; + out[27] = (w0 >> 54) & mask; + out[28] = (w0 >> 56) & mask; + out[29] = (w0 >> 58) & mask; + 
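// The remaining entries take the top bits of w0; out[31] is the high two bits, so it needs no mask. +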
out[30] = (w0 >> 60) & mask; + out[31] = w0 >> 62; + + return in; +} + +inline const uint8_t* unpack3_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 7ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 3) & mask; + out[2] = (w0 >> 6) & mask; + out[3] = (w0 >> 9) & mask; + out[4] = (w0 >> 12) & mask; + out[5] = (w0 >> 15) & mask; + out[6] = (w0 >> 18) & mask; + out[7] = (w0 >> 21) & mask; + out[8] = (w0 >> 24) & mask; + out[9] = (w0 >> 27) & mask; + out[10] = (w0 >> 30) & mask; + out[11] = (w0 >> 33) & mask; + out[12] = (w0 >> 36) & mask; + out[13] = (w0 >> 39) & mask; + out[14] = (w0 >> 42) & mask; + out[15] = (w0 >> 45) & mask; + out[16] = (w0 >> 48) & mask; + out[17] = (w0 >> 51) & mask; + out[18] = (w0 >> 54) & mask; + out[19] = (w0 >> 57) & mask; + out[20] = (w0 >> 60) & mask; + out[21] = ((w0 >> 63) | (w1 << 1)) & mask; + out[22] = (w1 >> 2) & mask; + out[23] = (w1 >> 5) & mask; + out[24] = (w1 >> 8) & mask; + out[25] = (w1 >> 11) & mask; + out[26] = (w1 >> 14) & mask; + out[27] = (w1 >> 17) & mask; + out[28] = (w1 >> 20) & mask; + out[29] = (w1 >> 23) & mask; + out[30] = (w1 >> 26) & mask; + out[31] = (w1 >> 29) & mask; + + return in; +} + +inline const uint8_t* unpack4_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 15ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 4) & mask; + out[2] = (w0 >> 8) & mask; + out[3] = (w0 >> 12) & mask; + out[4] = (w0 >> 16) & mask; + out[5] = (w0 >> 20) & mask; + out[6] = (w0 >> 24) & mask; + out[7] = (w0 >> 28) & mask; + out[8] = (w0 >> 32) & mask; + out[9] = (w0 >> 36) & mask; + out[10] = (w0 >> 40) & mask; + out[11] = (w0 >> 44) & mask; + out[12] = (w0 >> 48) & mask; + out[13] = (w0 >> 52) & mask; + out[14] = (w0 >> 56) & mask; + out[15] = w0 >> 60; + out[16] = (w1)&mask; + out[17] = (w1 >> 4) & mask; + out[18] = (w1 >> 8) & mask; + out[19] = (w1 >> 12) & mask; + out[20] = (w1 >> 16) & mask; + out[21] = (w1 >> 20) & mask; + out[22] = (w1 >> 24) & mask; + out[23] = (w1 >> 28) & mask; + out[24] = (w1 >> 32) & mask; + out[25] = (w1 >> 36) & mask; + out[26] = (w1 >> 40) & mask; + out[27] = (w1 >> 44) & mask; + out[28] = (w1 >> 48) & mask; + out[29] = (w1 >> 52) & mask; + out[30] = (w1 >> 56) & mask; + out[31] = w1 >> 60; + + return in; +} + +inline const uint8_t* unpack5_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 31ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 5) & mask; + out[2] = (w0 >> 10) & mask; + out[3] = (w0 >> 15) & mask; + out[4] = (w0 >> 20) & mask; + out[5] = (w0 >> 25) & mask; + out[6] = (w0 >> 30) & mask; + out[7] = (w0 >> 35) & mask; + out[8] = (w0 >> 40) & mask; + out[9] = (w0 >> 45) & mask; + out[10] = (w0 >> 50) & mask; + out[11] = (w0 >> 55) & mask; + out[12] = ((w0 >> 60) | (w1 << 4)) & mask; + out[13] = (w1 >> 1) & mask; + out[14] = (w1 >> 6) & mask; + out[15] = (w1 >> 11) & mask; + out[16] = (w1 >> 16) & mask; + out[17] = (w1 >> 21) & mask; 
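+  // out[25] below stitches a 5-bit value that straddles two 64-bit words: the high 3 bits of w1 and the low 2 bits of w2.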
+ out[18] = (w1 >> 26) & mask; + out[19] = (w1 >> 31) & mask; + out[20] = (w1 >> 36) & mask; + out[21] = (w1 >> 41) & mask; + out[22] = (w1 >> 46) & mask; + out[23] = (w1 >> 51) & mask; + out[24] = (w1 >> 56) & mask; + out[25] = ((w1 >> 61) | (w2 << 3)) & mask; + out[26] = (w2 >> 2) & mask; + out[27] = (w2 >> 7) & mask; + out[28] = (w2 >> 12) & mask; + out[29] = (w2 >> 17) & mask; + out[30] = (w2 >> 22) & mask; + out[31] = (w2 >> 27) & mask; + + return in; +} + +inline const uint8_t* unpack6_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 63ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 6) & mask; + out[2] = (w0 >> 12) & mask; + out[3] = (w0 >> 18) & mask; + out[4] = (w0 >> 24) & mask; + out[5] = (w0 >> 30) & mask; + out[6] = (w0 >> 36) & mask; + out[7] = (w0 >> 42) & mask; + out[8] = (w0 >> 48) & mask; + out[9] = (w0 >> 54) & mask; + out[10] = ((w0 >> 60) | (w1 << 4)) & mask; + out[11] = (w1 >> 2) & mask; + out[12] = (w1 >> 8) & mask; + out[13] = (w1 >> 14) & mask; + out[14] = (w1 >> 20) & mask; + out[15] = (w1 >> 26) & mask; + out[16] = (w1 >> 32) & mask; + out[17] = (w1 >> 38) & mask; + out[18] = (w1 >> 44) & mask; + out[19] = (w1 >> 50) & mask; + out[20] = (w1 >> 56) & mask; + out[21] = ((w1 >> 62) | (w2 << 2)) & mask; + out[22] = (w2 >> 4) & mask; + out[23] = (w2 >> 10) & mask; + out[24] = (w2 >> 16) & mask; + out[25] = (w2 >> 22) & mask; + out[26] = (w2 >> 28) & mask; + out[27] = (w2 >> 34) & mask; + out[28] = (w2 >> 40) & mask; + out[29] = (w2 >> 46) & mask; + out[30] = (w2 >> 52) & mask; + out[31] = w2 >> 58; + + return in; +} + +inline const uint8_t* unpack7_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 127ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 7) & mask; + out[2] = (w0 >> 14) & mask; + out[3] = (w0 >> 21) & mask; + out[4] = (w0 >> 28) & mask; + out[5] = (w0 >> 35) & mask; + out[6] = (w0 >> 42) & mask; + out[7] = (w0 >> 49) & mask; + out[8] = (w0 >> 56) & mask; + out[9] = ((w0 >> 63) | (w1 << 1)) & mask; + out[10] = (w1 >> 6) & mask; + out[11] = (w1 >> 13) & mask; + out[12] = (w1 >> 20) & mask; + out[13] = (w1 >> 27) & mask; + out[14] = (w1 >> 34) & mask; + out[15] = (w1 >> 41) & mask; + out[16] = (w1 >> 48) & mask; + out[17] = (w1 >> 55) & mask; + out[18] = ((w1 >> 62) | (w2 << 2)) & mask; + out[19] = (w2 >> 5) & mask; + out[20] = (w2 >> 12) & mask; + out[21] = (w2 >> 19) & mask; + out[22] = (w2 >> 26) & mask; + out[23] = (w2 >> 33) & mask; + out[24] = (w2 >> 40) & mask; + out[25] = (w2 >> 47) & mask; + out[26] = (w2 >> 54) & mask; + out[27] = ((w2 >> 61) | (w3 << 3)) & mask; + out[28] = (w3 >> 4) & mask; + out[29] = (w3 >> 11) & mask; + out[30] = (w3 >> 18) & mask; + out[31] = (w3 >> 25) & mask; + + return in; +} + +inline const uint8_t* unpack8_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 255ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = 
arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 8) & mask; + out[2] = (w0 >> 16) & mask; + out[3] = (w0 >> 24) & mask; + out[4] = (w0 >> 32) & mask; + out[5] = (w0 >> 40) & mask; + out[6] = (w0 >> 48) & mask; + out[7] = w0 >> 56; + out[8] = (w1)&mask; + out[9] = (w1 >> 8) & mask; + out[10] = (w1 >> 16) & mask; + out[11] = (w1 >> 24) & mask; + out[12] = (w1 >> 32) & mask; + out[13] = (w1 >> 40) & mask; + out[14] = (w1 >> 48) & mask; + out[15] = w1 >> 56; + out[16] = (w2)&mask; + out[17] = (w2 >> 8) & mask; + out[18] = (w2 >> 16) & mask; + out[19] = (w2 >> 24) & mask; + out[20] = (w2 >> 32) & mask; + out[21] = (w2 >> 40) & mask; + out[22] = (w2 >> 48) & mask; + out[23] = w2 >> 56; + out[24] = (w3)&mask; + out[25] = (w3 >> 8) & mask; + out[26] = (w3 >> 16) & mask; + out[27] = (w3 >> 24) & mask; + out[28] = (w3 >> 32) & mask; + out[29] = (w3 >> 40) & mask; + out[30] = (w3 >> 48) & mask; + out[31] = w3 >> 56; + + return in; +} + +inline const uint8_t* unpack9_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 511ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 9) & mask; + out[2] = (w0 >> 18) & mask; + out[3] = (w0 >> 27) & mask; + out[4] = (w0 >> 36) & mask; + out[5] = (w0 >> 45) & mask; + out[6] = (w0 >> 54) & mask; + out[7] = ((w0 >> 63) | (w1 << 1)) & mask; + out[8] = (w1 >> 8) & mask; + out[9] = (w1 >> 17) & mask; + out[10] = (w1 >> 26) & mask; + out[11] = (w1 >> 35) & mask; + out[12] = (w1 >> 44) & mask; + out[13] = (w1 >> 53) & mask; + out[14] = ((w1 >> 62) | (w2 << 2)) & mask; + out[15] = (w2 >> 7) & mask; + out[16] = (w2 >> 16) & mask; + out[17] = (w2 >> 25) & mask; + out[18] = (w2 >> 34) & mask; + out[19] = (w2 >> 43) & mask; + out[20] = (w2 >> 52) & mask; + out[21] = ((w2 >> 61) | (w3 << 3)) & mask; + out[22] = (w3 >> 6) & mask; + out[23] = (w3 >> 15) & mask; + out[24] = (w3 >> 24) & mask; + out[25] = (w3 >> 33) & mask; + out[26] = (w3 >> 42) & mask; + out[27] = (w3 >> 51) & mask; + out[28] = ((w3 >> 60) | (w4 << 4)) & mask; + out[29] = (w4 >> 5) & mask; + out[30] = (w4 >> 14) & mask; + out[31] = (w4 >> 23) & mask; + + return in; +} + +inline const uint8_t* unpack10_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1023ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 10) & mask; + out[2] = (w0 >> 20) & mask; + 
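// Six 10-bit values fit wholly in w0; out[6] then merges the top 4 bits of w0 with the low 6 bits of w1. +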
out[3] = (w0 >> 30) & mask; + out[4] = (w0 >> 40) & mask; + out[5] = (w0 >> 50) & mask; + out[6] = ((w0 >> 60) | (w1 << 4)) & mask; + out[7] = (w1 >> 6) & mask; + out[8] = (w1 >> 16) & mask; + out[9] = (w1 >> 26) & mask; + out[10] = (w1 >> 36) & mask; + out[11] = (w1 >> 46) & mask; + out[12] = ((w1 >> 56) | (w2 << 8)) & mask; + out[13] = (w2 >> 2) & mask; + out[14] = (w2 >> 12) & mask; + out[15] = (w2 >> 22) & mask; + out[16] = (w2 >> 32) & mask; + out[17] = (w2 >> 42) & mask; + out[18] = (w2 >> 52) & mask; + out[19] = ((w2 >> 62) | (w3 << 2)) & mask; + out[20] = (w3 >> 8) & mask; + out[21] = (w3 >> 18) & mask; + out[22] = (w3 >> 28) & mask; + out[23] = (w3 >> 38) & mask; + out[24] = (w3 >> 48) & mask; + out[25] = ((w3 >> 58) | (w4 << 6)) & mask; + out[26] = (w4 >> 4) & mask; + out[27] = (w4 >> 14) & mask; + out[28] = (w4 >> 24) & mask; + out[29] = (w4 >> 34) & mask; + out[30] = (w4 >> 44) & mask; + out[31] = w4 >> 54; + + return in; +} + +inline const uint8_t* unpack11_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2047ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 11) & mask; + out[2] = (w0 >> 22) & mask; + out[3] = (w0 >> 33) & mask; + out[4] = (w0 >> 44) & mask; + out[5] = ((w0 >> 55) | (w1 << 9)) & mask; + out[6] = (w1 >> 2) & mask; + out[7] = (w1 >> 13) & mask; + out[8] = (w1 >> 24) & mask; + out[9] = (w1 >> 35) & mask; + out[10] = (w1 >> 46) & mask; + out[11] = ((w1 >> 57) | (w2 << 7)) & mask; + out[12] = (w2 >> 4) & mask; + out[13] = (w2 >> 15) & mask; + out[14] = (w2 >> 26) & mask; + out[15] = (w2 >> 37) & mask; + out[16] = (w2 >> 48) & mask; + out[17] = ((w2 >> 59) | (w3 << 5)) & mask; + out[18] = (w3 >> 6) & mask; + out[19] = (w3 >> 17) & mask; + out[20] = (w3 >> 28) & mask; + out[21] = (w3 >> 39) & mask; + out[22] = (w3 >> 50) & mask; + out[23] = ((w3 >> 61) | (w4 << 3)) & mask; + out[24] = (w4 >> 8) & mask; + out[25] = (w4 >> 19) & mask; + out[26] = (w4 >> 30) & mask; + out[27] = (w4 >> 41) & mask; + out[28] = (w4 >> 52) & mask; + out[29] = ((w4 >> 63) | (w5 << 1)) & mask; + out[30] = (w5 >> 10) & mask; + out[31] = (w5 >> 21) & mask; + + return in; +} + +inline const uint8_t* unpack12_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4095ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 12) & mask; + out[2] = (w0 >> 24) & mask; + out[3] = (w0 >> 36) & mask; + out[4] = (w0 >> 48) & mask; + out[5] = ((w0 >> 60) | (w1 << 4)) & mask; + 
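// Sixteen 12-bit outputs consume exactly three 64-bit words (16 * 12 = 192), so the pattern restarts at out[16] with w3. +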
out[6] = (w1 >> 8) & mask; + out[7] = (w1 >> 20) & mask; + out[8] = (w1 >> 32) & mask; + out[9] = (w1 >> 44) & mask; + out[10] = ((w1 >> 56) | (w2 << 8)) & mask; + out[11] = (w2 >> 4) & mask; + out[12] = (w2 >> 16) & mask; + out[13] = (w2 >> 28) & mask; + out[14] = (w2 >> 40) & mask; + out[15] = w2 >> 52; + out[16] = (w3)&mask; + out[17] = (w3 >> 12) & mask; + out[18] = (w3 >> 24) & mask; + out[19] = (w3 >> 36) & mask; + out[20] = (w3 >> 48) & mask; + out[21] = ((w3 >> 60) | (w4 << 4)) & mask; + out[22] = (w4 >> 8) & mask; + out[23] = (w4 >> 20) & mask; + out[24] = (w4 >> 32) & mask; + out[25] = (w4 >> 44) & mask; + out[26] = ((w4 >> 56) | (w5 << 8)) & mask; + out[27] = (w5 >> 4) & mask; + out[28] = (w5 >> 16) & mask; + out[29] = (w5 >> 28) & mask; + out[30] = (w5 >> 40) & mask; + out[31] = w5 >> 52; + + return in; +} + +inline const uint8_t* unpack13_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8191ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 13) & mask; + out[2] = (w0 >> 26) & mask; + out[3] = (w0 >> 39) & mask; + out[4] = ((w0 >> 52) | (w1 << 12)) & mask; + out[5] = (w1 >> 1) & mask; + out[6] = (w1 >> 14) & mask; + out[7] = (w1 >> 27) & mask; + out[8] = (w1 >> 40) & mask; + out[9] = ((w1 >> 53) | (w2 << 11)) & mask; + out[10] = (w2 >> 2) & mask; + out[11] = (w2 >> 15) & mask; + out[12] = (w2 >> 28) & mask; + out[13] = (w2 >> 41) & mask; + out[14] = ((w2 >> 54) | (w3 << 10)) & mask; + out[15] = (w3 >> 3) & mask; + out[16] = (w3 >> 16) & mask; + out[17] = (w3 >> 29) & mask; + out[18] = (w3 >> 42) & mask; + out[19] = ((w3 >> 55) | (w4 << 9)) & mask; + out[20] = (w4 >> 4) & mask; + out[21] = (w4 >> 17) & mask; + out[22] = (w4 >> 30) & mask; + out[23] = (w4 >> 43) & mask; + out[24] = ((w4 >> 56) | (w5 << 8)) & mask; + out[25] = (w5 >> 5) & mask; + out[26] = (w5 >> 18) & mask; + out[27] = (w5 >> 31) & mask; + out[28] = (w5 >> 44) & mask; + out[29] = ((w5 >> 57) | (w6 << 7)) & mask; + out[30] = (w6 >> 6) & mask; + out[31] = (w6 >> 19) & mask; + + return in; +} + +inline const uint8_t* unpack14_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16383ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 14) & mask; + out[2] = (w0 >> 28) & mask; + out[3] = (w0 >> 42) & 
mask; + out[4] = ((w0 >> 56) | (w1 << 8)) & mask; + out[5] = (w1 >> 6) & mask; + out[6] = (w1 >> 20) & mask; + out[7] = (w1 >> 34) & mask; + out[8] = (w1 >> 48) & mask; + out[9] = ((w1 >> 62) | (w2 << 2)) & mask; + out[10] = (w2 >> 12) & mask; + out[11] = (w2 >> 26) & mask; + out[12] = (w2 >> 40) & mask; + out[13] = ((w2 >> 54) | (w3 << 10)) & mask; + out[14] = (w3 >> 4) & mask; + out[15] = (w3 >> 18) & mask; + out[16] = (w3 >> 32) & mask; + out[17] = (w3 >> 46) & mask; + out[18] = ((w3 >> 60) | (w4 << 4)) & mask; + out[19] = (w4 >> 10) & mask; + out[20] = (w4 >> 24) & mask; + out[21] = (w4 >> 38) & mask; + out[22] = ((w4 >> 52) | (w5 << 12)) & mask; + out[23] = (w5 >> 2) & mask; + out[24] = (w5 >> 16) & mask; + out[25] = (w5 >> 30) & mask; + out[26] = (w5 >> 44) & mask; + out[27] = ((w5 >> 58) | (w6 << 6)) & mask; + out[28] = (w6 >> 8) & mask; + out[29] = (w6 >> 22) & mask; + out[30] = (w6 >> 36) & mask; + out[31] = w6 >> 50; + + return in; +} + +inline const uint8_t* unpack15_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 32767ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 15) & mask; + out[2] = (w0 >> 30) & mask; + out[3] = (w0 >> 45) & mask; + out[4] = ((w0 >> 60) | (w1 << 4)) & mask; + out[5] = (w1 >> 11) & mask; + out[6] = (w1 >> 26) & mask; + out[7] = (w1 >> 41) & mask; + out[8] = ((w1 >> 56) | (w2 << 8)) & mask; + out[9] = (w2 >> 7) & mask; + out[10] = (w2 >> 22) & mask; + out[11] = (w2 >> 37) & mask; + out[12] = ((w2 >> 52) | (w3 << 12)) & mask; + out[13] = (w3 >> 3) & mask; + out[14] = (w3 >> 18) & mask; + out[15] = (w3 >> 33) & mask; + out[16] = (w3 >> 48) & mask; + out[17] = ((w3 >> 63) | (w4 << 1)) & mask; + out[18] = (w4 >> 14) & mask; + out[19] = (w4 >> 29) & mask; + out[20] = (w4 >> 44) & mask; + out[21] = ((w4 >> 59) | (w5 << 5)) & mask; + out[22] = (w5 >> 10) & mask; + out[23] = (w5 >> 25) & mask; + out[24] = (w5 >> 40) & mask; + out[25] = ((w5 >> 55) | (w6 << 9)) & mask; + out[26] = (w6 >> 6) & mask; + out[27] = (w6 >> 21) & mask; + out[28] = (w6 >> 36) & mask; + out[29] = ((w6 >> 51) | (w7 << 13)) & mask; + out[30] = (w7 >> 2) & mask; + out[31] = (w7 >> 17) & mask; + + return in; +} + +inline const uint8_t* unpack16_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 65535ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = 
arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 16) & mask; + out[2] = (w0 >> 32) & mask; + out[3] = w0 >> 48; + out[4] = (w1)&mask; + out[5] = (w1 >> 16) & mask; + out[6] = (w1 >> 32) & mask; + out[7] = w1 >> 48; + out[8] = (w2)&mask; + out[9] = (w2 >> 16) & mask; + out[10] = (w2 >> 32) & mask; + out[11] = w2 >> 48; + out[12] = (w3)&mask; + out[13] = (w3 >> 16) & mask; + out[14] = (w3 >> 32) & mask; + out[15] = w3 >> 48; + out[16] = (w4)&mask; + out[17] = (w4 >> 16) & mask; + out[18] = (w4 >> 32) & mask; + out[19] = w4 >> 48; + out[20] = (w5)&mask; + out[21] = (w5 >> 16) & mask; + out[22] = (w5 >> 32) & mask; + out[23] = w5 >> 48; + out[24] = (w6)&mask; + out[25] = (w6 >> 16) & mask; + out[26] = (w6 >> 32) & mask; + out[27] = w6 >> 48; + out[28] = (w7)&mask; + out[29] = (w7 >> 16) & mask; + out[30] = (w7 >> 32) & mask; + out[31] = w7 >> 48; + + return in; +} + +inline const uint8_t* unpack17_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 131071ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 17) & mask; + out[2] = (w0 >> 34) & mask; + out[3] = ((w0 >> 51) | (w1 << 13)) & mask; + out[4] = (w1 >> 4) & mask; + out[5] = (w1 >> 21) & mask; + out[6] = (w1 >> 38) & mask; + out[7] = ((w1 >> 55) | (w2 << 9)) & mask; + out[8] = (w2 >> 8) & mask; + out[9] = (w2 >> 25) & mask; + out[10] = (w2 >> 42) & mask; + out[11] = ((w2 >> 59) | (w3 << 5)) & mask; + out[12] = (w3 >> 12) & mask; + out[13] = (w3 >> 29) & mask; + out[14] = (w3 >> 46) & mask; + out[15] = ((w3 >> 63) | (w4 << 1)) & mask; + out[16] = (w4 >> 16) & mask; + out[17] = (w4 >> 33) & mask; + out[18] = ((w4 >> 50) | (w5 << 14)) & mask; + out[19] = (w5 >> 3) & mask; + out[20] = (w5 >> 20) & mask; + out[21] = (w5 >> 37) & mask; + out[22] = ((w5 >> 54) | (w6 << 10)) & mask; + out[23] = (w6 >> 7) & mask; + out[24] = (w6 >> 24) & mask; + out[25] = (w6 >> 41) & mask; + out[26] = ((w6 >> 58) | (w7 << 6)) & mask; + out[27] = (w7 >> 11) & mask; + out[28] = (w7 >> 28) & mask; + out[29] = (w7 >> 45) & mask; + out[30] = ((w7 >> 62) | (w8 << 2)) & mask; + out[31] = (w8 >> 15) & mask; + + return in; +} + +inline const uint8_t* unpack18_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 262143ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + 
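// Each word is read with an unaligned-safe load and normalized from little-endian storage; FromLittleEndian is a no-op on little-endian hosts. +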
in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 18) & mask; + out[2] = (w0 >> 36) & mask; + out[3] = ((w0 >> 54) | (w1 << 10)) & mask; + out[4] = (w1 >> 8) & mask; + out[5] = (w1 >> 26) & mask; + out[6] = (w1 >> 44) & mask; + out[7] = ((w1 >> 62) | (w2 << 2)) & mask; + out[8] = (w2 >> 16) & mask; + out[9] = (w2 >> 34) & mask; + out[10] = ((w2 >> 52) | (w3 << 12)) & mask; + out[11] = (w3 >> 6) & mask; + out[12] = (w3 >> 24) & mask; + out[13] = (w3 >> 42) & mask; + out[14] = ((w3 >> 60) | (w4 << 4)) & mask; + out[15] = (w4 >> 14) & mask; + out[16] = (w4 >> 32) & mask; + out[17] = ((w4 >> 50) | (w5 << 14)) & mask; + out[18] = (w5 >> 4) & mask; + out[19] = (w5 >> 22) & mask; + out[20] = (w5 >> 40) & mask; + out[21] = ((w5 >> 58) | (w6 << 6)) & mask; + out[22] = (w6 >> 12) & mask; + out[23] = (w6 >> 30) & mask; + out[24] = ((w6 >> 48) | (w7 << 16)) & mask; + out[25] = (w7 >> 2) & mask; + out[26] = (w7 >> 20) & mask; + out[27] = (w7 >> 38) & mask; + out[28] = ((w7 >> 56) | (w8 << 8)) & mask; + out[29] = (w8 >> 10) & mask; + out[30] = (w8 >> 28) & mask; + out[31] = w8 >> 46; + + return in; +} + +inline const uint8_t* unpack19_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 524287ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 19) & mask; + out[2] = (w0 >> 38) & mask; + out[3] = ((w0 >> 57) | (w1 << 7)) & mask; + out[4] = (w1 >> 12) & mask; + out[5] = (w1 >> 31) & mask; + out[6] = ((w1 >> 50) | (w2 << 14)) & mask; + out[7] = (w2 >> 5) & mask; + out[8] = (w2 >> 24) & mask; + out[9] = (w2 >> 43) & mask; + out[10] = ((w2 >> 62) | (w3 << 2)) & mask; + out[11] = (w3 >> 17) & mask; + out[12] = (w3 >> 36) & mask; + out[13] = ((w3 >> 55) | (w4 << 9)) & mask; + out[14] = (w4 >> 10) & mask; + out[15] = (w4 >> 29) & mask; + out[16] = ((w4 >> 48) | (w5 << 16)) & mask; + out[17] = (w5 >> 3) & mask; + out[18] = (w5 >> 22) & mask; + out[19] = (w5 >> 41) & mask; + out[20] = ((w5 >> 60) | (w6 << 4)) & mask; + out[21] = (w6 >> 15) & mask; + out[22] = (w6 >> 34) & mask; + out[23] = ((w6 >> 53) | (w7 << 11)) & 
mask; + out[24] = (w7 >> 8) & mask; + out[25] = (w7 >> 27) & mask; + out[26] = ((w7 >> 46) | (w8 << 18)) & mask; + out[27] = (w8 >> 1) & mask; + out[28] = (w8 >> 20) & mask; + out[29] = (w8 >> 39) & mask; + out[30] = ((w8 >> 58) | (w9 << 6)) & mask; + out[31] = (w9 >> 13) & mask; + + return in; +} + +inline const uint8_t* unpack20_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1048575ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 20) & mask; + out[2] = (w0 >> 40) & mask; + out[3] = ((w0 >> 60) | (w1 << 4)) & mask; + out[4] = (w1 >> 16) & mask; + out[5] = (w1 >> 36) & mask; + out[6] = ((w1 >> 56) | (w2 << 8)) & mask; + out[7] = (w2 >> 12) & mask; + out[8] = (w2 >> 32) & mask; + out[9] = ((w2 >> 52) | (w3 << 12)) & mask; + out[10] = (w3 >> 8) & mask; + out[11] = (w3 >> 28) & mask; + out[12] = ((w3 >> 48) | (w4 << 16)) & mask; + out[13] = (w4 >> 4) & mask; + out[14] = (w4 >> 24) & mask; + out[15] = w4 >> 44; + out[16] = (w5)&mask; + out[17] = (w5 >> 20) & mask; + out[18] = (w5 >> 40) & mask; + out[19] = ((w5 >> 60) | (w6 << 4)) & mask; + out[20] = (w6 >> 16) & mask; + out[21] = (w6 >> 36) & mask; + out[22] = ((w6 >> 56) | (w7 << 8)) & mask; + out[23] = (w7 >> 12) & mask; + out[24] = (w7 >> 32) & mask; + out[25] = ((w7 >> 52) | (w8 << 12)) & mask; + out[26] = (w8 >> 8) & mask; + out[27] = (w8 >> 28) & mask; + out[28] = ((w8 >> 48) | (w9 << 16)) & mask; + out[29] = (w9 >> 4) & mask; + out[30] = (w9 >> 24) & mask; + out[31] = w9 >> 44; + + return in; +} + +inline const uint8_t* unpack21_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2097151ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = 
arrow::bit_util::FromLittleEndian(w10); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 21) & mask; + out[2] = (w0 >> 42) & mask; + out[3] = ((w0 >> 63) | (w1 << 1)) & mask; + out[4] = (w1 >> 20) & mask; + out[5] = (w1 >> 41) & mask; + out[6] = ((w1 >> 62) | (w2 << 2)) & mask; + out[7] = (w2 >> 19) & mask; + out[8] = (w2 >> 40) & mask; + out[9] = ((w2 >> 61) | (w3 << 3)) & mask; + out[10] = (w3 >> 18) & mask; + out[11] = (w3 >> 39) & mask; + out[12] = ((w3 >> 60) | (w4 << 4)) & mask; + out[13] = (w4 >> 17) & mask; + out[14] = (w4 >> 38) & mask; + out[15] = ((w4 >> 59) | (w5 << 5)) & mask; + out[16] = (w5 >> 16) & mask; + out[17] = (w5 >> 37) & mask; + out[18] = ((w5 >> 58) | (w6 << 6)) & mask; + out[19] = (w6 >> 15) & mask; + out[20] = (w6 >> 36) & mask; + out[21] = ((w6 >> 57) | (w7 << 7)) & mask; + out[22] = (w7 >> 14) & mask; + out[23] = (w7 >> 35) & mask; + out[24] = ((w7 >> 56) | (w8 << 8)) & mask; + out[25] = (w8 >> 13) & mask; + out[26] = (w8 >> 34) & mask; + out[27] = ((w8 >> 55) | (w9 << 9)) & mask; + out[28] = (w9 >> 12) & mask; + out[29] = (w9 >> 33) & mask; + out[30] = ((w9 >> 54) | (w10 << 10)) & mask; + out[31] = (w10 >> 11) & mask; + + return in; +} + +inline const uint8_t* unpack22_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4194303ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 22) & mask; + out[2] = ((w0 >> 44) | (w1 << 20)) & mask; + out[3] = (w1 >> 2) & mask; + out[4] = (w1 >> 24) & mask; + out[5] = ((w1 >> 46) | (w2 << 18)) & mask; + out[6] = (w2 >> 4) & mask; + out[7] = (w2 >> 26) & mask; + out[8] = ((w2 >> 48) | (w3 << 16)) & mask; + out[9] = (w3 >> 6) & mask; + out[10] = (w3 >> 28) & mask; + out[11] = ((w3 >> 50) | (w4 << 14)) & mask; + out[12] = (w4 >> 8) & mask; + out[13] = (w4 >> 30) & mask; + out[14] = ((w4 >> 52) | (w5 << 12)) & mask; + out[15] = (w5 >> 10) & mask; + out[16] = (w5 >> 32) & mask; + out[17] = ((w5 >> 54) | (w6 << 10)) & mask; + out[18] = (w6 >> 12) & mask; + out[19] = (w6 >> 34) & mask; + out[20] = ((w6 >> 56) | (w7 << 8)) & mask; + out[21] = (w7 >> 14) & mask; + out[22] = (w7 >> 36) & mask; + out[23] = ((w7 >> 58) | (w8 << 6)) & mask; + out[24] = (w8 >> 16) & mask; + out[25] = (w8 >> 38) & mask; + out[26] = ((w8 >> 60) | (w9 << 4)) & mask; + out[27] = (w9 >> 18) & mask; + out[28] = (w9 >> 40) & mask; + out[29] = ((w9 >> 62) | (w10 << 2)) & mask; + out[30] = (w10 >> 20) & mask; + out[31] = w10 >> 42; + + return in; +} + +inline const uint8_t* unpack23_64(const uint8_t* in, uint64_t* out) { + const 
uint64_t mask = 8388607ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 23) & mask; + out[2] = ((w0 >> 46) | (w1 << 18)) & mask; + out[3] = (w1 >> 5) & mask; + out[4] = (w1 >> 28) & mask; + out[5] = ((w1 >> 51) | (w2 << 13)) & mask; + out[6] = (w2 >> 10) & mask; + out[7] = (w2 >> 33) & mask; + out[8] = ((w2 >> 56) | (w3 << 8)) & mask; + out[9] = (w3 >> 15) & mask; + out[10] = (w3 >> 38) & mask; + out[11] = ((w3 >> 61) | (w4 << 3)) & mask; + out[12] = (w4 >> 20) & mask; + out[13] = ((w4 >> 43) | (w5 << 21)) & mask; + out[14] = (w5 >> 2) & mask; + out[15] = (w5 >> 25) & mask; + out[16] = ((w5 >> 48) | (w6 << 16)) & mask; + out[17] = (w6 >> 7) & mask; + out[18] = (w6 >> 30) & mask; + out[19] = ((w6 >> 53) | (w7 << 11)) & mask; + out[20] = (w7 >> 12) & mask; + out[21] = (w7 >> 35) & mask; + out[22] = ((w7 >> 58) | (w8 << 6)) & mask; + out[23] = (w8 >> 17) & mask; + out[24] = (w8 >> 40) & mask; + out[25] = ((w8 >> 63) | (w9 << 1)) & mask; + out[26] = (w9 >> 22) & mask; + out[27] = ((w9 >> 45) | (w10 << 19)) & mask; + out[28] = (w10 >> 4) & mask; + out[29] = (w10 >> 27) & mask; + out[30] = ((w10 >> 50) | (w11 << 14)) & mask; + out[31] = (w11 >> 9) & mask; + + return in; +} + +inline const uint8_t* unpack24_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16777215ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = 
arrow::bit_util::FromLittleEndian(w11); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 24) & mask; + out[2] = ((w0 >> 48) | (w1 << 16)) & mask; + out[3] = (w1 >> 8) & mask; + out[4] = (w1 >> 32) & mask; + out[5] = ((w1 >> 56) | (w2 << 8)) & mask; + out[6] = (w2 >> 16) & mask; + out[7] = w2 >> 40; + out[8] = (w3)&mask; + out[9] = (w3 >> 24) & mask; + out[10] = ((w3 >> 48) | (w4 << 16)) & mask; + out[11] = (w4 >> 8) & mask; + out[12] = (w4 >> 32) & mask; + out[13] = ((w4 >> 56) | (w5 << 8)) & mask; + out[14] = (w5 >> 16) & mask; + out[15] = w5 >> 40; + out[16] = (w6)&mask; + out[17] = (w6 >> 24) & mask; + out[18] = ((w6 >> 48) | (w7 << 16)) & mask; + out[19] = (w7 >> 8) & mask; + out[20] = (w7 >> 32) & mask; + out[21] = ((w7 >> 56) | (w8 << 8)) & mask; + out[22] = (w8 >> 16) & mask; + out[23] = w8 >> 40; + out[24] = (w9)&mask; + out[25] = (w9 >> 24) & mask; + out[26] = ((w9 >> 48) | (w10 << 16)) & mask; + out[27] = (w10 >> 8) & mask; + out[28] = (w10 >> 32) & mask; + out[29] = ((w10 >> 56) | (w11 << 8)) & mask; + out[30] = (w11 >> 16) & mask; + out[31] = w11 >> 40; + + return in; +} + +inline const uint8_t* unpack25_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 33554431ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 25) & mask; + out[2] = ((w0 >> 50) | (w1 << 14)) & mask; + out[3] = (w1 >> 11) & mask; + out[4] = (w1 >> 36) & mask; + out[5] = ((w1 >> 61) | (w2 << 3)) & mask; + out[6] = (w2 >> 22) & mask; + out[7] = ((w2 >> 47) | (w3 << 17)) & mask; + out[8] = (w3 >> 8) & mask; + out[9] = (w3 >> 33) & mask; + out[10] = ((w3 >> 58) | (w4 << 6)) & mask; + out[11] = (w4 >> 19) & mask; + out[12] = ((w4 >> 44) | (w5 << 20)) & mask; + out[13] = (w5 >> 5) & mask; + out[14] = (w5 >> 30) & mask; + out[15] = ((w5 >> 55) | (w6 << 9)) & mask; + out[16] = (w6 >> 16) & mask; + out[17] = ((w6 >> 41) | (w7 << 23)) & mask; + out[18] = (w7 >> 2) & mask; + out[19] = (w7 >> 27) & mask; + out[20] = ((w7 >> 52) | (w8 << 12)) & mask; + out[21] = (w8 >> 13) & mask; + out[22] = (w8 >> 38) & mask; + out[23] = ((w8 >> 63) | (w9 << 1)) & mask; + out[24] = (w9 >> 24) & mask; + out[25] = ((w9 >> 49) | (w10 << 15)) & mask; + out[26] = (w10 >> 10) & mask; + out[27] = (w10 >> 35) & mask; + out[28] = ((w10 >> 60) | (w11 << 4)) & mask; + out[29] = (w11 >> 21) & mask; + out[30] = ((w11 >> 
46) | (w12 << 18)) & mask; + out[31] = (w12 >> 7) & mask; + + return in; +} + +inline const uint8_t* unpack26_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 67108863ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 26) & mask; + out[2] = ((w0 >> 52) | (w1 << 12)) & mask; + out[3] = (w1 >> 14) & mask; + out[4] = ((w1 >> 40) | (w2 << 24)) & mask; + out[5] = (w2 >> 2) & mask; + out[6] = (w2 >> 28) & mask; + out[7] = ((w2 >> 54) | (w3 << 10)) & mask; + out[8] = (w3 >> 16) & mask; + out[9] = ((w3 >> 42) | (w4 << 22)) & mask; + out[10] = (w4 >> 4) & mask; + out[11] = (w4 >> 30) & mask; + out[12] = ((w4 >> 56) | (w5 << 8)) & mask; + out[13] = (w5 >> 18) & mask; + out[14] = ((w5 >> 44) | (w6 << 20)) & mask; + out[15] = (w6 >> 6) & mask; + out[16] = (w6 >> 32) & mask; + out[17] = ((w6 >> 58) | (w7 << 6)) & mask; + out[18] = (w7 >> 20) & mask; + out[19] = ((w7 >> 46) | (w8 << 18)) & mask; + out[20] = (w8 >> 8) & mask; + out[21] = (w8 >> 34) & mask; + out[22] = ((w8 >> 60) | (w9 << 4)) & mask; + out[23] = (w9 >> 22) & mask; + out[24] = ((w9 >> 48) | (w10 << 16)) & mask; + out[25] = (w10 >> 10) & mask; + out[26] = (w10 >> 36) & mask; + out[27] = ((w10 >> 62) | (w11 << 2)) & mask; + out[28] = (w11 >> 24) & mask; + out[29] = ((w11 >> 50) | (w12 << 14)) & mask; + out[30] = (w12 >> 12) & mask; + out[31] = w12 >> 38; + + return in; +} + +inline const uint8_t* unpack27_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 134217727ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in 
+= 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 27) & mask; + out[2] = ((w0 >> 54) | (w1 << 10)) & mask; + out[3] = (w1 >> 17) & mask; + out[4] = ((w1 >> 44) | (w2 << 20)) & mask; + out[5] = (w2 >> 7) & mask; + out[6] = (w2 >> 34) & mask; + out[7] = ((w2 >> 61) | (w3 << 3)) & mask; + out[8] = (w3 >> 24) & mask; + out[9] = ((w3 >> 51) | (w4 << 13)) & mask; + out[10] = (w4 >> 14) & mask; + out[11] = ((w4 >> 41) | (w5 << 23)) & mask; + out[12] = (w5 >> 4) & mask; + out[13] = (w5 >> 31) & mask; + out[14] = ((w5 >> 58) | (w6 << 6)) & mask; + out[15] = (w6 >> 21) & mask; + out[16] = ((w6 >> 48) | (w7 << 16)) & mask; + out[17] = (w7 >> 11) & mask; + out[18] = ((w7 >> 38) | (w8 << 26)) & mask; + out[19] = (w8 >> 1) & mask; + out[20] = (w8 >> 28) & mask; + out[21] = ((w8 >> 55) | (w9 << 9)) & mask; + out[22] = (w9 >> 18) & mask; + out[23] = ((w9 >> 45) | (w10 << 19)) & mask; + out[24] = (w10 >> 8) & mask; + out[25] = (w10 >> 35) & mask; + out[26] = ((w10 >> 62) | (w11 << 2)) & mask; + out[27] = (w11 >> 25) & mask; + out[28] = ((w11 >> 52) | (w12 << 12)) & mask; + out[29] = (w12 >> 15) & mask; + out[30] = ((w12 >> 42) | (w13 << 22)) & mask; + out[31] = (w13 >> 5) & mask; + + return in; +} + +inline const uint8_t* unpack28_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 268435455ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 28) & mask; + out[2] = ((w0 >> 56) | (w1 << 8)) & mask; + out[3] = (w1 >> 20) & mask; + out[4] = ((w1 >> 48) | (w2 << 16)) & mask; + out[5] = (w2 >> 12) & mask; + out[6] = ((w2 >> 40) | (w3 << 24)) & mask; + out[7] = (w3 >> 4) & mask; + out[8] = (w3 >> 32) & mask; + out[9] = ((w3 >> 60) | (w4 << 4)) & mask; + out[10] = (w4 >> 24) & mask; + out[11] = ((w4 >> 52) | (w5 << 12)) 
& mask; + out[12] = (w5 >> 16) & mask; + out[13] = ((w5 >> 44) | (w6 << 20)) & mask; + out[14] = (w6 >> 8) & mask; + out[15] = w6 >> 36; + out[16] = (w7)&mask; + out[17] = (w7 >> 28) & mask; + out[18] = ((w7 >> 56) | (w8 << 8)) & mask; + out[19] = (w8 >> 20) & mask; + out[20] = ((w8 >> 48) | (w9 << 16)) & mask; + out[21] = (w9 >> 12) & mask; + out[22] = ((w9 >> 40) | (w10 << 24)) & mask; + out[23] = (w10 >> 4) & mask; + out[24] = (w10 >> 32) & mask; + out[25] = ((w10 >> 60) | (w11 << 4)) & mask; + out[26] = (w11 >> 24) & mask; + out[27] = ((w11 >> 52) | (w12 << 12)) & mask; + out[28] = (w12 >> 16) & mask; + out[29] = ((w12 >> 44) | (w13 << 20)) & mask; + out[30] = (w13 >> 8) & mask; + out[31] = w13 >> 36; + + return in; +} + +inline const uint8_t* unpack29_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 536870911ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 29) & mask; + out[2] = ((w0 >> 58) | (w1 << 6)) & mask; + out[3] = (w1 >> 23) & mask; + out[4] = ((w1 >> 52) | (w2 << 12)) & mask; + out[5] = (w2 >> 17) & mask; + out[6] = ((w2 >> 46) | (w3 << 18)) & mask; + out[7] = (w3 >> 11) & mask; + out[8] = ((w3 >> 40) | (w4 << 24)) & mask; + out[9] = (w4 >> 5) & mask; + out[10] = (w4 >> 34) & mask; + out[11] = ((w4 >> 63) | (w5 << 1)) & mask; + out[12] = (w5 >> 28) & mask; + out[13] = ((w5 >> 57) | (w6 << 7)) & mask; + out[14] = (w6 >> 22) & mask; + out[15] = ((w6 >> 51) | (w7 << 13)) & mask; + out[16] = (w7 >> 16) & mask; + out[17] = ((w7 >> 45) | (w8 << 19)) & mask; + out[18] = (w8 >> 10) & mask; + out[19] = ((w8 >> 39) | (w9 << 25)) & mask; + out[20] = (w9 >> 4) & mask; + out[21] = (w9 >> 33) & mask; + out[22] = ((w9 >> 62) | (w10 << 2)) & mask; + out[23] = (w10 >> 27) & mask; + out[24] = ((w10 >> 56) | (w11 << 8)) & mask; + out[25] = (w11 >> 21) & mask; + out[26] = ((w11 >> 50) | (w12 << 14)) & mask; + out[27] = (w12 >> 15) & mask; + out[28] = ((w12 >> 44) | (w13 << 20)) & mask; + out[29] = (w13 >> 9) & mask; + out[30] = ((w13 >> 38) | (w14 << 26)) & mask; + out[31] = (w14 >> 3) & mask; + + return in; +} + +inline const uint8_t* unpack30_64(const uint8_t* in, 
uint64_t* out) { + const uint64_t mask = 1073741823ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 30) & mask; + out[2] = ((w0 >> 60) | (w1 << 4)) & mask; + out[3] = (w1 >> 26) & mask; + out[4] = ((w1 >> 56) | (w2 << 8)) & mask; + out[5] = (w2 >> 22) & mask; + out[6] = ((w2 >> 52) | (w3 << 12)) & mask; + out[7] = (w3 >> 18) & mask; + out[8] = ((w3 >> 48) | (w4 << 16)) & mask; + out[9] = (w4 >> 14) & mask; + out[10] = ((w4 >> 44) | (w5 << 20)) & mask; + out[11] = (w5 >> 10) & mask; + out[12] = ((w5 >> 40) | (w6 << 24)) & mask; + out[13] = (w6 >> 6) & mask; + out[14] = ((w6 >> 36) | (w7 << 28)) & mask; + out[15] = (w7 >> 2) & mask; + out[16] = (w7 >> 32) & mask; + out[17] = ((w7 >> 62) | (w8 << 2)) & mask; + out[18] = (w8 >> 28) & mask; + out[19] = ((w8 >> 58) | (w9 << 6)) & mask; + out[20] = (w9 >> 24) & mask; + out[21] = ((w9 >> 54) | (w10 << 10)) & mask; + out[22] = (w10 >> 20) & mask; + out[23] = ((w10 >> 50) | (w11 << 14)) & mask; + out[24] = (w11 >> 16) & mask; + out[25] = ((w11 >> 46) | (w12 << 18)) & mask; + out[26] = (w12 >> 12) & mask; + out[27] = ((w12 >> 42) | (w13 << 22)) & mask; + out[28] = (w13 >> 8) & mask; + out[29] = ((w13 >> 38) | (w14 << 26)) & mask; + out[30] = (w14 >> 4) & mask; + out[31] = w14 >> 34; + + return in; +} + +inline const uint8_t* unpack31_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2147483647ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = 
arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 31) & mask; + out[2] = ((w0 >> 62) | (w1 << 2)) & mask; + out[3] = (w1 >> 29) & mask; + out[4] = ((w1 >> 60) | (w2 << 4)) & mask; + out[5] = (w2 >> 27) & mask; + out[6] = ((w2 >> 58) | (w3 << 6)) & mask; + out[7] = (w3 >> 25) & mask; + out[8] = ((w3 >> 56) | (w4 << 8)) & mask; + out[9] = (w4 >> 23) & mask; + out[10] = ((w4 >> 54) | (w5 << 10)) & mask; + out[11] = (w5 >> 21) & mask; + out[12] = ((w5 >> 52) | (w6 << 12)) & mask; + out[13] = (w6 >> 19) & mask; + out[14] = ((w6 >> 50) | (w7 << 14)) & mask; + out[15] = (w7 >> 17) & mask; + out[16] = ((w7 >> 48) | (w8 << 16)) & mask; + out[17] = (w8 >> 15) & mask; + out[18] = ((w8 >> 46) | (w9 << 18)) & mask; + out[19] = (w9 >> 13) & mask; + out[20] = ((w9 >> 44) | (w10 << 20)) & mask; + out[21] = (w10 >> 11) & mask; + out[22] = ((w10 >> 42) | (w11 << 22)) & mask; + out[23] = (w11 >> 9) & mask; + out[24] = ((w11 >> 40) | (w12 << 24)) & mask; + out[25] = (w12 >> 7) & mask; + out[26] = ((w12 >> 38) | (w13 << 26)) & mask; + out[27] = (w13 >> 5) & mask; + out[28] = ((w13 >> 36) | (w14 << 28)) & mask; + out[29] = (w14 >> 3) & mask; + out[30] = ((w14 >> 34) | (w15 << 30)) & mask; + out[31] = (w15 >> 1) & mask; + + return in; +} + +inline const uint8_t* unpack32_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4294967295ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = 
arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + out[0] = (w0)&mask; + out[1] = w0 >> 32; + out[2] = (w1)&mask; + out[3] = w1 >> 32; + out[4] = (w2)&mask; + out[5] = w2 >> 32; + out[6] = (w3)&mask; + out[7] = w3 >> 32; + out[8] = (w4)&mask; + out[9] = w4 >> 32; + out[10] = (w5)&mask; + out[11] = w5 >> 32; + out[12] = (w6)&mask; + out[13] = w6 >> 32; + out[14] = (w7)&mask; + out[15] = w7 >> 32; + out[16] = (w8)&mask; + out[17] = w8 >> 32; + out[18] = (w9)&mask; + out[19] = w9 >> 32; + out[20] = (w10)&mask; + out[21] = w10 >> 32; + out[22] = (w11)&mask; + out[23] = w11 >> 32; + out[24] = (w12)&mask; + out[25] = w12 >> 32; + out[26] = (w13)&mask; + out[27] = w13 >> 32; + out[28] = (w14)&mask; + out[29] = w14 >> 32; + out[30] = (w15)&mask; + out[31] = w15 >> 32; + + return in; +} + +inline const uint8_t* unpack33_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8589934591ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 33) | (w1 << 31)) & mask; + out[2] = (w1 >> 2) & mask; + out[3] = ((w1 >> 35) | (w2 << 29)) & mask; + out[4] = (w2 >> 4) & mask; + out[5] = ((w2 >> 37) | (w3 << 27)) & mask; + out[6] = (w3 >> 6) & mask; + out[7] = ((w3 >> 39) | (w4 << 25)) & mask; + out[8] = (w4 >> 8) & mask; + out[9] = ((w4 >> 41) | (w5 << 23)) & mask; + out[10] = (w5 >> 10) & mask; + out[11] = ((w5 >> 43) | (w6 << 21)) & mask; + out[12] = (w6 >> 12) & mask; + out[13] = ((w6 >> 45) | (w7 << 19)) & mask; + out[14] = (w7 >> 14) & mask; + out[15] = ((w7 >> 47) | (w8 << 17)) & mask; + out[16] = (w8 >> 16) & mask; + out[17] = ((w8 >> 49) | (w9 << 15)) & mask; + out[18] = (w9 >> 18) & mask; + out[19] = ((w9 >> 51) | (w10 << 13)) & mask; + out[20] = (w10 >> 20) & mask; + out[21] = ((w10 >> 53) | (w11 << 11)) & mask; + out[22] = (w11 >> 22) & mask; + out[23] = ((w11 >> 55) | (w12 << 9)) & mask; + out[24] 
= (w12 >> 24) & mask; + out[25] = ((w12 >> 57) | (w13 << 7)) & mask; + out[26] = (w13 >> 26) & mask; + out[27] = ((w13 >> 59) | (w14 << 5)) & mask; + out[28] = (w14 >> 28) & mask; + out[29] = ((w14 >> 61) | (w15 << 3)) & mask; + out[30] = (w15 >> 30) & mask; + out[31] = ((w15 >> 63) | (w16 << 1)) & mask; + + return in; +} + +inline const uint8_t* unpack34_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17179869183ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 34) | (w1 << 30)) & mask; + out[2] = (w1 >> 4) & mask; + out[3] = ((w1 >> 38) | (w2 << 26)) & mask; + out[4] = (w2 >> 8) & mask; + out[5] = ((w2 >> 42) | (w3 << 22)) & mask; + out[6] = (w3 >> 12) & mask; + out[7] = ((w3 >> 46) | (w4 << 18)) & mask; + out[8] = (w4 >> 16) & mask; + out[9] = ((w4 >> 50) | (w5 << 14)) & mask; + out[10] = (w5 >> 20) & mask; + out[11] = ((w5 >> 54) | (w6 << 10)) & mask; + out[12] = (w6 >> 24) & mask; + out[13] = ((w6 >> 58) | (w7 << 6)) & mask; + out[14] = (w7 >> 28) & mask; + out[15] = ((w7 >> 62) | (w8 << 2)) & mask; + out[16] = ((w8 >> 32) | (w9 << 32)) & mask; + out[17] = (w9 >> 2) & mask; + out[18] = ((w9 >> 36) | (w10 << 28)) & mask; + out[19] = (w10 >> 6) & mask; + out[20] = ((w10 >> 40) | (w11 << 24)) & mask; + out[21] = (w11 >> 10) & mask; + out[22] = ((w11 >> 44) | (w12 << 20)) & mask; + out[23] = (w12 >> 14) & mask; + out[24] = ((w12 >> 48) | (w13 << 16)) & mask; + out[25] = (w13 >> 18) & mask; + out[26] = ((w13 >> 52) | (w14 << 12)) & mask; + out[27] = (w14 >> 22) & mask; + out[28] = ((w14 >> 56) | (w15 << 8)) & mask; + out[29] = (w15 >> 26) & mask; + out[30] = ((w15 >> 60) | (w16 << 4)) & mask; + out[31] = w16 >> 30; + + return in; +} + +inline const uint8_t* unpack35_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 34359738367ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); 
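
Every kernel opens with the same load preamble before any bits are extracted: an alignment-safe unaligned load of one 64-bit word, byte-order normalization, and an 8-byte advance of the input cursor. Two notes on the helpers: util::SafeLoadAs cannot deduce its result type from the argument, so in the Arrow sources these calls carry an explicit template argument, util::SafeLoadAs<uint64_t>(in); and arrow::bit_util::FromLittleEndian is the identity on little-endian hosts and a byte swap on big-endian ones, so the packed stream decodes identically everywhere. A minimal standalone sketch of the preamble (the helper name load_word_le is mine, not Arrow's):

    #include <cstdint>
    #include <cstring>

    // Sketch of the per-word load preamble repeated throughout these kernels.
    // load_word_le is a hypothetical helper, not part of the generated file.
    inline uint64_t load_word_le(const uint8_t*& in) {
      uint64_t w;
      std::memcpy(&w, in, sizeof(w));  // alignment-safe load, as util::SafeLoadAs<uint64_t> provides
      // A big-endian host would byte-swap w here (arrow::bit_util::FromLittleEndian);
      // on little-endian hosts that call is the identity, so this sketch omits it.
      in += 8;  // consume one packed 64-bit word
      return w;
    }
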
+ w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 35) | (w1 << 29)) & mask; + out[2] = (w1 >> 6) & mask; + out[3] = ((w1 >> 41) | (w2 << 23)) & mask; + out[4] = (w2 >> 12) & mask; + out[5] = ((w2 >> 47) | (w3 << 17)) & mask; + out[6] = (w3 >> 18) & mask; + out[7] = ((w3 >> 53) | (w4 << 11)) & mask; + out[8] = (w4 >> 24) & mask; + out[9] = ((w4 >> 59) | (w5 << 5)) & mask; + out[10] = ((w5 >> 30) | (w6 << 34)) & mask; + out[11] = (w6 >> 1) & mask; + out[12] = ((w6 >> 36) | (w7 << 28)) & mask; + out[13] = (w7 >> 7) & mask; + out[14] = ((w7 >> 42) | (w8 << 22)) & mask; + out[15] = (w8 >> 13) & mask; + out[16] = ((w8 >> 48) | (w9 << 16)) & mask; + out[17] = (w9 >> 19) & mask; + out[18] = ((w9 >> 54) | (w10 << 10)) & mask; + out[19] = (w10 >> 25) & mask; + out[20] = ((w10 >> 60) | (w11 << 4)) & mask; + out[21] = ((w11 >> 31) | (w12 << 33)) & mask; + out[22] = (w12 >> 2) & mask; + out[23] = ((w12 >> 37) | (w13 << 27)) & mask; + out[24] = (w13 >> 8) & mask; + out[25] = ((w13 >> 43) | (w14 << 21)) & mask; + out[26] = (w14 >> 14) & mask; + out[27] = ((w14 >> 49) | (w15 << 15)) & mask; + out[28] = (w15 >> 20) & mask; + out[29] = ((w15 >> 55) | (w16 << 9)) & mask; + out[30] = (w16 >> 26) & mask; + out[31] = ((w16 >> 61) | (w17 << 3)) & mask; + + return in; +} + +inline const uint8_t* unpack36_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 68719476735ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t 
w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 36) | (w1 << 28)) & mask; + out[2] = (w1 >> 8) & mask; + out[3] = ((w1 >> 44) | (w2 << 20)) & mask; + out[4] = (w2 >> 16) & mask; + out[5] = ((w2 >> 52) | (w3 << 12)) & mask; + out[6] = (w3 >> 24) & mask; + out[7] = ((w3 >> 60) | (w4 << 4)) & mask; + out[8] = ((w4 >> 32) | (w5 << 32)) & mask; + out[9] = (w5 >> 4) & mask; + out[10] = ((w5 >> 40) | (w6 << 24)) & mask; + out[11] = (w6 >> 12) & mask; + out[12] = ((w6 >> 48) | (w7 << 16)) & mask; + out[13] = (w7 >> 20) & mask; + out[14] = ((w7 >> 56) | (w8 << 8)) & mask; + out[15] = w8 >> 28; + out[16] = (w9)&mask; + out[17] = ((w9 >> 36) | (w10 << 28)) & mask; + out[18] = (w10 >> 8) & mask; + out[19] = ((w10 >> 44) | (w11 << 20)) & mask; + out[20] = (w11 >> 16) & mask; + out[21] = ((w11 >> 52) | (w12 << 12)) & mask; + out[22] = (w12 >> 24) & mask; + out[23] = ((w12 >> 60) | (w13 << 4)) & mask; + out[24] = ((w13 >> 32) | (w14 << 32)) & mask; + out[25] = (w14 >> 4) & mask; + out[26] = ((w14 >> 40) | (w15 << 24)) & mask; + out[27] = (w15 >> 12) & mask; + out[28] = ((w15 >> 48) | (w16 << 16)) & mask; + out[29] = (w16 >> 20) & mask; + out[30] = ((w16 >> 56) | (w17 << 8)) & mask; + out[31] = w17 >> 28; + + return in; +} + +inline const uint8_t* unpack37_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 137438953471ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = 
util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 37) | (w1 << 27)) & mask; + out[2] = (w1 >> 10) & mask; + out[3] = ((w1 >> 47) | (w2 << 17)) & mask; + out[4] = (w2 >> 20) & mask; + out[5] = ((w2 >> 57) | (w3 << 7)) & mask; + out[6] = ((w3 >> 30) | (w4 << 34)) & mask; + out[7] = (w4 >> 3) & mask; + out[8] = ((w4 >> 40) | (w5 << 24)) & mask; + out[9] = (w5 >> 13) & mask; + out[10] = ((w5 >> 50) | (w6 << 14)) & mask; + out[11] = (w6 >> 23) & mask; + out[12] = ((w6 >> 60) | (w7 << 4)) & mask; + out[13] = ((w7 >> 33) | (w8 << 31)) & mask; + out[14] = (w8 >> 6) & mask; + out[15] = ((w8 >> 43) | (w9 << 21)) & mask; + out[16] = (w9 >> 16) & mask; + out[17] = ((w9 >> 53) | (w10 << 11)) & mask; + out[18] = (w10 >> 26) & mask; + out[19] = ((w10 >> 63) | (w11 << 1)) & mask; + out[20] = ((w11 >> 36) | (w12 << 28)) & mask; + out[21] = (w12 >> 9) & mask; + out[22] = ((w12 >> 46) | (w13 << 18)) & mask; + out[23] = (w13 >> 19) & mask; + out[24] = ((w13 >> 56) | (w14 << 8)) & mask; + out[25] = ((w14 >> 29) | (w15 << 35)) & mask; + out[26] = (w15 >> 2) & mask; + out[27] = ((w15 >> 39) | (w16 << 25)) & mask; + out[28] = (w16 >> 12) & mask; + out[29] = ((w16 >> 49) | (w17 << 15)) & mask; + out[30] = (w17 >> 22) & mask; + out[31] = ((w17 >> 59) | (w18 << 5)) & mask; + + return in; +} + +inline const uint8_t* unpack38_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 274877906943ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = 
arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 38) | (w1 << 26)) & mask; + out[2] = (w1 >> 12) & mask; + out[3] = ((w1 >> 50) | (w2 << 14)) & mask; + out[4] = (w2 >> 24) & mask; + out[5] = ((w2 >> 62) | (w3 << 2)) & mask; + out[6] = ((w3 >> 36) | (w4 << 28)) & mask; + out[7] = (w4 >> 10) & mask; + out[8] = ((w4 >> 48) | (w5 << 16)) & mask; + out[9] = (w5 >> 22) & mask; + out[10] = ((w5 >> 60) | (w6 << 4)) & mask; + out[11] = ((w6 >> 34) | (w7 << 30)) & mask; + out[12] = (w7 >> 8) & mask; + out[13] = ((w7 >> 46) | (w8 << 18)) & mask; + out[14] = (w8 >> 20) & mask; + out[15] = ((w8 >> 58) | (w9 << 6)) & mask; + out[16] = ((w9 >> 32) | (w10 << 32)) & mask; + out[17] = (w10 >> 6) & mask; + out[18] = ((w10 >> 44) | (w11 << 20)) & mask; + out[19] = (w11 >> 18) & mask; + out[20] = ((w11 >> 56) | (w12 << 8)) & mask; + out[21] = ((w12 >> 30) | (w13 << 34)) & mask; + out[22] = (w13 >> 4) & mask; + out[23] = ((w13 >> 42) | (w14 << 22)) & mask; + out[24] = (w14 >> 16) & mask; + out[25] = ((w14 >> 54) | (w15 << 10)) & mask; + out[26] = ((w15 >> 28) | (w16 << 36)) & mask; + out[27] = (w16 >> 2) & mask; + out[28] = ((w16 >> 40) | (w17 << 24)) & mask; + out[29] = (w17 >> 14) & mask; + out[30] = ((w17 >> 52) | (w18 << 12)) & mask; + out[31] = w18 >> 26; + + return in; +} + +inline const uint8_t* unpack39_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 549755813887ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in 
+= 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 39) | (w1 << 25)) & mask; + out[2] = (w1 >> 14) & mask; + out[3] = ((w1 >> 53) | (w2 << 11)) & mask; + out[4] = ((w2 >> 28) | (w3 << 36)) & mask; + out[5] = (w3 >> 3) & mask; + out[6] = ((w3 >> 42) | (w4 << 22)) & mask; + out[7] = (w4 >> 17) & mask; + out[8] = ((w4 >> 56) | (w5 << 8)) & mask; + out[9] = ((w5 >> 31) | (w6 << 33)) & mask; + out[10] = (w6 >> 6) & mask; + out[11] = ((w6 >> 45) | (w7 << 19)) & mask; + out[12] = (w7 >> 20) & mask; + out[13] = ((w7 >> 59) | (w8 << 5)) & mask; + out[14] = ((w8 >> 34) | (w9 << 30)) & mask; + out[15] = (w9 >> 9) & mask; + out[16] = ((w9 >> 48) | (w10 << 16)) & mask; + out[17] = (w10 >> 23) & mask; + out[18] = ((w10 >> 62) | (w11 << 2)) & mask; + out[19] = ((w11 >> 37) | (w12 << 27)) & mask; + out[20] = (w12 >> 12) & mask; + out[21] = ((w12 >> 51) | (w13 << 13)) & mask; + out[22] = ((w13 >> 26) | (w14 << 38)) & mask; + out[23] = (w14 >> 1) & mask; + out[24] = ((w14 >> 40) | (w15 << 24)) & mask; + out[25] = (w15 >> 15) & mask; + out[26] = ((w15 >> 54) | (w16 << 10)) & mask; + out[27] = ((w16 >> 29) | (w17 << 35)) & mask; + out[28] = (w17 >> 4) & mask; + out[29] = ((w17 >> 43) | (w18 << 21)) & mask; + out[30] = (w18 >> 18) & mask; + out[31] = ((w18 >> 57) | (w19 << 7)) & mask; + + return in; +} + +inline const uint8_t* unpack40_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1099511627775ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + out[0] = 
(w0)&mask; + out[1] = ((w0 >> 40) | (w1 << 24)) & mask; + out[2] = (w1 >> 16) & mask; + out[3] = ((w1 >> 56) | (w2 << 8)) & mask; + out[4] = ((w2 >> 32) | (w3 << 32)) & mask; + out[5] = (w3 >> 8) & mask; + out[6] = ((w3 >> 48) | (w4 << 16)) & mask; + out[7] = w4 >> 24; + out[8] = (w5)&mask; + out[9] = ((w5 >> 40) | (w6 << 24)) & mask; + out[10] = (w6 >> 16) & mask; + out[11] = ((w6 >> 56) | (w7 << 8)) & mask; + out[12] = ((w7 >> 32) | (w8 << 32)) & mask; + out[13] = (w8 >> 8) & mask; + out[14] = ((w8 >> 48) | (w9 << 16)) & mask; + out[15] = w9 >> 24; + out[16] = (w10)&mask; + out[17] = ((w10 >> 40) | (w11 << 24)) & mask; + out[18] = (w11 >> 16) & mask; + out[19] = ((w11 >> 56) | (w12 << 8)) & mask; + out[20] = ((w12 >> 32) | (w13 << 32)) & mask; + out[21] = (w13 >> 8) & mask; + out[22] = ((w13 >> 48) | (w14 << 16)) & mask; + out[23] = w14 >> 24; + out[24] = (w15)&mask; + out[25] = ((w15 >> 40) | (w16 << 24)) & mask; + out[26] = (w16 >> 16) & mask; + out[27] = ((w16 >> 56) | (w17 << 8)) & mask; + out[28] = ((w17 >> 32) | (w18 << 32)) & mask; + out[29] = (w18 >> 8) & mask; + out[30] = ((w18 >> 48) | (w19 << 16)) & mask; + out[31] = w19 >> 24; + + return in; +} + +inline const uint8_t* unpack41_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2199023255551ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 41) | (w1 << 23)) & mask; + out[2] = (w1 >> 18) & mask; + out[3] = ((w1 >> 59) | (w2 << 5)) & mask; + out[4] = ((w2 >> 36) | (w3 << 28)) & mask; + out[5] = (w3 >> 13) & mask; + out[6] = ((w3 >> 
54) | (w4 << 10)) & mask; + out[7] = ((w4 >> 31) | (w5 << 33)) & mask; + out[8] = (w5 >> 8) & mask; + out[9] = ((w5 >> 49) | (w6 << 15)) & mask; + out[10] = ((w6 >> 26) | (w7 << 38)) & mask; + out[11] = (w7 >> 3) & mask; + out[12] = ((w7 >> 44) | (w8 << 20)) & mask; + out[13] = (w8 >> 21) & mask; + out[14] = ((w8 >> 62) | (w9 << 2)) & mask; + out[15] = ((w9 >> 39) | (w10 << 25)) & mask; + out[16] = (w10 >> 16) & mask; + out[17] = ((w10 >> 57) | (w11 << 7)) & mask; + out[18] = ((w11 >> 34) | (w12 << 30)) & mask; + out[19] = (w12 >> 11) & mask; + out[20] = ((w12 >> 52) | (w13 << 12)) & mask; + out[21] = ((w13 >> 29) | (w14 << 35)) & mask; + out[22] = (w14 >> 6) & mask; + out[23] = ((w14 >> 47) | (w15 << 17)) & mask; + out[24] = ((w15 >> 24) | (w16 << 40)) & mask; + out[25] = (w16 >> 1) & mask; + out[26] = ((w16 >> 42) | (w17 << 22)) & mask; + out[27] = (w17 >> 19) & mask; + out[28] = ((w17 >> 60) | (w18 << 4)) & mask; + out[29] = ((w18 >> 37) | (w19 << 27)) & mask; + out[30] = (w19 >> 14) & mask; + out[31] = ((w19 >> 55) | (w20 << 9)) & mask; + + return in; +} + +inline const uint8_t* unpack42_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4398046511103ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 42) | (w1 << 22)) & mask; + out[2] = (w1 >> 20) & mask; + out[3] = ((w1 >> 62) | (w2 << 2)) & mask; + out[4] = ((w2 >> 40) | (w3 << 24)) & mask; + out[5] = (w3 >> 18) & mask; + out[6] = ((w3 >> 60) | (w4 << 4)) & mask; + out[7] = ((w4 >> 38) | (w5 << 26)) & mask; + out[8] = (w5 >> 16) & mask; + 
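
Every extraction line instantiates one invariant: value j of an N-bit packing starts at absolute bit offset j*N, so with word index i = (j*N)/64 and shift s = (j*N)%64 the value is (w_i >> s) & mask, or, when s + N > 64, the low 64-s bits of w_i spliced with the next word shifted left by 64-s. Each unpackN_64 body is that rule fully unrolled for j = 0..31 at a fixed N, which is why every shift count is a literal and the mask is the constant 2^N - 1. A generic (and much slower) scalar form of the same rule, as a sketch:

    #include <cstdint>

    // Extract value j of bit width b from pre-loaded little-endian words w[].
    inline uint64_t extract(const uint64_t* w, unsigned b, unsigned j) {
      const uint64_t mask = (b == 64) ? ~uint64_t{0} : ((uint64_t{1} << b) - 1);
      const unsigned bit = j * b;    // absolute bit offset of value j
      const unsigned i = bit / 64;   // word holding the low bits
      const unsigned s = bit % 64;   // position within that word
      uint64_t v = w[i] >> s;
      if (s + b > 64) {              // value straddles a word boundary
        v |= w[i + 1] << (64 - s);   // splice in the high bits from the next word
      }
      return v & mask;
    }

For example, b = 42 and j = 1 give i = 0, s = 42, and 42 + 42 > 64, i.e. ((w0 >> 42) | (w1 << 22)) & mask, exactly the out[1] line of unpack42_64 above.
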
out[9] = ((w5 >> 58) | (w6 << 6)) & mask; + out[10] = ((w6 >> 36) | (w7 << 28)) & mask; + out[11] = (w7 >> 14) & mask; + out[12] = ((w7 >> 56) | (w8 << 8)) & mask; + out[13] = ((w8 >> 34) | (w9 << 30)) & mask; + out[14] = (w9 >> 12) & mask; + out[15] = ((w9 >> 54) | (w10 << 10)) & mask; + out[16] = ((w10 >> 32) | (w11 << 32)) & mask; + out[17] = (w11 >> 10) & mask; + out[18] = ((w11 >> 52) | (w12 << 12)) & mask; + out[19] = ((w12 >> 30) | (w13 << 34)) & mask; + out[20] = (w13 >> 8) & mask; + out[21] = ((w13 >> 50) | (w14 << 14)) & mask; + out[22] = ((w14 >> 28) | (w15 << 36)) & mask; + out[23] = (w15 >> 6) & mask; + out[24] = ((w15 >> 48) | (w16 << 16)) & mask; + out[25] = ((w16 >> 26) | (w17 << 38)) & mask; + out[26] = (w17 >> 4) & mask; + out[27] = ((w17 >> 46) | (w18 << 18)) & mask; + out[28] = ((w18 >> 24) | (w19 << 40)) & mask; + out[29] = (w19 >> 2) & mask; + out[30] = ((w19 >> 44) | (w20 << 20)) & mask; + out[31] = w20 >> 22; + + return in; +} + +inline const uint8_t* unpack43_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8796093022207ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 43) | (w1 << 21)) & mask; + out[2] = ((w1 >> 22) | (w2 << 42)) & mask; + out[3] = (w2 >> 1) & mask; + out[4] = ((w2 >> 44) | (w3 << 20)) & mask; + out[5] = ((w3 >> 23) | (w4 << 41)) & mask; + out[6] = (w4 >> 2) & mask; + out[7] = ((w4 >> 45) | (w5 << 19)) & mask; + out[8] = ((w5 >> 24) | (w6 << 40)) & 
mask; + out[9] = (w6 >> 3) & mask; + out[10] = ((w6 >> 46) | (w7 << 18)) & mask; + out[11] = ((w7 >> 25) | (w8 << 39)) & mask; + out[12] = (w8 >> 4) & mask; + out[13] = ((w8 >> 47) | (w9 << 17)) & mask; + out[14] = ((w9 >> 26) | (w10 << 38)) & mask; + out[15] = (w10 >> 5) & mask; + out[16] = ((w10 >> 48) | (w11 << 16)) & mask; + out[17] = ((w11 >> 27) | (w12 << 37)) & mask; + out[18] = (w12 >> 6) & mask; + out[19] = ((w12 >> 49) | (w13 << 15)) & mask; + out[20] = ((w13 >> 28) | (w14 << 36)) & mask; + out[21] = (w14 >> 7) & mask; + out[22] = ((w14 >> 50) | (w15 << 14)) & mask; + out[23] = ((w15 >> 29) | (w16 << 35)) & mask; + out[24] = (w16 >> 8) & mask; + out[25] = ((w16 >> 51) | (w17 << 13)) & mask; + out[26] = ((w17 >> 30) | (w18 << 34)) & mask; + out[27] = (w18 >> 9) & mask; + out[28] = ((w18 >> 52) | (w19 << 12)) & mask; + out[29] = ((w19 >> 31) | (w20 << 33)) & mask; + out[30] = (w20 >> 10) & mask; + out[31] = ((w20 >> 53) | (w21 << 11)) & mask; + + return in; +} + +inline const uint8_t* unpack44_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17592186044415ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 44) | (w1 << 20)) & mask; + out[2] = ((w1 >> 24) | (w2 << 40)) & mask; + out[3] = (w2 >> 4) & mask; + out[4] = ((w2 >> 48) | (w3 << 16)) & mask; + out[5] = ((w3 >> 28) | (w4 << 36)) & mask; + out[6] = (w4 >> 8) & mask; + out[7] = ((w4 >> 52) | (w5 << 12)) & mask; + out[8] = ((w5 >> 32) | 
(w6 << 32)) & mask; + out[9] = (w6 >> 12) & mask; + out[10] = ((w6 >> 56) | (w7 << 8)) & mask; + out[11] = ((w7 >> 36) | (w8 << 28)) & mask; + out[12] = (w8 >> 16) & mask; + out[13] = ((w8 >> 60) | (w9 << 4)) & mask; + out[14] = ((w9 >> 40) | (w10 << 24)) & mask; + out[15] = w10 >> 20; + out[16] = (w11)&mask; + out[17] = ((w11 >> 44) | (w12 << 20)) & mask; + out[18] = ((w12 >> 24) | (w13 << 40)) & mask; + out[19] = (w13 >> 4) & mask; + out[20] = ((w13 >> 48) | (w14 << 16)) & mask; + out[21] = ((w14 >> 28) | (w15 << 36)) & mask; + out[22] = (w15 >> 8) & mask; + out[23] = ((w15 >> 52) | (w16 << 12)) & mask; + out[24] = ((w16 >> 32) | (w17 << 32)) & mask; + out[25] = (w17 >> 12) & mask; + out[26] = ((w17 >> 56) | (w18 << 8)) & mask; + out[27] = ((w18 >> 36) | (w19 << 28)) & mask; + out[28] = (w19 >> 16) & mask; + out[29] = ((w19 >> 60) | (w20 << 4)) & mask; + out[30] = ((w20 >> 40) | (w21 << 24)) & mask; + out[31] = w21 >> 20; + + return in; +} + +inline const uint8_t* unpack45_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 35184372088831ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 45) | (w1 << 19)) & mask; + out[2] = ((w1 >> 26) | (w2 << 38)) & mask; + out[3] = (w2 >> 7) & mask; + out[4] = ((w2 >> 52) | (w3 << 12)) & mask; + out[5] = ((w3 >> 33) | (w4 << 31)) & mask; + out[6] = (w4 >> 14) & 
mask; + out[7] = ((w4 >> 59) | (w5 << 5)) & mask; + out[8] = ((w5 >> 40) | (w6 << 24)) & mask; + out[9] = ((w6 >> 21) | (w7 << 43)) & mask; + out[10] = (w7 >> 2) & mask; + out[11] = ((w7 >> 47) | (w8 << 17)) & mask; + out[12] = ((w8 >> 28) | (w9 << 36)) & mask; + out[13] = (w9 >> 9) & mask; + out[14] = ((w9 >> 54) | (w10 << 10)) & mask; + out[15] = ((w10 >> 35) | (w11 << 29)) & mask; + out[16] = (w11 >> 16) & mask; + out[17] = ((w11 >> 61) | (w12 << 3)) & mask; + out[18] = ((w12 >> 42) | (w13 << 22)) & mask; + out[19] = ((w13 >> 23) | (w14 << 41)) & mask; + out[20] = (w14 >> 4) & mask; + out[21] = ((w14 >> 49) | (w15 << 15)) & mask; + out[22] = ((w15 >> 30) | (w16 << 34)) & mask; + out[23] = (w16 >> 11) & mask; + out[24] = ((w16 >> 56) | (w17 << 8)) & mask; + out[25] = ((w17 >> 37) | (w18 << 27)) & mask; + out[26] = (w18 >> 18) & mask; + out[27] = ((w18 >> 63) | (w19 << 1)) & mask; + out[28] = ((w19 >> 44) | (w20 << 20)) & mask; + out[29] = ((w20 >> 25) | (w21 << 39)) & mask; + out[30] = (w21 >> 6) & mask; + out[31] = ((w21 >> 51) | (w22 << 13)) & mask; + + return in; +} + +inline const uint8_t* unpack46_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 70368744177663ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 46) | (w1 << 18)) & mask; + out[2] = ((w1 >> 28) | (w2 << 36)) & mask; + out[3] = (w2 
>> 10) & mask; + out[4] = ((w2 >> 56) | (w3 << 8)) & mask; + out[5] = ((w3 >> 38) | (w4 << 26)) & mask; + out[6] = ((w4 >> 20) | (w5 << 44)) & mask; + out[7] = (w5 >> 2) & mask; + out[8] = ((w5 >> 48) | (w6 << 16)) & mask; + out[9] = ((w6 >> 30) | (w7 << 34)) & mask; + out[10] = (w7 >> 12) & mask; + out[11] = ((w7 >> 58) | (w8 << 6)) & mask; + out[12] = ((w8 >> 40) | (w9 << 24)) & mask; + out[13] = ((w9 >> 22) | (w10 << 42)) & mask; + out[14] = (w10 >> 4) & mask; + out[15] = ((w10 >> 50) | (w11 << 14)) & mask; + out[16] = ((w11 >> 32) | (w12 << 32)) & mask; + out[17] = (w12 >> 14) & mask; + out[18] = ((w12 >> 60) | (w13 << 4)) & mask; + out[19] = ((w13 >> 42) | (w14 << 22)) & mask; + out[20] = ((w14 >> 24) | (w15 << 40)) & mask; + out[21] = (w15 >> 6) & mask; + out[22] = ((w15 >> 52) | (w16 << 12)) & mask; + out[23] = ((w16 >> 34) | (w17 << 30)) & mask; + out[24] = (w17 >> 16) & mask; + out[25] = ((w17 >> 62) | (w18 << 2)) & mask; + out[26] = ((w18 >> 44) | (w19 << 20)) & mask; + out[27] = ((w19 >> 26) | (w20 << 38)) & mask; + out[28] = (w20 >> 8) & mask; + out[29] = ((w20 >> 54) | (w21 << 10)) & mask; + out[30] = ((w21 >> 36) | (w22 << 28)) & mask; + out[31] = w22 >> 18; + + return in; +} + +inline const uint8_t* unpack47_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 140737488355327ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + 
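+ // Shared prologue pattern: every unpackN_64 routine reads its input as unaligned
+ // 64-bit little-endian words. Each load is followed by a byte-swap to native order
+ // (a no-op on little-endian hosts) and an 8-byte advance of the input cursor.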
uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 47) | (w1 << 17)) & mask; + out[2] = ((w1 >> 30) | (w2 << 34)) & mask; + out[3] = (w2 >> 13) & mask; + out[4] = ((w2 >> 60) | (w3 << 4)) & mask; + out[5] = ((w3 >> 43) | (w4 << 21)) & mask; + out[6] = ((w4 >> 26) | (w5 << 38)) & mask; + out[7] = (w5 >> 9) & mask; + out[8] = ((w5 >> 56) | (w6 << 8)) & mask; + out[9] = ((w6 >> 39) | (w7 << 25)) & mask; + out[10] = ((w7 >> 22) | (w8 << 42)) & mask; + out[11] = (w8 >> 5) & mask; + out[12] = ((w8 >> 52) | (w9 << 12)) & mask; + out[13] = ((w9 >> 35) | (w10 << 29)) & mask; + out[14] = ((w10 >> 18) | (w11 << 46)) & mask; + out[15] = (w11 >> 1) & mask; + out[16] = ((w11 >> 48) | (w12 << 16)) & mask; + out[17] = ((w12 >> 31) | (w13 << 33)) & mask; + out[18] = (w13 >> 14) & mask; + out[19] = ((w13 >> 61) | (w14 << 3)) & mask; + out[20] = ((w14 >> 44) | (w15 << 20)) & mask; + out[21] = ((w15 >> 27) | (w16 << 37)) & mask; + out[22] = (w16 >> 10) & mask; + out[23] = ((w16 >> 57) | (w17 << 7)) & mask; + out[24] = ((w17 >> 40) | (w18 << 24)) & mask; + out[25] = ((w18 >> 23) | (w19 << 41)) & mask; + out[26] = (w19 >> 6) & mask; + out[27] = ((w19 >> 53) | (w20 << 11)) & mask; + out[28] = ((w20 >> 36) | (w21 << 28)) & mask; + out[29] = ((w21 >> 19) | (w22 << 45)) & mask; + out[30] = (w22 >> 2) & mask; + out[31] = ((w22 >> 49) | (w23 << 15)) & mask; + + return in; +} + +inline const uint8_t* unpack48_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 281474976710655ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = 
arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 48) | (w1 << 16)) & mask; + out[2] = ((w1 >> 32) | (w2 << 32)) & mask; + out[3] = w2 >> 16; + out[4] = (w3)&mask; + out[5] = ((w3 >> 48) | (w4 << 16)) & mask; + out[6] = ((w4 >> 32) | (w5 << 32)) & mask; + out[7] = w5 >> 16; + out[8] = (w6)&mask; + out[9] = ((w6 >> 48) | (w7 << 16)) & mask; + out[10] = ((w7 >> 32) | (w8 << 32)) & mask; + out[11] = w8 >> 16; + out[12] = (w9)&mask; + out[13] = ((w9 >> 48) | (w10 << 16)) & mask; + out[14] = ((w10 >> 32) | (w11 << 32)) & mask; + out[15] = w11 >> 16; + out[16] = (w12)&mask; + out[17] = ((w12 >> 48) | (w13 << 16)) & mask; + out[18] = ((w13 >> 32) | (w14 << 32)) & mask; + out[19] = w14 >> 16; + out[20] = (w15)&mask; + out[21] = ((w15 >> 48) | (w16 << 16)) & mask; + out[22] = ((w16 >> 32) | (w17 << 32)) & mask; + out[23] = w17 >> 16; + out[24] = (w18)&mask; + out[25] = ((w18 >> 48) | (w19 << 16)) & mask; + out[26] = ((w19 >> 32) | (w20 << 32)) & mask; + out[27] = w20 >> 16; + out[28] = (w21)&mask; + out[29] = ((w21 >> 48) | (w22 << 16)) & mask; + out[30] = ((w22 >> 32) | (w23 << 32)) & mask; + out[31] = w23 >> 16; + + return in; +} + +inline const uint8_t* unpack49_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 562949953421311ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = 
util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 49) | (w1 << 15)) & mask; + out[2] = ((w1 >> 34) | (w2 << 30)) & mask; + out[3] = ((w2 >> 19) | (w3 << 45)) & mask; + out[4] = (w3 >> 4) & mask; + out[5] = ((w3 >> 53) | (w4 << 11)) & mask; + out[6] = ((w4 >> 38) | (w5 << 26)) & mask; + out[7] = ((w5 >> 23) | (w6 << 41)) & mask; + out[8] = (w6 >> 8) & mask; + out[9] = ((w6 >> 57) | (w7 << 7)) & mask; + out[10] = ((w7 >> 42) | (w8 << 22)) & mask; + out[11] = ((w8 >> 27) | (w9 << 37)) & mask; + out[12] = (w9 >> 12) & mask; + out[13] = ((w9 >> 61) | (w10 << 3)) & mask; + out[14] = ((w10 >> 46) | (w11 << 18)) & mask; + out[15] = ((w11 >> 31) | (w12 << 33)) & mask; + out[16] = ((w12 >> 16) | (w13 << 48)) & mask; + out[17] = (w13 >> 1) & mask; + out[18] = ((w13 >> 50) | (w14 << 14)) & mask; + out[19] = ((w14 >> 35) | (w15 << 29)) & mask; + out[20] = ((w15 >> 20) | (w16 << 44)) & mask; + out[21] = (w16 >> 5) & mask; + out[22] = ((w16 >> 54) | (w17 << 10)) & mask; + out[23] = ((w17 >> 39) | (w18 << 25)) & mask; + out[24] = ((w18 >> 24) | (w19 << 40)) & mask; + out[25] = (w19 >> 9) & mask; + out[26] = ((w19 >> 58) | (w20 << 6)) & mask; + out[27] = ((w20 >> 43) | (w21 << 21)) & mask; + out[28] = ((w21 >> 28) | (w22 << 36)) & mask; + out[29] = (w22 >> 13) & mask; + out[30] = ((w22 >> 62) | (w23 << 2)) & mask; + out[31] = ((w23 >> 47) | (w24 << 17)) & mask; + + return in; +} + +inline const uint8_t* unpack50_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1125899906842623ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = 
arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 50) | (w1 << 14)) & mask; + out[2] = ((w1 >> 36) | (w2 << 28)) & mask; + out[3] = ((w2 >> 22) | (w3 << 42)) & mask; + out[4] = (w3 >> 8) & mask; + out[5] = ((w3 >> 58) | (w4 << 6)) & mask; + out[6] = ((w4 >> 44) | (w5 << 20)) & mask; + out[7] = ((w5 >> 30) | (w6 << 34)) & mask; + out[8] = ((w6 >> 16) | (w7 << 48)) & mask; + out[9] = (w7 >> 2) & mask; + out[10] = ((w7 >> 52) | (w8 << 12)) & mask; + out[11] = ((w8 >> 38) | (w9 << 26)) & mask; + out[12] = ((w9 >> 24) | (w10 << 40)) & mask; + out[13] = (w10 >> 10) & mask; + out[14] = ((w10 >> 60) | (w11 << 4)) & mask; + out[15] = ((w11 >> 46) | (w12 << 18)) & mask; + out[16] = ((w12 >> 32) | (w13 << 32)) & mask; + out[17] = ((w13 >> 18) | (w14 << 46)) & mask; + out[18] = (w14 >> 4) & mask; + out[19] = ((w14 >> 54) | (w15 << 10)) & mask; + out[20] = ((w15 >> 40) | (w16 << 24)) & mask; + out[21] = ((w16 >> 26) | (w17 << 38)) & mask; + out[22] = (w17 >> 12) & mask; + out[23] = ((w17 >> 62) | (w18 << 2)) & mask; + out[24] = ((w18 >> 48) | (w19 << 16)) & mask; + out[25] = ((w19 >> 34) | (w20 << 30)) & mask; + out[26] = ((w20 >> 20) | (w21 << 44)) & mask; + out[27] = (w21 >> 6) & mask; + out[28] = ((w21 >> 56) | (w22 << 8)) & mask; + out[29] = ((w22 >> 42) | (w23 << 22)) & mask; + out[30] = ((w23 >> 28) | (w24 << 36)) & mask; + out[31] = w24 >> 14; + + return in; +} + +inline const uint8_t* unpack51_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2251799813685247ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); 
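+ // The mask in this routine, 2251799813685247ULL, is 2^51 - 1: after the shifted
+ // words are OR'd together, it keeps exactly the low 51 bits of each output value.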
+ in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 51) | (w1 << 13)) & mask; + out[2] = ((w1 >> 38) | (w2 << 26)) & mask; + out[3] = ((w2 >> 25) | (w3 << 39)) & mask; + out[4] = (w3 >> 12) & mask; + out[5] = ((w3 >> 63) | (w4 << 1)) & mask; + out[6] = ((w4 >> 50) | (w5 << 14)) & mask; + out[7] = ((w5 >> 37) | (w6 << 27)) & mask; + out[8] = ((w6 >> 24) | (w7 << 40)) & mask; + out[9] = (w7 >> 11) & mask; + out[10] = ((w7 >> 62) | (w8 << 2)) & mask; + out[11] = ((w8 >> 49) | (w9 << 15)) & mask; + out[12] = ((w9 >> 36) | (w10 << 28)) & mask; + out[13] = ((w10 >> 23) | (w11 << 41)) & mask; + out[14] = (w11 >> 10) & mask; + out[15] = ((w11 >> 61) | (w12 << 3)) & mask; + out[16] = ((w12 >> 48) | (w13 << 16)) & mask; + out[17] = ((w13 >> 35) | (w14 << 29)) & mask; + out[18] = ((w14 >> 22) | (w15 << 42)) & mask; + out[19] = (w15 >> 9) & mask; + out[20] = ((w15 >> 60) | (w16 << 4)) & mask; + out[21] = ((w16 >> 47) | (w17 << 17)) & mask; + out[22] = ((w17 >> 34) | (w18 << 30)) & mask; + out[23] = ((w18 >> 21) | (w19 << 43)) & mask; + out[24] = (w19 >> 8) & mask; + out[25] = ((w19 >> 59) | (w20 << 5)) & mask; + out[26] = ((w20 >> 46) | (w21 << 18)) & mask; + out[27] = ((w21 >> 33) | (w22 << 31)) & mask; + out[28] = ((w22 >> 20) | (w23 << 44)) & mask; + out[29] = (w23 >> 7) & mask; + out[30] = ((w23 >> 58) | (w24 << 6)) & mask; + out[31] = ((w24 >> 45) | (w25 << 19)) & mask; + + return in; +} + +inline const uint8_t* unpack52_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4503599627370495ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 
8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 52) | (w1 << 12)) & mask; + out[2] = ((w1 >> 40) | (w2 << 24)) & mask; + out[3] = ((w2 >> 28) | (w3 << 36)) & mask; + out[4] = ((w3 >> 16) | (w4 << 48)) & mask; + out[5] = (w4 >> 4) & mask; + out[6] = ((w4 >> 56) | (w5 << 8)) & mask; + out[7] = ((w5 >> 44) | (w6 << 20)) & mask; + out[8] = ((w6 >> 32) | (w7 << 32)) & mask; + out[9] = ((w7 >> 20) | (w8 << 44)) & mask; + out[10] = (w8 >> 8) & mask; + out[11] = ((w8 >> 60) | (w9 << 4)) & mask; + out[12] = ((w9 >> 48) | (w10 << 16)) & mask; + out[13] = ((w10 >> 36) | (w11 << 28)) & mask; + out[14] = ((w11 >> 24) | (w12 << 40)) & mask; + out[15] = w12 >> 12; + out[16] = (w13)&mask; + out[17] = ((w13 >> 52) | (w14 << 12)) & mask; + out[18] = ((w14 >> 40) | (w15 << 24)) & mask; + out[19] = ((w15 >> 28) | (w16 << 36)) & mask; + out[20] = ((w16 >> 16) | (w17 << 48)) & mask; + out[21] = (w17 >> 4) & mask; + out[22] = ((w17 >> 56) | (w18 << 8)) & mask; + out[23] = ((w18 >> 44) | (w19 << 20)) & mask; + out[24] = ((w19 >> 32) | (w20 << 32)) & mask; + out[25] = ((w20 >> 20) | (w21 << 44)) & mask; + out[26] = (w21 >> 8) & mask; + out[27] = ((w21 >> 60) | (w22 << 4)) & mask; + out[28] = ((w22 >> 48) | (w23 << 16)) & mask; + out[29] = ((w23 >> 36) | (w24 << 28)) & mask; + out[30] = ((w24 >> 24) | (w25 << 40)) & mask; + out[31] = w25 >> 12; + + return in; +} + +inline const uint8_t* unpack53_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9007199254740991ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); 
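+ // util::SafeLoadAs performs a memcpy-style unaligned read, avoiding undefined
+ // behaviour on pointers without 8-byte alignment; in compilable Arrow sources the
+ // call carries an explicit template argument, util::SafeLoadAs<uint64_t>(in).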
+ w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 53) | (w1 << 11)) & mask; + out[2] = ((w1 >> 42) | (w2 << 22)) & mask; + out[3] = ((w2 >> 31) | (w3 << 33)) & mask; + out[4] = ((w3 >> 20) | (w4 << 44)) & mask; + out[5] = (w4 >> 9) & mask; + out[6] = ((w4 >> 62) | (w5 << 2)) & mask; + out[7] = ((w5 >> 51) | (w6 << 13)) & mask; + out[8] = ((w6 >> 40) | (w7 << 24)) & mask; + out[9] = ((w7 >> 29) | (w8 << 35)) & mask; + out[10] = ((w8 >> 18) | (w9 << 46)) & mask; + out[11] = (w9 >> 7) & mask; + out[12] = ((w9 >> 60) | (w10 << 4)) & mask; + out[13] = ((w10 >> 49) | (w11 << 15)) & mask; + out[14] = ((w11 >> 38) | (w12 << 26)) & mask; + out[15] = ((w12 >> 27) | (w13 << 37)) & mask; + out[16] = ((w13 >> 16) | (w14 << 48)) & mask; + out[17] = (w14 >> 5) & mask; + out[18] = ((w14 >> 58) | (w15 << 6)) & mask; + out[19] = ((w15 >> 47) | (w16 << 17)) & mask; + out[20] = ((w16 >> 36) | (w17 << 28)) & mask; + out[21] = ((w17 >> 25) | (w18 << 39)) & mask; + out[22] = ((w18 >> 14) | (w19 << 50)) & mask; + out[23] = (w19 >> 3) & mask; + out[24] = ((w19 >> 56) | (w20 << 8)) & mask; + out[25] = ((w20 >> 45) | (w21 << 19)) & mask; + out[26] = ((w21 >> 34) | (w22 << 30)) & mask; + out[27] = ((w22 >> 23) | (w23 << 41)) & mask; + out[28] = ((w23 >> 12) | (w24 << 52)) & 
mask; + out[29] = (w24 >> 1) & mask; + out[30] = ((w24 >> 54) | (w25 << 10)) & mask; + out[31] = ((w25 >> 43) | (w26 << 21)) & mask; + + return in; +} + +inline const uint8_t* unpack54_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 18014398509481983ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 54) | (w1 << 10)) & mask; + out[2] = ((w1 >> 44) | (w2 << 20)) & mask; + out[3] = ((w2 >> 34) | (w3 << 30)) & mask; + out[4] = ((w3 >> 24) | (w4 << 40)) & mask; + out[5] = ((w4 >> 14) | (w5 << 50)) & mask; + out[6] = (w5 >> 4) & mask; + out[7] = ((w5 >> 58) | (w6 << 6)) & mask; + out[8] = ((w6 >> 48) | (w7 << 16)) & mask; + out[9] = ((w7 >> 38) | (w8 << 26)) & mask; + out[10] = ((w8 >> 28) | (w9 << 36)) & mask; + out[11] = ((w9 >> 18) | (w10 << 46)) & mask; + out[12] = (w10 >> 8) & mask; + out[13] = ((w10 >> 62) | (w11 << 2)) & mask; + out[14] = ((w11 >> 52) | (w12 << 12)) & mask; + out[15] = ((w12 >> 42) | (w13 << 22)) 
& mask; + out[16] = ((w13 >> 32) | (w14 << 32)) & mask; + out[17] = ((w14 >> 22) | (w15 << 42)) & mask; + out[18] = ((w15 >> 12) | (w16 << 52)) & mask; + out[19] = (w16 >> 2) & mask; + out[20] = ((w16 >> 56) | (w17 << 8)) & mask; + out[21] = ((w17 >> 46) | (w18 << 18)) & mask; + out[22] = ((w18 >> 36) | (w19 << 28)) & mask; + out[23] = ((w19 >> 26) | (w20 << 38)) & mask; + out[24] = ((w20 >> 16) | (w21 << 48)) & mask; + out[25] = (w21 >> 6) & mask; + out[26] = ((w21 >> 60) | (w22 << 4)) & mask; + out[27] = ((w22 >> 50) | (w23 << 14)) & mask; + out[28] = ((w23 >> 40) | (w24 << 24)) & mask; + out[29] = ((w24 >> 30) | (w25 << 34)) & mask; + out[30] = ((w25 >> 20) | (w26 << 44)) & mask; + out[31] = w26 >> 10; + + return in; +} + +inline const uint8_t* unpack55_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 36028797018963967ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); 
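+ // 32 values * 55 bits = 220 bytes = 27 full words plus 4 bytes, so only the low
+ // half of the final word belongs to this batch and the cursor advances by 4, not 8.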
+ in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 55) | (w1 << 9)) & mask; + out[2] = ((w1 >> 46) | (w2 << 18)) & mask; + out[3] = ((w2 >> 37) | (w3 << 27)) & mask; + out[4] = ((w3 >> 28) | (w4 << 36)) & mask; + out[5] = ((w4 >> 19) | (w5 << 45)) & mask; + out[6] = ((w5 >> 10) | (w6 << 54)) & mask; + out[7] = (w6 >> 1) & mask; + out[8] = ((w6 >> 56) | (w7 << 8)) & mask; + out[9] = ((w7 >> 47) | (w8 << 17)) & mask; + out[10] = ((w8 >> 38) | (w9 << 26)) & mask; + out[11] = ((w9 >> 29) | (w10 << 35)) & mask; + out[12] = ((w10 >> 20) | (w11 << 44)) & mask; + out[13] = ((w11 >> 11) | (w12 << 53)) & mask; + out[14] = (w12 >> 2) & mask; + out[15] = ((w12 >> 57) | (w13 << 7)) & mask; + out[16] = ((w13 >> 48) | (w14 << 16)) & mask; + out[17] = ((w14 >> 39) | (w15 << 25)) & mask; + out[18] = ((w15 >> 30) | (w16 << 34)) & mask; + out[19] = ((w16 >> 21) | (w17 << 43)) & mask; + out[20] = ((w17 >> 12) | (w18 << 52)) & mask; + out[21] = (w18 >> 3) & mask; + out[22] = ((w18 >> 58) | (w19 << 6)) & mask; + out[23] = ((w19 >> 49) | (w20 << 15)) & mask; + out[24] = ((w20 >> 40) | (w21 << 24)) & mask; + out[25] = ((w21 >> 31) | (w22 << 33)) & mask; + out[26] = ((w22 >> 22) | (w23 << 42)) & mask; + out[27] = ((w23 >> 13) | (w24 << 51)) & mask; + out[28] = (w24 >> 4) & mask; + out[29] = ((w24 >> 59) | (w25 << 5)) & mask; + out[30] = ((w25 >> 50) | (w26 << 14)) & mask; + out[31] = ((w26 >> 41) | (w27 << 23)) & mask; + + return in; +} + +inline const uint8_t* unpack56_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 72057594037927935ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = 
arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 56) | (w1 << 8)) & mask; + out[2] = ((w1 >> 48) | (w2 << 16)) & mask; + out[3] = ((w2 >> 40) | (w3 << 24)) & mask; + out[4] = ((w3 >> 32) | (w4 << 32)) & mask; + out[5] = ((w4 >> 24) | (w5 << 40)) & mask; + out[6] = ((w5 >> 16) | (w6 << 48)) & mask; + out[7] = w6 >> 8; + out[8] = (w7)&mask; + out[9] = ((w7 >> 56) | (w8 << 8)) & mask; + out[10] = ((w8 >> 48) | (w9 << 16)) & mask; + out[11] = ((w9 >> 40) | (w10 << 24)) & mask; + out[12] = ((w10 >> 32) | (w11 << 32)) & mask; + out[13] = ((w11 >> 24) | (w12 << 40)) & mask; + out[14] = ((w12 >> 16) | (w13 << 48)) & mask; + out[15] = w13 >> 8; + out[16] = (w14)&mask; + out[17] = ((w14 >> 56) | (w15 << 8)) & mask; + out[18] = ((w15 >> 48) | (w16 << 16)) & mask; + out[19] = ((w16 >> 40) | (w17 << 24)) & mask; + out[20] = ((w17 >> 32) | (w18 << 32)) & mask; + out[21] = ((w18 >> 24) | (w19 << 40)) & mask; + out[22] = ((w19 >> 16) | (w20 << 48)) & mask; + out[23] = w20 >> 8; + out[24] = (w21)&mask; + out[25] = ((w21 >> 56) | (w22 << 8)) & mask; + out[26] = ((w22 >> 48) | (w23 << 16)) & mask; + out[27] = ((w23 >> 40) | (w24 << 24)) & mask; + out[28] = ((w24 >> 32) | (w25 << 32)) & mask; + out[29] = ((w25 >> 24) | (w26 << 40)) & mask; + out[30] = ((w26 >> 16) | (w27 << 48)) & mask; + out[31] = w27 >> 8; + + return in; +} + +inline const uint8_t* unpack57_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 144115188075855871ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = 
util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 57) | (w1 << 7)) & mask; + out[2] = ((w1 >> 50) | (w2 << 14)) & mask; + out[3] = ((w2 >> 43) | (w3 << 21)) & mask; + out[4] = ((w3 >> 36) | (w4 << 28)) & mask; + out[5] = ((w4 >> 29) | (w5 << 35)) & mask; + out[6] = ((w5 >> 22) | (w6 << 42)) & mask; + out[7] = ((w6 >> 15) | (w7 << 49)) & mask; + out[8] = ((w7 >> 8) | (w8 << 56)) & mask; + out[9] = (w8 >> 1) & mask; + out[10] = ((w8 >> 58) | (w9 << 6)) & mask; + out[11] = ((w9 >> 51) | (w10 << 13)) & mask; + out[12] = ((w10 >> 44) | (w11 << 20)) & mask; + out[13] = ((w11 >> 37) | (w12 << 27)) & mask; + out[14] = ((w12 >> 30) | (w13 << 34)) & mask; + out[15] = ((w13 >> 23) | (w14 << 41)) & mask; + out[16] = ((w14 >> 16) | (w15 << 48)) & mask; + out[17] = ((w15 >> 9) | (w16 << 55)) & mask; + out[18] = (w16 >> 2) & mask; + out[19] = ((w16 >> 59) | (w17 << 5)) & mask; + out[20] = ((w17 >> 52) | (w18 << 12)) & mask; + out[21] = ((w18 >> 45) | (w19 << 19)) & mask; + out[22] = ((w19 >> 38) | (w20 << 26)) & mask; + out[23] = ((w20 >> 31) | (w21 << 33)) & mask; + out[24] = ((w21 >> 24) | (w22 << 40)) & mask; + out[25] = ((w22 >> 17) | (w23 << 47)) & mask; + out[26] = ((w23 >> 10) | (w24 << 54)) & mask; + out[27] = (w24 >> 3) & mask; + out[28] = ((w24 >> 60) | (w25 << 4)) & mask; + out[29] = ((w25 >> 53) | (w26 << 11)) & mask; + out[30] = ((w26 >> 46) | (w27 << 18)) & mask; + out[31] = ((w27 >> 39) | (w28 << 25)) & mask; + + return in; +} + +inline const uint8_t* unpack58_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 288230376151711743ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = 
arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 58) | (w1 << 6)) & mask; + out[2] = ((w1 >> 52) | (w2 << 12)) & mask; + out[3] = ((w2 >> 46) | (w3 << 18)) & mask; + out[4] = ((w3 >> 40) | (w4 << 24)) & mask; + out[5] = ((w4 >> 34) | (w5 << 30)) & mask; + out[6] = ((w5 >> 28) | (w6 << 36)) & mask; + out[7] = ((w6 >> 22) | (w7 << 42)) & mask; + out[8] = ((w7 >> 16) | (w8 << 48)) & mask; + out[9] = ((w8 >> 10) | (w9 << 54)) & mask; + out[10] = (w9 >> 4) & mask; + out[11] = ((w9 >> 62) | (w10 << 2)) & mask; + out[12] = ((w10 >> 56) | (w11 << 8)) & mask; + out[13] = ((w11 >> 50) | (w12 << 14)) & mask; + out[14] = ((w12 >> 44) | (w13 << 20)) & mask; + out[15] = ((w13 >> 38) | (w14 << 26)) & mask; + out[16] = ((w14 >> 32) | (w15 << 32)) & mask; + out[17] = ((w15 >> 26) | (w16 << 38)) & mask; + out[18] = ((w16 >> 20) | (w17 << 44)) & mask; + out[19] = ((w17 >> 14) | (w18 << 50)) & mask; + out[20] = ((w18 >> 8) | (w19 << 56)) & mask; + out[21] = (w19 >> 2) & mask; + out[22] = ((w19 >> 60) | (w20 << 4)) & mask; + out[23] = ((w20 >> 54) | (w21 << 10)) & mask; + out[24] = ((w21 >> 48) | (w22 << 16)) & mask; + out[25] = ((w22 >> 42) | (w23 << 22)) & mask; + out[26] = ((w23 >> 36) | (w24 << 28)) & mask; + out[27] = ((w24 >> 30) | (w25 << 34)) & mask; + out[28] = 
((w25 >> 24) | (w26 << 40)) & mask; + out[29] = ((w26 >> 18) | (w27 << 46)) & mask; + out[30] = ((w27 >> 12) | (w28 << 52)) & mask; + out[31] = w28 >> 6; + + return in; +} + +inline const uint8_t* unpack59_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 576460752303423487ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 59) | (w1 << 5)) & mask; + out[2] = ((w1 >> 54) | (w2 << 10)) & mask; + out[3] = ((w2 >> 49) | (w3 << 15)) & mask; + out[4] = ((w3 >> 44) | (w4 << 20)) & mask; + out[5] = ((w4 >> 39) | (w5 << 25)) & mask; + out[6] = ((w5 >> 34) | (w6 << 30)) & mask; + out[7] = ((w6 >> 29) | (w7 << 35)) & mask; + out[8] = ((w7 
>> 24) | (w8 << 40)) & mask; + out[9] = ((w8 >> 19) | (w9 << 45)) & mask; + out[10] = ((w9 >> 14) | (w10 << 50)) & mask; + out[11] = ((w10 >> 9) | (w11 << 55)) & mask; + out[12] = (w11 >> 4) & mask; + out[13] = ((w11 >> 63) | (w12 << 1)) & mask; + out[14] = ((w12 >> 58) | (w13 << 6)) & mask; + out[15] = ((w13 >> 53) | (w14 << 11)) & mask; + out[16] = ((w14 >> 48) | (w15 << 16)) & mask; + out[17] = ((w15 >> 43) | (w16 << 21)) & mask; + out[18] = ((w16 >> 38) | (w17 << 26)) & mask; + out[19] = ((w17 >> 33) | (w18 << 31)) & mask; + out[20] = ((w18 >> 28) | (w19 << 36)) & mask; + out[21] = ((w19 >> 23) | (w20 << 41)) & mask; + out[22] = ((w20 >> 18) | (w21 << 46)) & mask; + out[23] = ((w21 >> 13) | (w22 << 51)) & mask; + out[24] = ((w22 >> 8) | (w23 << 56)) & mask; + out[25] = (w23 >> 3) & mask; + out[26] = ((w23 >> 62) | (w24 << 2)) & mask; + out[27] = ((w24 >> 57) | (w25 << 7)) & mask; + out[28] = ((w25 >> 52) | (w26 << 12)) & mask; + out[29] = ((w26 >> 47) | (w27 << 17)) & mask; + out[30] = ((w27 >> 42) | (w28 << 22)) & mask; + out[31] = ((w28 >> 37) | (w29 << 27)) & mask; + + return in; +} + +inline const uint8_t* unpack60_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1152921504606846975ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + 
uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 60) | (w1 << 4)) & mask; + out[2] = ((w1 >> 56) | (w2 << 8)) & mask; + out[3] = ((w2 >> 52) | (w3 << 12)) & mask; + out[4] = ((w3 >> 48) | (w4 << 16)) & mask; + out[5] = ((w4 >> 44) | (w5 << 20)) & mask; + out[6] = ((w5 >> 40) | (w6 << 24)) & mask; + out[7] = ((w6 >> 36) | (w7 << 28)) & mask; + out[8] = ((w7 >> 32) | (w8 << 32)) & mask; + out[9] = ((w8 >> 28) | (w9 << 36)) & mask; + out[10] = ((w9 >> 24) | (w10 << 40)) & mask; + out[11] = ((w10 >> 20) | (w11 << 44)) & mask; + out[12] = ((w11 >> 16) | (w12 << 48)) & mask; + out[13] = ((w12 >> 12) | (w13 << 52)) & mask; + out[14] = ((w13 >> 8) | (w14 << 56)) & mask; + out[15] = w14 >> 4; + out[16] = (w15)&mask; + out[17] = ((w15 >> 60) | (w16 << 4)) & mask; + out[18] = ((w16 >> 56) | (w17 << 8)) & mask; + out[19] = ((w17 >> 52) | (w18 << 12)) & mask; + out[20] = ((w18 >> 48) | (w19 << 16)) & mask; + out[21] = ((w19 >> 44) | (w20 << 20)) & mask; + out[22] = ((w20 >> 40) | (w21 << 24)) & mask; + out[23] = ((w21 >> 36) | (w22 << 28)) & mask; + out[24] = ((w22 >> 32) | (w23 << 32)) & mask; + out[25] = ((w23 >> 28) | (w24 << 36)) & mask; + out[26] = ((w24 >> 24) | (w25 << 40)) & mask; + out[27] = ((w25 >> 20) | (w26 << 44)) & mask; + out[28] = ((w26 >> 16) | (w27 << 48)) & mask; + out[29] = ((w27 >> 12) | (w28 << 52)) & mask; + out[30] = ((w28 >> 8) | (w29 << 56)) & mask; + out[31] = w29 >> 4; + + return in; +} + +inline const uint8_t* unpack61_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2305843009213693951ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); 
+ in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 61) | (w1 << 3)) & mask; + out[2] = ((w1 >> 58) | (w2 << 6)) & mask; + out[3] = ((w2 >> 55) | (w3 << 9)) & mask; + out[4] = ((w3 >> 52) | (w4 << 12)) & mask; + out[5] = ((w4 >> 49) | (w5 << 15)) & mask; + out[6] = ((w5 >> 46) | (w6 << 18)) & mask; + out[7] = ((w6 >> 43) | (w7 << 21)) & mask; + out[8] = ((w7 >> 40) | (w8 << 24)) & mask; + out[9] = ((w8 >> 37) | (w9 << 27)) & mask; + out[10] = ((w9 >> 34) | (w10 << 30)) & mask; + out[11] = ((w10 >> 31) | (w11 << 33)) & mask; + out[12] = ((w11 >> 28) | (w12 << 36)) & mask; + out[13] = ((w12 >> 25) | (w13 << 39)) & mask; + out[14] = ((w13 >> 22) | (w14 << 42)) & mask; + out[15] = ((w14 >> 19) | (w15 << 45)) & mask; + out[16] = ((w15 >> 16) | (w16 << 48)) & mask; + out[17] = ((w16 >> 13) | (w17 << 51)) & mask; + out[18] = ((w17 >> 10) | (w18 << 54)) & mask; + out[19] = ((w18 >> 7) | (w19 << 57)) & mask; + out[20] = ((w19 >> 4) | (w20 << 60)) & mask; + out[21] = (w20 >> 1) & mask; + out[22] = ((w20 >> 62) | (w21 << 2)) & mask; + out[23] = ((w21 >> 59) | (w22 << 5)) & mask; + out[24] = ((w22 >> 56) | (w23 << 8)) & mask; + out[25] = ((w23 >> 53) | (w24 << 11)) & mask; + out[26] = ((w24 >> 50) | (w25 << 14)) & mask; + out[27] = ((w25 >> 47) | (w26 << 17)) & mask; + out[28] = ((w26 >> 44) | (w27 << 20)) & mask; + out[29] = ((w27 >> 41) | (w28 << 23)) & mask; + out[30] = ((w28 >> 38) | (w29 << 26)) & mask; + out[31] = ((w29 >> 35) | (w30 << 29)) & mask; + + return in; +} + +inline const uint8_t* unpack62_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4611686018427387903ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; 
+ uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 62) | (w1 << 2)) & mask; + out[2] = ((w1 >> 60) | (w2 << 4)) & mask; + out[3] = ((w2 >> 58) | (w3 << 6)) & mask; + out[4] = ((w3 >> 56) | (w4 << 8)) & mask; + out[5] = ((w4 >> 54) | (w5 << 10)) & mask; + out[6] = ((w5 >> 52) | (w6 << 12)) & mask; + out[7] = ((w6 >> 50) | (w7 << 14)) & mask; + out[8] = ((w7 >> 48) | (w8 << 16)) & mask; + out[9] = ((w8 >> 46) | (w9 << 18)) & mask; + out[10] = ((w9 >> 44) | (w10 << 20)) & mask; + out[11] = ((w10 >> 42) | (w11 << 22)) & mask; + out[12] = ((w11 >> 40) | (w12 << 24)) & mask; + out[13] = ((w12 >> 38) | (w13 << 26)) & mask; + out[14] = ((w13 >> 36) | (w14 << 28)) & mask; + out[15] = ((w14 >> 34) | (w15 << 30)) & mask; + out[16] = ((w15 >> 32) | (w16 << 32)) & mask; + out[17] = ((w16 >> 30) | (w17 << 34)) & mask; + out[18] = ((w17 >> 28) | (w18 << 36)) & mask; + out[19] = ((w18 >> 26) | (w19 << 38)) & mask; + out[20] = ((w19 >> 24) 
| (w20 << 40)) & mask; + out[21] = ((w20 >> 22) | (w21 << 42)) & mask; + out[22] = ((w21 >> 20) | (w22 << 44)) & mask; + out[23] = ((w22 >> 18) | (w23 << 46)) & mask; + out[24] = ((w23 >> 16) | (w24 << 48)) & mask; + out[25] = ((w24 >> 14) | (w25 << 50)) & mask; + out[26] = ((w25 >> 12) | (w26 << 52)) & mask; + out[27] = ((w26 >> 10) | (w27 << 54)) & mask; + out[28] = ((w27 >> 8) | (w28 << 56)) & mask; + out[29] = ((w28 >> 6) | (w29 << 58)) & mask; + out[30] = ((w29 >> 4) | (w30 << 60)) & mask; + out[31] = w30 >> 2; + + return in; +} + +inline const uint8_t* unpack63_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9223372036854775807ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = 
arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 63) | (w1 << 1)) & mask; + out[2] = ((w1 >> 62) | (w2 << 2)) & mask; + out[3] = ((w2 >> 61) | (w3 << 3)) & mask; + out[4] = ((w3 >> 60) | (w4 << 4)) & mask; + out[5] = ((w4 >> 59) | (w5 << 5)) & mask; + out[6] = ((w5 >> 58) | (w6 << 6)) & mask; + out[7] = ((w6 >> 57) | (w7 << 7)) & mask; + out[8] = ((w7 >> 56) | (w8 << 8)) & mask; + out[9] = ((w8 >> 55) | (w9 << 9)) & mask; + out[10] = ((w9 >> 54) | (w10 << 10)) & mask; + out[11] = ((w10 >> 53) | (w11 << 11)) & mask; + out[12] = ((w11 >> 52) | (w12 << 12)) & mask; + out[13] = ((w12 >> 51) | (w13 << 13)) & mask; + out[14] = ((w13 >> 50) | (w14 << 14)) & mask; + out[15] = ((w14 >> 49) | (w15 << 15)) & mask; + out[16] = ((w15 >> 48) | (w16 << 16)) & mask; + out[17] = ((w16 >> 47) | (w17 << 17)) & mask; + out[18] = ((w17 >> 46) | (w18 << 18)) & mask; + out[19] = ((w18 >> 45) | (w19 << 19)) & mask; + out[20] = ((w19 >> 44) | (w20 << 20)) & mask; + out[21] = ((w20 >> 43) | (w21 << 21)) & mask; + out[22] = ((w21 >> 42) | (w22 << 22)) & mask; + out[23] = ((w22 >> 41) | (w23 << 23)) & mask; + out[24] = ((w23 >> 40) | (w24 << 24)) & mask; + out[25] = ((w24 >> 39) | (w25 << 25)) & mask; + out[26] = ((w25 >> 38) | (w26 << 26)) & mask; + out[27] = ((w26 >> 37) | (w27 << 27)) & mask; + out[28] = ((w27 >> 36) | (w28 << 28)) & mask; + out[29] = ((w28 >> 35) | (w29 << 29)) & mask; + out[30] = ((w29 >> 34) | (w30 << 30)) & mask; + out[31] = ((w30 >> 33) | (w31 << 31)) & mask; + + return in; +} + +inline const uint8_t* unpack64_64(const uint8_t* in, uint64_t* out) { + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + 
uint64_t w18 = util::SafeLoadAs<uint64_t>(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs<uint64_t>(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs<uint64_t>(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs<uint64_t>(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs<uint64_t>(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs<uint64_t>(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs<uint64_t>(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs<uint64_t>(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs<uint64_t>(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs<uint64_t>(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs<uint64_t>(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs<uint64_t>(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs<uint64_t>(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs<uint64_t>(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 8; + out[0] = w0; + out[1] = w1; + out[2] = w2; + out[3] = w3; + out[4] = w4; + out[5] = w5; + out[6] = w6; + out[7] = w7; + out[8] = w8; + out[9] = w9; + out[10] = w10; + out[11] = w11; + out[12] = w12; + out[13] = w13; + out[14] = w14; + out[15] = w15; + out[16] = w16; + out[17] = w17; + out[18] = w18; + out[19] = w19; + out[20] = w20; + out[21] = w21; + out[22] = w22; + out[23] = w23; + out[24] = w24; + out[25] = w25; + out[26] = w26; + out[27] = w27; + out[28] = w28; + out[29] = w29; + out[30] = w30; + out[31] = w31; + + return in; +} + +} // namespace internal +} // namespace arrow
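The unpackN_64 kernels above are machine-generated, fully unrolled specializations of one generic transform: 32 values of N bits each, packed back-to-back in little-endian order, widened to uint64_t. As a reading aid, here is a compact scalar sketch of that transform. The function name is hypothetical, it relies on GCC/Clang's unsigned __int128, and unlike the bounds-exact generated code it assumes at least 8 readable bytes past the packed region.

#include <cstdint>

inline const uint8_t* UnpackGeneric64(const uint8_t* in, uint64_t* out,
                                      int bit_width) {
  const uint64_t mask =
      bit_width == 64 ? ~uint64_t{0} : (uint64_t{1} << bit_width) - 1;
  for (int i = 0; i < 32; ++i) {
    const int64_t bit_offset = static_cast<int64_t>(i) * bit_width;
    const uint8_t* base = in + (bit_offset >> 3);
    const int shift = static_cast<int>(bit_offset & 7);
    // Gather 9 bytes so that shift (<= 7) plus bit_width (<= 64) bits are
    // covered; this over-read is why the sketch needs padded input.
    unsigned __int128 window = 0;
    for (int b = 8; b >= 0; --b) {
      window = (window << 8) | base[b];
    }
    out[i] = static_cast<uint64_t>(window >> shift) & mask;
  }
  // 32 values of bit_width bits is always a whole number of bytes.
  return in + 4 * bit_width;
}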
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h new file mode 100644 index 0000000000000000000000000000000000000000..97f6b61a1f8cebd297a5f4a8fe4401b6073de45f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <memory> +#include <type_traits> +#include <utility> + +namespace arrow { +namespace internal { + +template <typename OutputType, typename InputType> +inline OutputType checked_cast(InputType&& value) { + static_assert(std::is_class<typename std::remove_pointer<typename std::remove_reference<InputType>::type>::type>::value, + "checked_cast input type must be a class"); + static_assert(std::is_class<typename std::remove_pointer<typename std::remove_reference<OutputType>::type>::type>::value, + "checked_cast output type must be a class"); +#ifdef NDEBUG + return static_cast<OutputType>(value); +#else + return dynamic_cast<OutputType>(value); +#endif +} + +template <class T, class U> +std::shared_ptr<T> checked_pointer_cast(std::shared_ptr<U> r) noexcept { +#ifdef NDEBUG + return std::static_pointer_cast<T>(std::move(r)); +#else + return std::dynamic_pointer_cast<T>(std::move(r)); +#endif +} + +template <class T, class U> +std::unique_ptr<T> checked_pointer_cast(std::unique_ptr<U> r) noexcept { +#ifdef NDEBUG + return std::unique_ptr<T>(static_cast<T*>(r.release())); +#else + return std::unique_ptr<T>(dynamic_cast<T*>(r.release())); +#endif +} + +} // namespace internal +} // namespace arrow
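A brief usage sketch of the casts declared above (the helper function is hypothetical): the downcast costs nothing in release builds (static_cast under NDEBUG) and is validated in debug builds, where a type mismatch surfaces as a null pointer from dynamic_cast.

#include <cstdint>
#include <memory>

#include "arrow/array.h"
#include "arrow/util/checked_cast.h"

int64_t SumNonNull(const std::shared_ptr<arrow::Array>& arr) {
  // Caller guarantees arr->type_id() == arrow::Type::INT64.
  auto typed = arrow::internal::checked_pointer_cast<arrow::Int64Array>(arr);
  int64_t sum = 0;
  for (int64_t i = 0; i < typed->length(); ++i) {
    if (typed->IsValid(i)) sum += typed->Value(i);
  }
  return sum;
}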
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bf4d5e12d02d349c3a0e0fce43f6be5ef4d585 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h @@ -0,0 +1,241 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <cstdint> +#include <limits> +#include <memory> +#include <optional> +#include <string> + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min(); + +/// \brief Streaming compressor interface +/// +class ARROW_EXPORT Compressor { + public: + virtual ~Compressor() = default; + + struct CompressResult { + int64_t bytes_read; + int64_t bytes_written; + }; + struct FlushResult { + int64_t bytes_written; + bool should_retry; + }; + struct EndResult { + int64_t bytes_written; + bool should_retry; + }; + + /// \brief Compress some input. + /// + /// If bytes_read is 0 on return, then a larger output buffer should be supplied. + virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Flush part of the compressed output. + /// + /// If should_retry is true on return, Flush() should be called again + /// with a larger buffer. + virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0; + + /// \brief End compressing, doing whatever is necessary to end the stream. + /// + /// If should_retry is true on return, End() should be called again + /// with a larger buffer. Otherwise, the Compressor should not be used anymore. + /// + /// End() implies Flush(). + virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0; + + // XXX add methods for buffer size heuristics? +}; + +/// \brief Streaming decompressor interface +/// +class ARROW_EXPORT Decompressor { + public: + virtual ~Decompressor() = default; + + struct DecompressResult { + // XXX is need_more_output necessary? (Brotli?) + int64_t bytes_read; + int64_t bytes_written; + bool need_more_output; + }; + + /// \brief Decompress some input. + /// + /// If need_more_output is true on return, a larger output buffer needs + /// to be supplied. + virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, uint8_t* output) = 0; + + /// \brief Return whether the compressed stream is finished. + /// + /// This is a heuristic. If true is returned, then it is guaranteed + /// that the stream is finished. If false is returned, however, it may + /// simply be that the underlying library isn't able to provide the information. + virtual bool IsFinished() = 0; + + /// \brief Reinitialize decompressor, making it ready for a new compressed stream. + virtual Status Reset() = 0; + + // XXX add methods for buffer size heuristics? +}; + +/// \brief Compression codec options +class ARROW_EXPORT CodecOptions { + public: + explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel) + : compression_level(compression_level) {} + + virtual ~CodecOptions() = default; + + int compression_level; +}; + +// ---------------------------------------------------------------------- +// GZip codec options implementation + +enum class GZipFormat { + ZLIB, + DEFLATE, + GZIP, +}; + +class ARROW_EXPORT GZipCodecOptions : public CodecOptions { + public: + GZipFormat gzip_format = GZipFormat::GZIP; + std::optional<int> window_bits; +}; + +// ---------------------------------------------------------------------- +// brotli codec options implementation + +class ARROW_EXPORT BrotliCodecOptions : public CodecOptions { + public: + std::optional<int> window_bits; +}; + +/// \brief Compression codec +class ARROW_EXPORT Codec { + public: + virtual ~Codec() = default; + + /// \brief Return special value to indicate that a codec implementation + /// should use its default compression level + static int UseDefaultCompressionLevel(); + + /// \brief Return a string name for compression type + static const std::string& GetCodecAsString(Compression::type t); + + /// \brief Return compression type for name (all lower case) + static Result<Compression::type> GetCompressionType(const std::string& name); + + /// \brief Create a codec for the given compression algorithm with CodecOptions + static Result<std::unique_ptr<Codec>> Create( + Compression::type codec, const CodecOptions& codec_options = CodecOptions{}); + + /// \brief Create a codec for the given compression algorithm + static Result<std::unique_ptr<Codec>> Create(Compression::type codec, + int compression_level); + + /// \brief Return true if support for indicated codec has been enabled + static bool IsAvailable(Compression::type codec); + + /// \brief Return true if indicated codec supports setting a compression level + static bool SupportsCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result<int> MinimumCompressionLevel(Compression::type codec); + + /// \brief Return the largest supported compression level for the codec + /// Note: This function creates a temporary Codec instance + static Result<int> MaximumCompressionLevel(Compression::type codec); + + /// \brief Return the default compression level + /// Note: This function creates a temporary Codec instance + static Result<int> DefaultCompressionLevel(Compression::type codec); + + /// \brief Return the smallest supported compression level + virtual int minimum_compression_level() const = 0; + + /// \brief Return the largest supported compression level + virtual int maximum_compression_level() const = 0; + + /// \brief Return the default compression level + virtual int default_compression_level() const = 0; + + /// \brief One-shot decompression function + /// + /// output_buffer_len must be correct and therefore be obtained in advance. + /// The actual decompressed length is returned. + /// + /// \note One-shot decompression is not always compatible with streaming + /// compression. Depending on the codec (e.g. LZ4), different formats may + /// be used. + virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) = 0; + + /// \brief One-shot compression function + /// + /// output_buffer_len must first have been computed using MaxCompressedLen(). + /// The actual compressed length is returned. + /// + /// \note One-shot compression is not always compatible with streaming + /// decompression. Depending on the codec (e.g. LZ4), different formats may + /// be used. + virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, uint8_t* output_buffer) = 0; + + virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0; + + /// \brief Create a streaming compressor instance + virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0; + + /// \brief Create a streaming decompressor instance + virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0; + + /// \brief This Codec's compression type + virtual Compression::type compression_type() const = 0; + + /// \brief The name of this Codec's compression type + const std::string& name() const { return GetCodecAsString(compression_type()); } + + /// \brief This Codec's compression level, if applicable + virtual int compression_level() const { return UseDefaultCompressionLevel(); } + + private: + /// \brief Initializes the codec's resources. + virtual Status Init(); +}; + +} // namespace util +} // namespace arrow
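A hedged one-shot round trip through the Codec interface above. The function name is illustrative, and the sketch assumes ZSTD support was compiled into the Arrow build (check Codec::IsAvailable otherwise).

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/compression.h"

arrow::Status RoundTrip(const std::vector<uint8_t>& raw) {
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::util::Codec> codec,
                        arrow::util::Codec::Create(arrow::Compression::ZSTD));
  const int64_t raw_len = static_cast<int64_t>(raw.size());
  // Compress into a worst-case sized buffer; the actual length is returned.
  std::vector<uint8_t> compressed(codec->MaxCompressedLen(raw_len, raw.data()));
  ARROW_ASSIGN_OR_RAISE(
      const int64_t compressed_len,
      codec->Compress(raw_len, raw.data(),
                      static_cast<int64_t>(compressed.size()), compressed.data()));
  // One-shot Decompress needs the exact decompressed size up front.
  std::vector<uint8_t> restored(raw.size());
  ARROW_ASSIGN_OR_RAISE(const int64_t restored_len,
                        codec->Decompress(compressed_len, compressed.data(),
                                          raw_len, restored.data()));
  return restored_len == raw_len
             ? arrow::Status::OK()
             : arrow::Status::IOError("round trip size mismatch");
}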
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..c23d6ccd9886e4539d52d537abb85da1dcc93385 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +#include <memory> +#include <string> +#include <utility> +#include <vector> + +#include "arrow/array.h" +#include "arrow/chunked_array.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { +namespace internal { + +template <typename BaseConverter, template <typename...> class ConverterTrait> +static Result<std::unique_ptr<BaseConverter>> MakeConverter( + std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options, + MemoryPool* pool); + +template <typename Input, typename Options> +class Converter { + public: + using Self = Converter<Input, Options>; + using InputType = Input; + using OptionsType = Options; + + virtual ~Converter() = default; + + Status Construct(std::shared_ptr<DataType> type, OptionsType options, + MemoryPool* pool) { + type_ = std::move(type); + options_ = std::move(options); + return Init(pool); + } + + virtual Status Append(InputType value) { return Status::NotImplemented("Append"); } + + virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) { + return Status::NotImplemented("Extend"); + } + + virtual Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + return Status::NotImplemented("ExtendMasked"); + } + + const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; } + + const std::shared_ptr<DataType>& type() const { return type_; } + + OptionsType options() const { return options_; } + + bool may_overflow() const { return may_overflow_; } + + bool rewind_on_overflow() const { return rewind_on_overflow_; } + + virtual Status Reserve(int64_t additional_capacity) { + return builder_->Reserve(additional_capacity); + } + + Status AppendNull() { return builder_->AppendNull(); } + + virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); } + + virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) { + ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray()); + return arr->Slice(0, length); + } + + virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() { + ARROW_ASSIGN_OR_RAISE(auto array, ToArray()); + std::vector<std::shared_ptr<Array>> chunks = {std::move(array)}; + return std::make_shared<ChunkedArray>(chunks); + } + + protected: + virtual Status Init(MemoryPool* pool) { return Status::OK(); } + + std::shared_ptr<DataType> type_; + std::shared_ptr<ArrayBuilder> builder_; + OptionsType options_; + bool may_overflow_ = false; + bool rewind_on_overflow_ = false; +}; + +template <typename ArrowType, typename BaseConverter> +class PrimitiveConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits<ArrowType>::BuilderType; + + protected: + Status Init(MemoryPool* pool) override { + this->builder_ = std::make_shared<BuilderType>(this->type_, pool); + // Narrow variable-sized binary types may overflow + this->may_overflow_ = is_binary_like(this->type_->id()); + primitive_type_ = checked_cast<const ArrowType*>(this->type_.get()); + primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get()); + return Status::OK(); + } + + const ArrowType* primitive_type_; + BuilderType* primitive_builder_; +}; + +template <typename ArrowType, typename BaseConverter, template <typename...> class ConverterTrait> +class ListConverter : public BaseConverter { + public: + using BuilderType = typename TypeTraits<ArrowType>::BuilderType; + using ConverterType = typename ConverterTrait<ArrowType>::type; + + protected: + Status Init(MemoryPool* pool) override { + list_type_ = checked_cast<const ArrowType*>(this->type_.get()); + ARROW_ASSIGN_OR_RAISE(value_converter_, + (MakeConverter<BaseConverter, ConverterTrait>( + list_type_->value_type(), this->options_, pool))); + this->builder_ = + std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_); + list_builder_ = checked_cast<BuilderType*>(this->builder_.get()); + // Narrow list types may overflow + this->may_overflow_ = this->rewind_on_overflow_ = + sizeof(typename ArrowType::offset_type) < sizeof(int64_t); + return Status::OK(); + } + + const ArrowType* list_type_; + BuilderType* list_builder_; + std::unique_ptr<BaseConverter> value_converter_; +};
+ +template <typename BaseConverter, template <typename...> class ConverterTrait> +class StructConverter : public BaseConverter { + public: + using ConverterType = typename ConverterTrait<StructType>::type; + + Status Reserve(int64_t additional_capacity) override { + ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity)); + for (const auto& child : children_) { + ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity)); + } + return Status::OK(); + } + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr<BaseConverter> child_converter; + std::vector<std::shared_ptr<ArrayBuilder>> child_builders; + + struct_type_ = checked_cast<const StructType*>(this->type_.get()); + for (const auto& field : struct_type_->fields()) { + ARROW_ASSIGN_OR_RAISE(child_converter, + (MakeConverter<BaseConverter, ConverterTrait>( + field->type(), this->options_, pool))); + this->may_overflow_ |= child_converter->may_overflow(); + this->rewind_on_overflow_ = this->may_overflow_; + child_builders.push_back(child_converter->builder()); + children_.push_back(std::move(child_converter)); + } + + this->builder_ = + std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders)); + struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get()); + + return Status::OK(); + } + + const StructType* struct_type_; + StructBuilder* struct_builder_; + std::vector<std::unique_ptr<BaseConverter>> children_; +}; + +template <typename ValueType, typename BaseConverter> +class DictionaryConverter : public BaseConverter { + public: + using BuilderType = DictionaryBuilder<ValueType>; + + protected: + Status Init(MemoryPool* pool) override { + std::unique_ptr<ArrayBuilder> builder; + ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder)); + this->builder_ = std::move(builder); + this->may_overflow_ = false; + dict_type_ = checked_cast<const DictionaryType*>(this->type_.get()); + value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get()); + value_builder_ = checked_cast<BuilderType*>(this->builder_.get()); + return Status::OK(); + } + + const DictionaryType* dict_type_; + const ValueType* value_type_; + BuilderType* value_builder_; +}; + +template <typename BaseConverter, template <typename...> class ConverterTrait> +struct MakeConverterImpl { + template <typename T, typename ConverterType = typename ConverterTrait<T>::type> + Status Visit(const T&) { + out.reset(new ConverterType()); + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DictionaryType& t) { + switch (t.value_type()->id()) { +#define DICTIONARY_CASE(TYPE) \ + case TYPE::type_id: \ + out = std::make_unique< \ + typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \ + break; + DICTIONARY_CASE(BooleanType); + DICTIONARY_CASE(Int8Type); + DICTIONARY_CASE(Int16Type); + DICTIONARY_CASE(Int32Type); + DICTIONARY_CASE(Int64Type); + DICTIONARY_CASE(UInt8Type); + DICTIONARY_CASE(UInt16Type); + DICTIONARY_CASE(UInt32Type); + DICTIONARY_CASE(UInt64Type); + DICTIONARY_CASE(FloatType); + DICTIONARY_CASE(DoubleType); + DICTIONARY_CASE(BinaryType); + DICTIONARY_CASE(StringType); + DICTIONARY_CASE(FixedSizeBinaryType); +#undef DICTIONARY_CASE + default: + return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(), + " not implemented"); + } + return out->Construct(std::move(type), std::move(options), pool); + } + + Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); } + + std::shared_ptr<DataType> type; + typename BaseConverter::OptionsType options; + MemoryPool* pool; + std::unique_ptr<BaseConverter> out; +}; + +template <typename BaseConverter, template <typename...> class ConverterTrait> +static Result<std::unique_ptr<BaseConverter>> MakeConverter( + std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options, + MemoryPool* pool) { + MakeConverterImpl<BaseConverter, ConverterTrait> visitor{ + std::move(type), std::move(options), pool, NULLPTR};
ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor)); + return std::move(visitor.out); +} + +template <typename Converter> +class Chunker { + public: + using InputType = typename Converter::InputType; + + explicit Chunker(std::unique_ptr<Converter> converter) + : converter_(std::move(converter)) {} + + Status Reserve(int64_t additional_capacity) { + ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity)); + reserved_ += additional_capacity; + return Status::OK(); + } + + Status AppendNull() { + auto status = converter_->AppendNull(); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return converter_->AppendNull(); + } + ++length_; + return status; + } + + Status Append(InputType value) { + auto status = converter_->Append(value); + if (ARROW_PREDICT_FALSE(status.IsCapacityError())) { + if (converter_->builder()->length() == 0) { + return status; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + return Append(value); + } + ++length_; + return status; + } + + Status Extend(InputType values, int64_t size, int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->Extend(values, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. + length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status ExtendMasked(InputType values, InputType mask, int64_t size, + int64_t offset = 0) { + while (offset < size) { + auto length_before = converter_->builder()->length(); + auto status = converter_->ExtendMasked(values, mask, size, offset); + auto length_after = converter_->builder()->length(); + auto num_converted = length_after - length_before; + + offset += num_converted; + length_ += num_converted; + + if (status.IsCapacityError()) { + if (converter_->builder()->length() == 0) { + // Builder length == 0 means the individual element is too large to append. + // In this case, no need to try again. + return status; + } else if (converter_->rewind_on_overflow()) { + // The list-like and binary-like conversion paths may raise a capacity error, + // we need to handle them differently. While the binary-like converters check + // the capacity before append/extend the list-like converters just check after + // append/extend. Thus depending on the implementation semantics we may need + // to rewind (slice) the output chunk by one. + length_ -= 1; + offset -= 1; + } + ARROW_RETURN_NOT_OK(FinishChunk()); + } else if (!status.ok()) { + return status; + } + } + return Status::OK(); + } + + Status FinishChunk() { + ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_)); + chunks_.push_back(chunk); + // Reserve space for the remaining items. + // Besides being an optimization, it is also required if the converter's + // implementation relies on unsafe builder methods in converter->Append(). + auto remaining = reserved_ - length_; + Reset(); + return Reserve(remaining); + } + + Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() { + ARROW_RETURN_NOT_OK(FinishChunk()); + return std::make_shared<ChunkedArray>(chunks_); + } + + protected: + void Reset() { + converter_->builder()->Reset(); + length_ = 0; + reserved_ = 0; + } + + int64_t length_ = 0; + int64_t reserved_ = 0; + std::unique_ptr<Converter> converter_; + std::vector<std::shared_ptr<Array>> chunks_; +}; + +template <typename T> +static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) { + return std::make_unique<Chunker<T>>(std::move(converter)); +} + +} // namespace internal +} // namespace arrow
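To illustrate how the pieces above compose, here is a hypothetical minimal specialization (not Arrow code): a converter for plain int64_t inputs, the trait MakeConverter uses to select it, and a Chunker driving the conversion so a capacity overflow yields a new chunk rather than an error. The dictionary_type alias exists only to satisfy the DictionaryType visit path at compile time.

#include <memory>
#include <vector>

#include "arrow/api.h"
#include "arrow/util/converter.h"

struct NoOptions {};
using Base = arrow::internal::Converter<int64_t, NoOptions>;

class Int64Converter : public Base {
 public:
  arrow::Status Append(int64_t value) override {
    return arrow::internal::checked_cast<arrow::Int64Builder*>(this->builder_.get())
        ->Append(value);
  }

 protected:
  arrow::Status Init(arrow::MemoryPool* pool) override {
    this->builder_ = std::make_shared<arrow::Int64Builder>(pool);
    return arrow::Status::OK();
  }
};

template <typename T>
struct Int64Trait {
  using type = Int64Converter;
  // Only present so the DictionaryType visit path instantiates; unused here.
  template <typename U>
  using dictionary_type = Int64Converter;
};

arrow::Result<std::shared_ptr<arrow::ChunkedArray>> ConvertAll(
    const std::vector<int64_t>& values) {
  ARROW_ASSIGN_OR_RAISE(auto converter,
                        (arrow::internal::MakeConverter<Base, Int64Trait>(
                            arrow::int64(), NoOptions{}, arrow::default_memory_pool())));
  ARROW_ASSIGN_OR_RAISE(auto chunker,
                        arrow::internal::MakeChunker(std::move(converter)));
  ARROW_RETURN_NOT_OK(chunker->Reserve(static_cast<int64_t>(values.size())));
  for (int64_t v : values) {
    ARROW_RETURN_NOT_OK(chunker->Append(v));
  }
  return chunker->ToChunkedArray();
}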
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h new file mode 100644 index 0000000000000000000000000000000000000000..155cf7cfae1061feda9ae436a5f966b90cbabc6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include <cstddef> +#include <cstdint> + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief Compute the CRC32 checksum of the given data +/// +/// This function computes CRC32 with the polynomial 0x04C11DB7, +/// as used in zlib and others (note this is different from CRC32C). +/// To compute a running CRC32, pass the previous value in `prev`, +/// otherwise `prev` should be 0. +ARROW_EXPORT +uint32_t crc32(uint32_t prev, const void* data, size_t length); + +} // namespace internal +} // namespace arrow
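A small sketch of incremental use of the checksum declared above (the helper name is hypothetical): threading the running value through `prev` gives the same result as a single call over the whole buffer.

#include <cstddef>
#include <cstdint>

#include "arrow/util/crc32.h"

uint32_t ChecksumInTwoParts(const uint8_t* data, size_t length) {
  const size_t half = length / 2;
  uint32_t crc = arrow::internal::crc32(/*prev=*/0, data, half);
  crc = arrow::internal::crc32(crc, data + half, length - half);
  return crc;  // identical to arrow::internal::crc32(0, data, length)
}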
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..ed38a4dcf7ab87aad4db906dd8b6abc058387f8e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +ARROW_EXPORT +void DebugTrap(); + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h new file mode 100644 index 0000000000000000000000000000000000000000..161ad0bfddfc5a52040256a9cb39b5af96b876db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <cstdint> +#include <memory> +#include <string_view> + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; + +class ARROW_EXPORT BoundaryFinder { + public: + BoundaryFinder() = default; + + virtual ~BoundaryFinder(); + + /// \brief Find the position of the first delimiter inside block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindFirst(std::string_view partial, std::string_view block, + int64_t* out_pos) = 0; + + /// \brief Find the position of the last delimiter inside block + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the last delimiter. + /// `out_pos` will be -1 if no delimiter is found. + virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0; + + /// \brief Find the position of the Nth delimiter inside the block + /// + /// `partial` is taken to be the beginning of the block, and `block` + /// its continuation. Also, `partial` doesn't contain a delimiter. + /// + /// The returned `out_pos` is relative to `block`'s start and should point + /// to the first character after the first delimiter. + /// `out_pos` will be -1 if no delimiter is found.
+ /// + /// The returned `num_found` is the number of delimiters actually found + virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count, + int64_t* out_pos, int64_t* num_found) = 0; + + static constexpr int64_t kNoDelimiterFound = -1; + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder); +}; + +ARROW_EXPORT +std::shared_ptr<BoundaryFinder> MakeNewlineBoundaryFinder(); + +/// \brief A reusable block-based chunker for delimited data +/// +/// The chunker takes a block of delimited data and helps carve a sub-block +/// which begins and ends on delimiters (suitable for consumption by parsers +/// which can only parse whole objects). +class ARROW_EXPORT Chunker { + public: + explicit Chunker(std::shared_ptr<BoundaryFinder> delimiter); + ~Chunker(); + + /// \brief Carve up a chunk in a block of data to contain only whole objects + /// + /// Pre-conditions: + /// - `block` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// + /// Post-conditions: + /// - block == whole + partial + /// - `whole` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `partial` doesn't contain an entire delimited object + /// (IOW: `partial` is generally small) + /// + /// This method will look for the last delimiter in `block` and may + /// therefore be costly. + /// + /// \param[in] block data to be chunked + /// \param[out] whole subrange of block containing whole delimited objects + /// \param[out] partial subrange of block starting with a partial delimited object + Status Process(std::shared_ptr<Buffer> block, std::shared_ptr<Buffer>* whole, + std::shared_ptr<Buffer>* partial); + + /// \brief Carve the completion of a partial object out of a block + /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// (i.e. starts just after a delimiter and ends with a delimiter) + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + /// This method will look for the first delimiter in `block` and should + /// therefore be reasonably cheap. + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[out] completion subrange of block containing the completion of partial + /// \param[out] rest subrange of block containing what completion does not cover + Status ProcessWithPartial(std::shared_ptr<Buffer> partial, + std::shared_ptr<Buffer> block, + std::shared_ptr<Buffer>* completion, + std::shared_ptr<Buffer>* rest); + + /// \brief Like ProcessWithPartial, but for the last block of a file + /// + /// This method allows for a final delimited object without a trailing delimiter + /// (ProcessWithPartial would return an error in that case).
+ /// + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// - `block` follows `partial` in file order and is the last data block + /// + /// Post-conditions: + /// - block == completion + rest + /// - `partial + completion` is a valid block of delimited data + /// - `completion` doesn't contain an entire delimited object + /// (IOW: `completion` is generally small) + /// + Status ProcessFinal(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block, + std::shared_ptr<Buffer>* completion, std::shared_ptr<Buffer>* rest); + + /// \brief Skip count number of rows + /// Pre-conditions: + /// - `partial` is the start of a valid block of delimited data + /// (i.e. starts just after a delimiter) + /// - `block` follows `partial` in file order + /// + /// Post-conditions: + /// - `count` is updated to indicate the number of rows that still need to be skipped + /// - If `count` is > 0 then `rest` is an incomplete block that should be a future + /// `partial` + /// - Else `rest` could be one or more valid blocks of delimited data which need to be + /// parsed + /// + /// \param[in] partial incomplete delimited data + /// \param[in] block delimited data following partial + /// \param[in] final whether this is the final chunk + /// \param[in,out] count number of rows that need to be skipped + /// \param[out] rest subrange of block containing what was not skipped + Status ProcessSkip(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block, + bool final, int64_t* count, std::shared_ptr<Buffer>* rest); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker); + + std::shared_ptr<BoundaryFinder> boundary_finder_; +}; + +} // namespace arrow
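A hypothetical driver loop for the chunker declared above, assuming newline-delimited input buffers; the function name is illustrative. The incomplete trailing line of each block is carried across iterations as `partial`.

#include <memory>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/status.h"
#include "arrow/util/delimiting.h"

arrow::Status SplitIntoWholeLines(
    const std::vector<std::shared_ptr<arrow::Buffer>>& blocks) {
  arrow::Chunker chunker(arrow::MakeNewlineBoundaryFinder());
  std::shared_ptr<arrow::Buffer> partial = arrow::Buffer::FromString("");
  for (const auto& block : blocks) {
    std::shared_ptr<arrow::Buffer> completion, rest, whole, next_partial;
    ARROW_RETURN_NOT_OK(
        chunker.ProcessWithPartial(partial, block, &completion, &rest));
    ARROW_RETURN_NOT_OK(chunker.Process(rest, &whole, &next_partial));
    // `partial` + `completion` and `whole` now end on delimiters and can be
    // handed to a parser; `next_partial` is carried into the next iteration.
    partial = std::move(next_partial);
  }
  return arrow::Status::OK();
}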
+
+  Typical use:
+
+    static void my_function_default(...);
+    static void my_function_avx2(...);
+
+    struct MyDynamicFunction {
+      using FunctionType = decltype(&my_function_default);
+
+      static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
+        return {
+          { DispatchLevel::NONE, my_function_default }
+#if defined(ARROW_HAVE_RUNTIME_AVX2)
+          , { DispatchLevel::AVX2, my_function_avx2 }
+#endif
+        };
+      }
+    };
+
+    void my_function(...) {
+      static DynamicDispatch<MyDynamicFunction> dispatch;
+      return dispatch.func(...);
+    }
+*/
+template <typename DynamicFunction>
+class DynamicDispatch {
+ protected:
+  using FunctionType = typename DynamicFunction::FunctionType;
+  using Implementation = std::pair<DispatchLevel, FunctionType>;
+
+ public:
+  DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
+
+  FunctionType func = {};
+
+ protected:
+  // Use the Implementation with the highest DispatchLevel
+  void Resolve(const std::vector<Implementation>& implementations) {
+    Implementation cur{DispatchLevel::NONE, {}};
+
+    for (const auto& impl : implementations) {
+      if (impl.first >= cur.first && IsSupported(impl.first)) {
+        // Higher (or same) level than current
+        cur = impl;
+      }
+    }
+
+    if (!cur.second) {
+      Status::Invalid("No appropriate implementation found").Abort();
+    }
+    func = cur.second;
+  }
+
+ private:
+  bool IsSupported(DispatchLevel level) const {
+    static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
+
+    switch (level) {
+      case DispatchLevel::NONE:
+        return true;
+      case DispatchLevel::SSE4_2:
+        return cpu_info->IsSupported(CpuInfo::SSE4_2);
+      case DispatchLevel::AVX2:
+        return cpu_info->IsSupported(CpuInfo::AVX2);
+      case DispatchLevel::AVX512:
+        return cpu_info->IsSupported(CpuInfo::AVX512);
+      default:
+        return false;
+    }
+  }
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d394ba8b78017b8e06457510fc7748fc3c45f45
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +#pragma once + +#ifdef _WIN32 +#define ARROW_LITTLE_ENDIAN 1 +#else +#if defined(__APPLE__) || defined(__FreeBSD__) +#include // IWYU pragma: keep +#elif defined(sun) || defined(__sun) +#include // IWYU pragma: keep +#else +#include // IWYU pragma: keep +#endif +# +#ifndef __BYTE_ORDER__ +#error "__BYTE_ORDER__ not defined" +#endif +# +#ifndef __ORDER_LITTLE_ENDIAN__ +#error "__ORDER_LITTLE_ENDIAN__ not defined" +#endif +# +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define ARROW_LITTLE_ENDIAN 1 +#else +#define ARROW_LITTLE_ENDIAN 0 +#endif +#endif + +#if defined(_MSC_VER) +#include // IWYU pragma: keep +#define ARROW_BYTE_SWAP64 _byteswap_uint64 +#define ARROW_BYTE_SWAP32 _byteswap_ulong +#else +#define ARROW_BYTE_SWAP64 __builtin_bswap64 +#define ARROW_BYTE_SWAP32 __builtin_bswap32 +#endif + +#include +#include + +#include "arrow/util/type_traits.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace bit_util { + +// +// Byte-swap 16-bit, 32-bit and 64-bit values +// + +// Swap the byte order (i.e. endianness) +static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); } +static inline uint64_t ByteSwap(uint64_t value) { + return static_cast(ARROW_BYTE_SWAP64(value)); +} +static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); } +static inline uint32_t ByteSwap(uint32_t value) { + return static_cast(ARROW_BYTE_SWAP32(value)); +} +static inline int16_t ByteSwap(int16_t value) { + constexpr auto m = static_cast(0xff); + return static_cast(((value >> 8) & m) | ((value & m) << 8)); +} +static inline uint16_t ByteSwap(uint16_t value) { + return static_cast(ByteSwap(static_cast(value))); +} +static inline uint8_t ByteSwap(uint8_t value) { return value; } +static inline int8_t ByteSwap(int8_t value) { return value; } +static inline double ByteSwap(double value) { + const uint64_t swapped = ARROW_BYTE_SWAP64(util::SafeCopy(value)); + return util::SafeCopy(swapped); +} +static inline float ByteSwap(float value) { + const uint32_t swapped = ARROW_BYTE_SWAP32(util::SafeCopy(value)); + return util::SafeCopy(swapped); +} + +// Write the swapped bytes into dst. Src and dst cannot overlap. +static inline void ByteSwap(void* dst, const void* src, int len) { + switch (len) { + case 1: + *reinterpret_cast(dst) = *reinterpret_cast(src); + return; + case 2: + *reinterpret_cast(dst) = ByteSwap(*reinterpret_cast(src)); + return; + case 4: + *reinterpret_cast(dst) = ByteSwap(*reinterpret_cast(src)); + return; + case 8: + *reinterpret_cast(dst) = ByteSwap(*reinterpret_cast(src)); + return; + default: + break; + } + + auto d = reinterpret_cast(dst); + auto s = reinterpret_cast(src); + for (int i = 0; i < len; ++i) { + d[i] = s[len - i - 1]; + } +} + +// Convert to little/big endian format from the machine's native endian format. +#if ARROW_LITTLE_ENDIAN +template > +static inline T ToBigEndian(T value) { + return ByteSwap(value); +} + +template > +static inline T ToLittleEndian(T value) { + return value; +} +#else +template > +static inline T ToBigEndian(T value) { + return value; +} + +template > +static inline T ToLittleEndian(T value) { + return ByteSwap(value); +} +#endif + +// Convert from big/little endian format to the machine's native endian format. 
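+//
+// For example, a round trip through a fixed little-endian wire format pairs
+// the conversions above with the ones below (an editor's illustrative
+// sketch, not part of the original header):
+//
+//   uint32_t wire = ToLittleEndian(0x11223344u);
+//   // ... `wire` can be stored or transmitted with a stable byte order ...
+//   uint32_t native = FromLittleEndian(wire);
+//   // `native` == 0x11223344 on hosts of either endianness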
+#if ARROW_LITTLE_ENDIAN +template > +static inline T FromBigEndian(T value) { + return ByteSwap(value); +} + +template > +static inline T FromLittleEndian(T value) { + return value; +} +#else +template > +static inline T FromBigEndian(T value) { + return value; +} + +template > +static inline T FromLittleEndian(T value) { + return ByteSwap(value); +} +#endif + +// Handle endianness in *word* granularity (keep individual array element untouched) +namespace little_endian { + +namespace detail { + +// Read a native endian array as little endian +template +struct Reader { + const std::array& native_array; + + explicit Reader(const std::array& native_array) : native_array(native_array) {} + + const T& operator[](size_t i) const { + return native_array[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; + } +}; + +// Read/write a native endian array as little endian +template +struct Writer { + std::array* native_array; + + explicit Writer(std::array* native_array) : native_array(native_array) {} + + const T& operator[](size_t i) const { + return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; + } + T& operator[](size_t i) { return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; } +}; + +} // namespace detail + +// Construct array reader and try to deduce template augments +template +static inline detail::Reader Make(const std::array& native_array) { + return detail::Reader(native_array); +} + +// Construct array writer and try to deduce template augments +template +static inline detail::Writer Make(std::array* native_array) { + return detail::Writer(native_array); +} + +// Convert little endian array to native endian +template +static inline std::array ToNative(std::array array) { + if (!ARROW_LITTLE_ENDIAN) { + std::reverse(array.begin(), array.end()); + } + return array; +} + +// Convert native endian array to little endian +template +static inline std::array FromNative(std::array array) { + return ToNative(array); +} + +} // namespace little_endian + +} // namespace bit_util +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..41e268852fa6ea76ce195240498bb11277a7228c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +struct Empty { + static Result ToResult(Status s) { + if (ARROW_PREDICT_TRUE(s.ok())) { + return Empty{}; + } + return s; + } +}; + +/// Helper struct for examining lambdas and other callables. +/// TODO(ARROW-12655) support function pointers +struct call_traits { + public: + template + static std::false_type is_overloaded_impl(R(A...)); + + template + static std::false_type is_overloaded_impl(decltype(&F::operator())*); + + template + static std::true_type is_overloaded_impl(...); + + template + static R return_type_impl(R (F::*)(A...)); + + template + static R return_type_impl(R (F::*)(A...) const); + + template + static typename std::tuple_element>::type argument_type_impl( + R (F::*)(A...)); + + template + static typename std::tuple_element>::type argument_type_impl( + R (F::*)(A...) const); + + template + static typename std::tuple_element>::type argument_type_impl( + R (F::*)(A...) &&); + + template + static std::integral_constant argument_count_impl(R (F::*)(A...)); + + template + static std::integral_constant argument_count_impl(R (F::*)(A...) + const); + + template + static std::integral_constant argument_count_impl(R (F::*)(A...) &&); + + /// bool constant indicating whether F is a callable with more than one possible + /// signature. Will be true_type for objects which define multiple operator() or which + /// define a template operator() + template + using is_overloaded = + decltype(is_overloaded_impl::type>(NULLPTR)); + + template + using enable_if_overloaded = typename std::enable_if::value, T>::type; + + template + using disable_if_overloaded = + typename std::enable_if::value, T>::type; + + /// If F is not overloaded, the argument types of its call operator can be + /// extracted via call_traits::argument_type + template + using argument_type = decltype(argument_type_impl(&std::decay::type::operator())); + + template + using argument_count = decltype(argument_count_impl(&std::decay::type::operator())); + + template + using return_type = decltype(return_type_impl(&std::decay::type::operator())); + + template + using enable_if_return = + typename std::enable_if, T>::value, RT>; + + template + using enable_if_empty = typename std::enable_if::value, R>::type; + + template + using enable_if_not_empty = + typename std::enable_if::value, R>::type; +}; + +/// A type erased callable object which may only be invoked once. +/// It can be constructed from any lambda which matches the provided call signature. +/// Invoking it results in destruction of the lambda, freeing any state/references +/// immediately. Invoking a default constructed FnOnce or one which has already been +/// invoked will segfault. +template +class FnOnce; + +template +class FnOnce { + public: + FnOnce() = default; + + template ()(std::declval()...)), R>::value>::type> + FnOnce(Fn fn) : impl_(new FnImpl(std::move(fn))) { // NOLINT runtime/explicit + } + + explicit operator bool() const { return impl_ != NULLPTR; } + + R operator()(A... a) && { + auto bye = std::move(impl_); + return bye->invoke(std::forward(a)...); + } + + private: + struct Impl { + virtual ~Impl() = default; + virtual R invoke(A&&... a) = 0; + }; + + template + struct FnImpl : Impl { + explicit FnImpl(Fn fn) : fn_(std::move(fn)) {} + R invoke(A&&... 
a) override { return std::move(fn_)(std::forward(a)...); } + Fn fn_; + }; + + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h new file mode 100644 index 0000000000000000000000000000000000000000..2de9f4153248f0acebf4589fc492eed912a847a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h @@ -0,0 +1,944 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Private header, not to be exported + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/builder_binary.h" +#include "arrow/buffer_builder.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_builders.h" +#include "arrow/util/endian.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" + +#define XXH_INLINE_ALL + +#include "arrow/vendored/xxhash.h" // IWYU pragma: keep + +namespace arrow { +namespace internal { + +// XXX would it help to have a 32-bit hash value on large datasets? +typedef uint64_t hash_t; + +// Notes about the choice of a hash function. +// - XXH3 is extremely fast on most data sizes, from small to huge; +// faster even than HW CRC-based hashing schemes +// - our custom hash function for tiny values (< 16 bytes) is still +// significantly faster (~30%), at least on this machine and compiler + +template +inline hash_t ComputeStringHash(const void* data, int64_t length); + +/// \brief A hash function for bitmaps that can handle offsets and lengths in +/// terms of number of bits. The hash only depends on the bits actually hashed. +/// +/// It's the caller's responsibility to ensure that bits_offset + num_bits are +/// readable from the bitmap. +/// +/// \pre bits_offset >= 0 +/// \pre num_bits >= 0 +/// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap +/// +/// \param bitmap The pointer to the bitmap. +/// \param seed The seed for the hash function (useful when chaining hash functions). +/// \param bits_offset The offset in bits relative to the start of the bitmap. +/// \param num_bits The number of bits after the offset to be hashed. 
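+///
+/// For example (an editor's sketch, not upstream documentation), hashing the
+/// 17 bits at positions [3, 20) while chaining from a previous hash value:
+///
+///   hash_t h = ComputeBitmapHash(bitmap, /*seed=*/previous_hash,
+///                                /*bits_offset=*/3, /*num_bits=*/17);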
+ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed, + int64_t bits_offset, int64_t num_bits); + +template +struct ScalarHelperBase { + static bool CompareScalars(Scalar u, Scalar v) { return u == v; } + + static hash_t ComputeHash(const Scalar& value) { + // Generic hash computation for scalars. Simply apply the string hash + // to the bit representation of the value. + + // XXX in the case of FP values, we'd like equal values to have the same hash, + // even if they have different bit representations... + return ComputeStringHash(&value, sizeof(value)); + } +}; + +template +struct ScalarHelper : public ScalarHelperBase {}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for integers + + static hash_t ComputeHash(const Scalar& value) { + // Faster hash computation for integers. + + // Two of xxhash's prime multipliers (which are chosen for their + // bit dispersion properties) + static constexpr uint64_t multipliers[] = {11400714785074694791ULL, + 14029467366897019727ULL}; + + // Multiplying by the prime number mixes the low bits into the high bits, + // then byte-swapping (which is a single CPU instruction) allows the + // combined high and low bits to participate in the initial hash table index. + auto h = static_cast(value); + return bit_util::ByteSwap(multipliers[AlgNum] * h); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for std::string_view + + static hash_t ComputeHash(std::string_view value) { + return ComputeStringHash(value.data(), static_cast(value.size())); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for reals + + static bool CompareScalars(Scalar u, Scalar v) { + if (std::isnan(u)) { + // XXX should we do a bit-precise comparison? + return std::isnan(v); + } + return u == v; + } +}; + +template +hash_t ComputeStringHash(const void* data, int64_t length) { + if (ARROW_PREDICT_TRUE(length <= 16)) { + // Specialize for small hash strings, as they are quite common as + // hash table keys. Even XXH3 isn't quite as fast. + auto p = reinterpret_cast(data); + auto n = static_cast(length); + if (n <= 8) { + if (n <= 3) { + if (n == 0) { + return 1U; + } + uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1]; + return ScalarHelper::ComputeHash(x); + } + // 4 <= length <= 8 + // We can read the string as two overlapping 32-bit ints, apply + // different hash functions to each of them in parallel, then XOR + // the results + uint32_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 4); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + // 8 <= length <= 16 + // Apply the same principle as above + uint64_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 8); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + +#if XXH3_SECRET_SIZE_MIN != 136 +#error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets +#endif + + // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow. + // Instead, we use hard-coded random secrets. To maximize cache efficiency, + // they reuse the same memory area. 
+  static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = {
+      0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f,
+      0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24,
+      0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26,
+      0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75,
+      0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce,
+      0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3,
+      0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42,
+      0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1,
+      0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5,
+      0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87};
+
+  static_assert(AlgNum < 2, "AlgNum too large");
+  static constexpr auto secret = kXxh3Secrets + AlgNum;
+  return XXH3_64bits_withSecret(data, static_cast<size_t>(length), secret,
+                                XXH3_SECRET_SIZE_MIN);
+}
+
+// XXX add a HashEq struct with both hash and compare functions?
+
+// ----------------------------------------------------------------------
+// An open-addressing insert-only hash table (no deletes)
+
+template <typename Payload>
+class HashTable {
+ public:
+  static constexpr hash_t kSentinel = 0ULL;
+  static constexpr int64_t kLoadFactor = 2UL;
+
+  struct Entry {
+    hash_t h;
+    Payload payload;
+
+    // An entry is valid if the hash is different from the sentinel value
+    operator bool() const { return h != kSentinel; }
+  };
+
+  HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
+    DCHECK_NE(pool, nullptr);
+    // Minimum of 32 elements
+    capacity = std::max(capacity, 32UL);
+    capacity_ = bit_util::NextPower2(capacity);
+    capacity_mask_ = capacity_ - 1;
+    size_ = 0;
+
+    DCHECK_OK(UpsizeBuffer(capacity_));
+  }
+
+  // Lookup with non-linear probing
+  // cmp_func should have signature bool(const Payload*).
+  // Return a (Entry*, found) pair.
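+  //
+  // For example (an editor's sketch, not upstream documentation; assumes the
+  // Payload type has a `value` field):
+  //
+  //   hash_t h = ScalarHelper<int64_t, 0>::ComputeHash(key);
+  //   auto p = table.Lookup(h, [&](const Payload* payload) {
+  //     return payload->value == key;
+  //   });
+  //   if (p.second) {
+  //     // found: p.first->payload is the matching entry's payload
+  //   } else {
+  //     // not found: p.first is the empty slot that Insert() may fill
+  //   }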
+ template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) const { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + Status Insert(Entry* entry, hash_t h, const Payload& payload) { + // Ensure entry is empty before inserting + assert(!*entry); + entry->h = FixHash(h); + entry->payload = payload; + ++size_; + + if (ARROW_PREDICT_FALSE(NeedUpsizing())) { + // Resize less frequently since it is expensive + return Upsize(capacity_ * kLoadFactor * 2); + } + return Status::OK(); + } + + uint64_t size() const { return size_; } + + // Visit all non-empty entries in the table + // The visit_func should have signature void(const Entry*) + template + void VisitEntries(VisitFunc&& visit_func) const { + for (uint64_t i = 0; i < capacity_; i++) { + const auto& entry = entries_[i]; + if (entry) { + visit_func(&entry); + } + } + } + + protected: + // NoCompare is for when the value is known not to exist in the table + enum CompareKind { DoCompare, NoCompare }; + + // The workhorse lookup function + template + std::pair Lookup(hash_t h, const Entry* entries, uint64_t size_mask, + CmpFunc&& cmp_func) const { + static constexpr uint8_t perturb_shift = 5; + + uint64_t index, perturb; + const Entry* entry; + + h = FixHash(h); + index = h & size_mask; + perturb = (h >> perturb_shift) + 1U; + + while (true) { + entry = &entries[index]; + if (CompareEntry(h, entry, std::forward(cmp_func))) { + // Found + return {index, true}; + } + if (entry->h == kSentinel) { + // Empty slot + return {index, false}; + } + + // Perturbation logic inspired from CPython's set / dict object. + // The goal is that all 64 bits of the unmasked hash value eventually + // participate in the probing sequence, to minimize clustering. 
+ index = (index + perturb) & size_mask; + perturb = (perturb >> perturb_shift) + 1U; + } + } + + template + bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const { + if (CKind == NoCompare) { + return false; + } else { + return entry->h == h && cmp_func(&entry->payload); + } + } + + bool NeedUpsizing() const { + // Keep the load factor <= 1/2 + return size_ * kLoadFactor >= capacity_; + } + + Status UpsizeBuffer(uint64_t capacity) { + RETURN_NOT_OK(entries_builder_.Resize(capacity)); + entries_ = entries_builder_.mutable_data(); + memset(static_cast(entries_), 0, capacity * sizeof(Entry)); + + return Status::OK(); + } + + Status Upsize(uint64_t new_capacity) { + assert(new_capacity > capacity_); + uint64_t new_mask = new_capacity - 1; + assert((new_capacity & new_mask) == 0); // it's a power of two + + // Stash old entries and seal builder, effectively resetting the Buffer + const Entry* old_entries = entries_; + ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_)); + // Allocate new buffer + RETURN_NOT_OK(UpsizeBuffer(new_capacity)); + + for (uint64_t i = 0; i < capacity_; i++) { + const auto& entry = old_entries[i]; + if (entry) { + // Dummy compare function will not be called + auto p = Lookup(entry.h, entries_, new_mask, + [](const Payload*) { return false; }); + // Lookup (and CompareEntry) ensure that an + // empty slots is always returned + assert(!p.second); + entries_[p.first] = entry; + } + } + capacity_ = new_capacity; + capacity_mask_ = new_mask; + + return Status::OK(); + } + + hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; } + + // The number of slots available in the hash table array. + uint64_t capacity_; + uint64_t capacity_mask_; + // The number of used slots in the hash table array. + uint64_t size_; + + Entry* entries_; + TypedBufferBuilder entries_builder_; +}; + +// XXX typedef memo_index_t int32_t ? + +constexpr int32_t kKeyNotFound = -1; + +// ---------------------------------------------------------------------- +// A base class for memoization table. + +class MemoTable { + public: + virtual ~MemoTable() = default; + + virtual int32_t size() const = 0; +}; + +// ---------------------------------------------------------------------- +// A memoization table for memory-cheap scalar values. + +// The memoization table remembers and allows to look up the insertion +// index for each key. 
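+//
+// For example, dictionary-encoding a stream of int32 values reduces to the
+// following pattern (an editor's sketch of the class defined below, not
+// upstream documentation):
+//
+//   ScalarMemoTable<int32_t> memo(default_memory_pool());
+//   int32_t index;
+//   for (int32_t value : {7, 3, 7}) {
+//     ARROW_RETURN_NOT_OK(memo.GetOrInsert(value, &index));
+//     // `index` comes out as 0, 1, 0: the insertion order of distinct keys
+//   }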
+ +template class HashTableTemplateType = HashTable> +class ScalarMemoTable : public MemoTable { + public: + explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0) + : hash_table_(pool, static_cast(entries)) {} + + int32_t Get(const Scalar& value) const { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(payload->value, value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + template + Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(value, payload->value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index})); + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index != kKeyNotFound) { + on_found(memo_index); + } else { + null_index_ = memo_index = size(); + on_not_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table +1 if null was added. + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size()) + (GetNull() != kKeyNotFound); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + hash_table_.VisitEntries([=](const HashTableEntry* entry) { + int32_t index = entry->payload.memo_index - start; + if (index >= 0) { + out_data[index] = entry->payload.value; + } + }); + // Zero-initialize the null entry + if (null_index_ != kKeyNotFound) { + int32_t index = null_index_ - start; + if (index >= 0) { + out_data[index] = Scalar{}; + } + } + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + protected: + struct Payload { + Scalar value; + int32_t memo_index; + }; + + using HashTableType = HashTableTemplateType; + using HashTableEntry = typename HashTableType::Entry; + HashTableType hash_table_; + int32_t null_index_ = kKeyNotFound; + + hash_t ComputeHash(const Scalar& value) const { + return ScalarHelper::ComputeHash(value); + } + + public: + // defined here so that `HashTableType` is visible + // Merge entries from `other_table` into `this->hash_table_`. 
+ Status MergeTable(const ScalarMemoTable& other_table) { + const HashTableType& other_hashtable = other_table.hash_table_; + + other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused)); + }); + // TODO: ARROW-17074 - implement proper error handling + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// A memoization table for small scalar values, using direct indexing + +template +struct SmallScalarTraits {}; + +template <> +struct SmallScalarTraits { + static constexpr int32_t cardinality = 2; + + static uint32_t AsIndex(bool value) { return value ? 1 : 0; } +}; + +template +struct SmallScalarTraits::value>> { + using Unsigned = typename std::make_unsigned::type; + + static constexpr int32_t cardinality = 1U + std::numeric_limits::max(); + + static uint32_t AsIndex(Scalar value) { return static_cast(value); } +}; + +template class HashTableTemplateType = HashTable> +class SmallScalarMemoTable : public MemoTable { + public: + explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) { + std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound); + index_to_value_.reserve(cardinality); + } + + int32_t Get(const Scalar value) const { + auto value_index = AsIndex(value); + return value_to_index_[value_index]; + } + + template + Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto value_index = AsIndex(value); + auto memo_index = value_to_index_[value_index]; + if (memo_index == kKeyNotFound) { + memo_index = static_cast(index_to_value_.size()); + index_to_value_.push_back(value); + value_to_index_[value_index] = memo_index; + DCHECK_LT(memo_index, cardinality + 1); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return value_to_index_[cardinality]; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + auto memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = value_to_index_[cardinality] = size(); + index_to_value_.push_back(0); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { return static_cast(index_to_value_.size()); } + + // Merge entries from `other_table` into `this`. 
+ Status MergeTable(const SmallScalarMemoTable& other_table) { + for (const Scalar& other_val : other_table.index_to_value_) { + int32_t unused; + RETURN_NOT_OK(this->GetOrInsert(other_val, &unused)); + } + return Status::OK(); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + DCHECK_GE(start, 0); + DCHECK_LE(static_cast(start), index_to_value_.size()); + int64_t offset = start * static_cast(sizeof(Scalar)); + memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar)); + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + const std::vector& values() const { return index_to_value_; } + + protected: + static constexpr auto cardinality = SmallScalarTraits::cardinality; + static_assert(cardinality <= 256, "cardinality too large for direct-addressed table"); + + uint32_t AsIndex(Scalar value) const { + return SmallScalarTraits::AsIndex(value); + } + + // The last index is reserved for the null element. + int32_t value_to_index_[cardinality + 1]; + std::vector index_to_value_; +}; + +// ---------------------------------------------------------------------- +// A memoization table for variable-sized binary data. + +template +class BinaryMemoTable : public MemoTable { + public: + using builder_offset_type = typename BinaryBuilderT::offset_type; + explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0, + int64_t values_size = -1) + : hash_table_(pool, static_cast(entries)), binary_builder_(pool) { + const int64_t data_size = (values_size < 0) ? entries * 4 : values_size; + DCHECK_OK(binary_builder_.Resize(entries)); + DCHECK_OK(binary_builder_.ReserveData(data_size)); + } + + int32_t Get(const void* data, builder_offset_type length) const { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + int32_t Get(std::string_view value) const { + return Get(value.data(), static_cast(value.length())); + } + + template + Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found, + Func2&& on_not_found, int32_t* out_memo_index) { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + // Insert string value + RETURN_NOT_OK(binary_builder_.Append(static_cast(data), length)); + // Insert hash entry + RETURN_NOT_OK( + hash_table_.Insert(const_cast(p.first), h, {memo_index})); + + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + template + Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + std::forward(on_found), std::forward(on_not_found), + out_memo_index); + } + + Status GetOrInsert(const void* data, builder_offset_type length, + int32_t* out_memo_index) { + return GetOrInsert( + data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + Status GetOrInsert(std::string_view value, int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if 
(memo_index == kKeyNotFound) { + memo_index = null_index_ = size(); + DCHECK_OK(binary_builder_.AppendNull()); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size() + (GetNull() != kKeyNotFound)); + } + + int64_t values_size() const { return binary_builder_.value_data_length(); } + + // Copy (n + 1) offsets starting from index `start` into `out_data` + template + void CopyOffsets(int32_t start, Offset* out_data) const { + DCHECK_LE(start, size()); + + const builder_offset_type* offsets = binary_builder_.offsets_data(); + const builder_offset_type delta = + start < binary_builder_.length() ? offsets[start] : 0; + for (int32_t i = start; i < size(); ++i) { + const builder_offset_type adjusted_offset = offsets[i] - delta; + Offset cast_offset = static_cast(adjusted_offset); + assert(static_cast(cast_offset) == + adjusted_offset); // avoid truncation + *out_data++ = cast_offset; + } + + // Copy last value since BinaryBuilder only materializes it on in Finish() + *out_data = static_cast(binary_builder_.value_data_length() - delta); + } + + template + void CopyOffsets(Offset* out_data) const { + CopyOffsets(0, out_data); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, uint8_t* out_data) const { + CopyValues(start, -1, out_data); + } + + // Same as above, but check output size in debug mode + void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const { + DCHECK_LE(start, size()); + + // The absolute byte offset of `start` value in the binary buffer. + const builder_offset_type offset = binary_builder_.offset(start); + const auto length = binary_builder_.value_data_length() - static_cast(offset); + + if (out_size != -1) { + assert(static_cast(length) <= out_size); + } + + auto view = binary_builder_.GetView(start); + memcpy(out_data, view.data(), length); + } + + void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); } + + void CopyValues(int64_t out_size, uint8_t* out_data) const { + CopyValues(0, out_size, out_data); + } + + void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size, + uint8_t* out_data) const { + // This method exists to cope with the fact that the BinaryMemoTable does + // not know the fixed width when inserting the null value. The data + // buffer hold a zero length string for the null value (if found). + // + // Thus, the method will properly inject an empty value of the proper width + // in the output buffer. + // + if (start >= size()) { + return; + } + + int32_t null_index = GetNull(); + if (null_index < start) { + // Nothing to skip, proceed as usual. + CopyValues(start, out_size, out_data); + return; + } + + builder_offset_type left_offset = binary_builder_.offset(start); + + // Ensure that the data length is exactly missing width_size bytes to fit + // in the expected output (n_values * width_size). +#ifndef NDEBUG + int64_t data_length = values_size() - static_cast(left_offset); + assert(data_length + width_size == out_size); + ARROW_UNUSED(data_length); +#endif + + auto in_data = binary_builder_.value_data() + left_offset; + // The null use 0-length in the data, slice the data in 2 and skip by + // width_size in out_data. 
[part_1][width_size][part_2] + auto null_data_offset = binary_builder_.offset(null_index); + auto left_size = null_data_offset - left_offset; + if (left_size > 0) { + memcpy(out_data, in_data + left_offset, left_size); + } + // Zero-initialize the null entry + memset(out_data + left_size, 0, width_size); + + auto right_size = values_size() - static_cast(null_data_offset); + if (right_size > 0) { + // skip the null fixed size value. + auto out_offset = left_size + width_size; + assert(out_data + out_offset + right_size == out_data + out_size); + memcpy(out_data + out_offset, in_data + null_data_offset, right_size); + } + } + + // Visit the stored values in insertion order. + // The visitor function should have the signature `void(std::string_view)` + // or `void(const std::string_view&)`. + template + void VisitValues(int32_t start, VisitFunc&& visit) const { + for (int32_t i = start; i < size(); ++i) { + visit(binary_builder_.GetView(i)); + } + } + + protected: + struct Payload { + int32_t memo_index; + }; + + using HashTableType = HashTable; + using HashTableEntry = typename HashTable::Entry; + HashTableType hash_table_; + BinaryBuilderT binary_builder_; + + int32_t null_index_ = kKeyNotFound; + + std::pair Lookup(hash_t h, const void* data, + builder_offset_type length) const { + auto cmp_func = [&](const Payload* payload) { + std::string_view lhs = binary_builder_.GetView(payload->memo_index); + std::string_view rhs(static_cast(data), length); + return lhs == rhs; + }; + return hash_table_.Lookup(h, cmp_func); + } + + public: + Status MergeTable(const BinaryMemoTable& other_table) { + other_table.VisitValues(0, [this](std::string_view other_value) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_value, &unused)); + }); + return Status::OK(); + } +}; + +template +struct HashTraits {}; + +template <> +struct HashTraits { + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits> { + using c_type = typename T::c_type; + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits::value && !is_8bit_int::value>> { + using c_type = typename T::c_type; + using MemoTableType = ScalarMemoTable; +}; + +template +struct HashTraits::value && + !std::is_base_of::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table, + int64_t start_offset, int64_t* null_count, + std::shared_ptr* null_bitmap) { + int64_t dict_length = static_cast(memo_table.size()) - start_offset; + int64_t null_index = memo_table.GetNull(); + + *null_count = 0; + *null_bitmap = nullptr; + + if (null_index != kKeyNotFound && null_index >= start_offset) { + null_index -= start_offset; + *null_count = 1; + ARROW_ASSIGN_OR_RAISE(*null_bitmap, + internal::BitmapAllButOne(pool, dict_length, null_index)); + } + + return Status::OK(); +} + +struct StringViewHash { + // std::hash compatible hasher for use with std::unordered_* + // (the std::hash specialization provided by nonstd constructs std::string + // temporaries then invokes std::hash against those) + hash_t operator()(std::string_view value) const { + return ComputeStringHash<0>(value.data(), static_cast(value.size())); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h new file mode 100644 index 0000000000000000000000000000000000000000..59a2ac7109a3c08b4cd265f88b7ca0ecffe5ae9d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" + +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +struct ArraySpan; +struct Scalar; + +namespace internal { + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes, + int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1); + +ARROW_EXPORT +uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length, + uint8_t min_width = 1); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int32_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastInts(const int64_t* source, int64_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint32_t* dest, int64_t length); + +ARROW_EXPORT +void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length); + +ARROW_EXPORT +void UpcastInts(const int32_t* source, int64_t* dest, int64_t length); + +template +inline typename std::enable_if<(sizeof(InputInt) >= sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + DowncastInts(source, dest, length); +} + +template +inline typename std::enable_if<(sizeof(InputInt) < sizeof(OutputInt))>::type CastInts( + const InputInt* source, OutputInt* dest, int64_t length) { + UpcastInts(source, dest, length); +} + +template +ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length, + const int32_t* transpose_map); + +ARROW_EXPORT +Status TransposeInts(const DataType& src_type, const DataType& dest_type, + const uint8_t* src, uint8_t* dest, int64_t src_offset, + int64_t dest_offset, int64_t length, const int32_t* transpose_map); + +/// \brief Do vectorized boundschecking of integer-type array indices. 
The +/// indices must be nonnegative and strictly less than the passed upper +/// limit (which is usually the length of an array that is being indexed-into). +ARROW_EXPORT +Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit); + +/// \brief Boundscheck integer values to determine if they are all between the +/// passed upper and lower limits (inclusive). Upper and lower bounds must be +/// the same type as the data and are not currently casted. +ARROW_EXPORT +Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower, + const Scalar& bound_upper); + +/// \brief Use CheckIntegersInRange to determine whether the passed integers +/// can fit safely in the passed integer type. This helps quickly determine if +/// integer narrowing (e.g. int64->int32) is safe to do. +ARROW_EXPORT +Status IntegersCanFit(const ArraySpan& values, const DataType& target_type); + +/// \brief Convenience for boundschecking a single Scalar value +ARROW_EXPORT +Status IntegersCanFit(const Scalar& value, const DataType& target_type); + +/// Upcast an integer to the largest possible width (currently 64 bits) + +template +typename std::enable_if< + std::is_integral::value && std::is_signed::value, int64_t>::type +UpcastInt(Integer v) { + return v; +} + +template +typename std::enable_if< + std::is_integral::value && std::is_unsigned::value, uint64_t>::type +UpcastInt(Integer v) { + return v; +} + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h new file mode 100644 index 0000000000000000000000000000000000000000..113b1bdd93103bfc3573c41f5b4c4f8b18bdbbbe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h @@ -0,0 +1,420 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#ifndef _WIN32 +#define ARROW_HAVE_SIGACTION 1 +#endif + +#include +#include +#include +#include +#include + +#if ARROW_HAVE_SIGACTION +#include // Needed for struct sigaction +#endif + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/windows_fixup.h" + +namespace arrow { +namespace internal { + +// NOTE: 8-bit path strings on Windows are encoded using UTF-8. +// Using MBCS would fail encoding some paths. 
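+//
+// In practice this means 8-bit paths should enter through
+// PlatformFilename::FromString() (declared below), which converts UTF-8 to
+// the native representation and reports unencodable names through a Status
+// (an editor's illustrative sketch, not part of the original header):
+//
+//   ARROW_ASSIGN_OR_RAISE(auto fn, PlatformFilename::FromString("data/café"));
+//   ARROW_ASSIGN_OR_RAISE(bool created, CreateDir(fn));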
+ +#if defined(_WIN32) +using NativePathString = std::wstring; +#else +using NativePathString = std::string; +#endif + +class ARROW_EXPORT PlatformFilename { + public: + struct Impl; + + ~PlatformFilename(); + PlatformFilename(); + PlatformFilename(const PlatformFilename&); + PlatformFilename(PlatformFilename&&); + PlatformFilename& operator=(const PlatformFilename&); + PlatformFilename& operator=(PlatformFilename&&); + explicit PlatformFilename(NativePathString path); + explicit PlatformFilename(const NativePathString::value_type* path); + + const NativePathString& ToNative() const; + std::string ToString() const; + + PlatformFilename Parent() const; + Result Real() const; + + // These functions can fail for character encoding reasons. + static Result FromString(std::string_view file_name); + Result Join(std::string_view child_name) const; + + PlatformFilename Join(const PlatformFilename& child_name) const; + + bool operator==(const PlatformFilename& other) const; + bool operator!=(const PlatformFilename& other) const; + + // Made public to avoid the proliferation of friend declarations. + const Impl* impl() const { return impl_.get(); } + + private: + std::unique_ptr impl_; + + explicit PlatformFilename(Impl impl); +}; + +/// Create a directory if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDir(const PlatformFilename& dir_path); + +/// Create a directory and its parents if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDirTree(const PlatformFilename& dir_path); + +/// Delete a directory's contents (but not the directory itself) if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirContents(const PlatformFilename& dir_path, + bool allow_not_found = true); + +/// Delete a directory tree if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true); + +// Non-recursively list the contents of the given directory. +// The returned names are the children's base names, not including dir_path. +ARROW_EXPORT +Result> ListDir(const PlatformFilename& dir_path); + +/// Delete a file if it exists. +/// +/// Return whether the file existed. +ARROW_EXPORT +Result DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true); + +/// Return whether a file exists. +ARROW_EXPORT +Result FileExists(const PlatformFilename& path); + +// TODO expose this more publicly to make it available from io/file.h? +/// A RAII wrapper for a file descriptor. +/// +/// The underlying file descriptor is automatically closed on destruction. +/// Moving is supported with well-defined semantics. +/// Furthermore, closing is idempotent. +class ARROW_EXPORT FileDescriptor { + public: + FileDescriptor() = default; + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(FileDescriptor&&); + FileDescriptor& operator=(FileDescriptor&&); + + ~FileDescriptor(); + + Status Close(); + + /// May return -1 if closed or default-initialized + int fd() const { return fd_.load(); } + + /// Detach and return the underlying file descriptor + int Detach(); + + bool closed() const { return fd_.load() == -1; } + + protected: + static void CloseFromDestructor(int fd); + + std::atomic fd_{-1}; +}; + +/// Open a file for reading and return a file descriptor. +ARROW_EXPORT +Result FileOpenReadable(const PlatformFilename& file_name); + +/// Open a file for writing and return a file descriptor. 
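+///
+/// For example (an editor's sketch), opening a file for appending and
+/// writing to it through the returned RAII descriptor:
+///
+///   ARROW_ASSIGN_OR_RAISE(
+///       auto fd, FileOpenWritable(fn, /*write_only=*/true, /*truncate=*/false,
+///                                 /*append=*/true));
+///   RETURN_NOT_OK(FileWrite(fd.fd(), buffer, nbytes));
+///   RETURN_NOT_OK(fd.Close());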
+ARROW_EXPORT +Result FileOpenWritable(const PlatformFilename& file_name, + bool write_only = true, bool truncate = true, + bool append = false); + +/// Read from current file position. Return number of bytes read. +ARROW_EXPORT +Result FileRead(int fd, uint8_t* buffer, int64_t nbytes); +/// Read from given file position. Return number of bytes read. +ARROW_EXPORT +Result FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes); + +ARROW_EXPORT +Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes); +ARROW_EXPORT +Status FileTruncate(int fd, const int64_t size); + +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos); +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos, int whence); +ARROW_EXPORT +Result FileTell(int fd); +ARROW_EXPORT +Result FileGetSize(int fd); + +ARROW_EXPORT +Status FileClose(int fd); + +struct Pipe { + FileDescriptor rfd; + FileDescriptor wfd; + + Status Close() { return rfd.Close() & wfd.Close(); } +}; + +ARROW_EXPORT +Result CreatePipe(); + +ARROW_EXPORT +Status SetPipeFileDescriptorNonBlocking(int fd); + +class ARROW_EXPORT SelfPipe { + public: + static Result> Make(bool signal_safe); + virtual ~SelfPipe(); + + /// \brief Wait for a wakeup. + /// + /// Status::Invalid is returned if the pipe has been shutdown. + /// Otherwise the next sent payload is returned. + virtual Result Wait() = 0; + + /// \brief Wake up the pipe by sending a payload. + /// + /// This method is async-signal-safe if `signal_safe` was set to true. + virtual void Send(uint64_t payload) = 0; + + /// \brief Wake up the pipe and shut it down. + virtual Status Shutdown() = 0; +}; + +ARROW_EXPORT +int64_t GetPageSize(); + +struct MemoryRegion { + void* addr; + size_t size; +}; + +ARROW_EXPORT +Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes, + void** new_addr); +ARROW_EXPORT +Status MemoryAdviseWillNeed(const std::vector& regions); + +ARROW_EXPORT +Result GetEnvVar(const char* name); +ARROW_EXPORT +Result GetEnvVar(const std::string& name); +ARROW_EXPORT +Result GetEnvVarNative(const char* name); +ARROW_EXPORT +Result GetEnvVarNative(const std::string& name); + +ARROW_EXPORT +Status SetEnvVar(const char* name, const char* value); +ARROW_EXPORT +Status SetEnvVar(const std::string& name, const std::string& value); +ARROW_EXPORT +Status DelEnvVar(const char* name); +ARROW_EXPORT +Status DelEnvVar(const std::string& name); + +ARROW_EXPORT +std::string ErrnoMessage(int errnum); +#if _WIN32 +ARROW_EXPORT +std::string WinErrorMessage(int errnum); +#endif + +ARROW_EXPORT +std::shared_ptr StatusDetailFromErrno(int errnum); +#if _WIN32 +ARROW_EXPORT +std::shared_ptr StatusDetailFromWinError(int errnum); +#endif +ARROW_EXPORT +std::shared_ptr StatusDetailFromSignal(int signum); + +template +Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromErrno(int errnum, Args&&... args) { + return StatusFromErrno(errnum, StatusCode::IOError, std::forward(args)...); +} + +#if _WIN32 +template +Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromWinError(int errnum, Args&&... args) { + return StatusFromWinError(errnum, StatusCode::IOError, std::forward(args)...); +} +#endif + +template +Status StatusFromSignal(int signum, StatusCode code, Args&&... 
args) { + return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum), + std::forward(args)...); +} + +template +Status CancelledFromSignal(int signum, Args&&... args) { + return StatusFromSignal(signum, StatusCode::Cancelled, std::forward(args)...); +} + +ARROW_EXPORT +int ErrnoFromStatus(const Status&); + +// Always returns 0 on non-Windows platforms (for Python). +ARROW_EXPORT +int WinErrorFromStatus(const Status&); + +ARROW_EXPORT +int SignalFromStatus(const Status&); + +class ARROW_EXPORT TemporaryDir { + public: + ~TemporaryDir(); + + /// '/'-terminated path to the temporary dir + const PlatformFilename& path() { return path_; } + + /// Create a temporary subdirectory in the system temporary dir, + /// named starting with `prefix`. + static Result> Make(const std::string& prefix); + + private: + PlatformFilename path_; + + explicit TemporaryDir(PlatformFilename&&); +}; + +class ARROW_EXPORT SignalHandler { + public: + typedef void (*Callback)(int); + + SignalHandler(); + explicit SignalHandler(Callback cb); +#if ARROW_HAVE_SIGACTION + explicit SignalHandler(const struct sigaction& sa); +#endif + + Callback callback() const; +#if ARROW_HAVE_SIGACTION + const struct sigaction& action() const; +#endif + + protected: +#if ARROW_HAVE_SIGACTION + // Storing the full sigaction allows to restore the entire signal handling + // configuration. + struct sigaction sa_; +#else + Callback cb_; +#endif +}; + +/// \brief Return the current handler for the given signal number. +ARROW_EXPORT +Result GetSignalHandler(int signum); + +/// \brief Set a new handler for the given signal number. +/// +/// The old signal handler is returned. +ARROW_EXPORT +Result SetSignalHandler(int signum, const SignalHandler& handler); + +/// \brief Reinstate the signal handler +/// +/// For use in signal handlers. This is needed on platforms without sigaction() +/// such as Windows, as the default signal handler is restored there as +/// soon as a signal is raised. +ARROW_EXPORT +void ReinstateSignalHandler(int signum, SignalHandler::Callback handler); + +/// \brief Send a signal to the current process +/// +/// The thread which will receive the signal is unspecified. +ARROW_EXPORT +Status SendSignal(int signum); + +/// \brief Send a signal to the given thread +/// +/// This function isn't supported on Windows. +ARROW_EXPORT +Status SendSignalToThread(int signum, uint64_t thread_id); + +/// \brief Get an unpredictable random seed +/// +/// This function may be slightly costly, so should only be used to initialize +/// a PRNG, not to generate a large amount of random numbers. +/// It is better to use this function rather than std::random_device, unless +/// absolutely necessary (e.g. to generate a cryptographic secret). +ARROW_EXPORT +int64_t GetRandomSeed(); + +/// \brief Get the current thread id +/// +/// In addition to having the same properties as std::thread, the returned value +/// is a regular integer value, which is more convenient than an opaque type. 
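+///
+/// For example (an editor's sketch), a worker thread can publish its id so
+/// that another thread may later interrupt a blocking call:
+///
+///   uint64_t worker_id = GetThreadId();
+///   // ... later, from a different thread ...
+///   RETURN_NOT_OK(SendSignalToThread(SIGINT, worker_id));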
+class ARROW_EXPORT SignalHandler {
+ public:
+  typedef void (*Callback)(int);
+
+  SignalHandler();
+  explicit SignalHandler(Callback cb);
+#if ARROW_HAVE_SIGACTION
+  explicit SignalHandler(const struct sigaction& sa);
+#endif
+
+  Callback callback() const;
+#if ARROW_HAVE_SIGACTION
+  const struct sigaction& action() const;
+#endif
+
+ protected:
+#if ARROW_HAVE_SIGACTION
+  // Storing the full sigaction allows restoring the entire signal handling
+  // configuration.
+  struct sigaction sa_;
+#else
+  Callback cb_;
+#endif
+};
+
+/// \brief Return the current handler for the given signal number.
+ARROW_EXPORT
+Result<SignalHandler> GetSignalHandler(int signum);
+
+/// \brief Set a new handler for the given signal number.
+///
+/// The old signal handler is returned.
+ARROW_EXPORT
+Result<SignalHandler> SetSignalHandler(int signum, const SignalHandler& handler);
+
+/// \brief Reinstate the signal handler
+///
+/// For use in signal handlers. This is needed on platforms without sigaction()
+/// such as Windows, as the default signal handler is restored there as
+/// soon as a signal is raised.
+ARROW_EXPORT
+void ReinstateSignalHandler(int signum, SignalHandler::Callback handler);
+
+/// \brief Send a signal to the current process
+///
+/// The thread which will receive the signal is unspecified.
+ARROW_EXPORT
+Status SendSignal(int signum);
+
+/// \brief Send a signal to the given thread
+///
+/// This function isn't supported on Windows.
+ARROW_EXPORT
+Status SendSignalToThread(int signum, uint64_t thread_id);
+
+/// \brief Get an unpredictable random seed
+///
+/// This function may be slightly costly, so should only be used to initialize
+/// a PRNG, not to generate a large amount of random numbers.
+/// It is better to use this function rather than std::random_device, unless
+/// absolutely necessary (e.g. to generate a cryptographic secret).
+ARROW_EXPORT
+int64_t GetRandomSeed();
+
+/// \brief Get the current thread id
+///
+/// In addition to having the same properties as std::thread::id, the returned
+/// value is a regular integer value, which is more convenient than an opaque type.
+ARROW_EXPORT
+uint64_t GetThreadId();
+
+/// \brief Get the current memory used by the current process in bytes
+///
+/// This function supports Windows, Linux, and Mac and will return 0 otherwise.
+ARROW_EXPORT
+int64_t GetCurrentRSS();
+
+/// \brief Get the total memory available to the system in bytes
+///
+/// This function supports Windows, Linux, and Mac and will return 0 otherwise.
+ARROW_EXPORT
+int64_t GetTotalMemoryBytes();
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h
new file mode 100644
index 0000000000000000000000000000000000000000..57ade11e758684777fc8e2828c9c3d1b9deb0bee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/key_value_metadata.h
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \brief A container for key-value pair type metadata. Not thread-safe
+class ARROW_EXPORT KeyValueMetadata {
+ public:
+  KeyValueMetadata();
+  KeyValueMetadata(std::vector<std::string> keys, std::vector<std::string> values);
+  explicit KeyValueMetadata(const std::unordered_map<std::string, std::string>& map);
+
+  static std::shared_ptr<KeyValueMetadata> Make(std::vector<std::string> keys,
+                                                std::vector<std::string> values);
+
+  void ToUnorderedMap(std::unordered_map<std::string, std::string>* out) const;
+  void Append(std::string key, std::string value);
+
+  Result<std::string> Get(std::string_view key) const;
+  bool Contains(std::string_view key) const;
+  // Note that deleting may invalidate known indices
+  Status Delete(std::string_view key);
+  Status Delete(int64_t index);
+  Status DeleteMany(std::vector<int64_t> indices);
+  Status Set(std::string key, std::string value);
+
+  void reserve(int64_t n);
+
+  int64_t size() const;
+  const std::string& key(int64_t i) const;
+  const std::string& value(int64_t i) const;
+  const std::vector<std::string>& keys() const { return keys_; }
+  const std::vector<std::string>& values() const { return values_; }
+
+  std::vector<std::pair<std::string, std::string>> sorted_pairs() const;
+  /// \brief Perform linear search for key, returning -1 if not found
+  int FindKey(std::string_view key) const;
+
+  std::shared_ptr<KeyValueMetadata> Copy() const;
+
+  /// \brief Return a new KeyValueMetadata by combining the passed metadata
+  /// with this KeyValueMetadata. Colliding keys will be overridden by the
+  /// passed metadata. Assumes keys in both containers are unique.
+  std::shared_ptr<KeyValueMetadata> Merge(const KeyValueMetadata& other) const;
+
+  bool Equals(const KeyValueMetadata& other) const;
+  std::string ToString() const;
+
+ private:
+  std::vector<std::string> keys_;
+  std::vector<std::string> values_;
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(KeyValueMetadata);
+};
+
+/// \brief Create a KeyValueMetadata instance
+///
+/// \param pairs key-value mapping
+ARROW_EXPORT std::shared_ptr<KeyValueMetadata> key_value_metadata(
+    const std::unordered_map<std::string, std::string>& pairs);
+
+/// \brief Create a KeyValueMetadata instance
+///
+/// \param keys sequence of metadata keys
+/// \param values sequence of corresponding metadata values
+ARROW_EXPORT std::shared_ptr<KeyValueMetadata> key_value_metadata(
+    std::vector<std::string> keys, std::vector<std::string> values);
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <new>
+
+namespace arrow {
+namespace internal {
+
+#if __cpp_lib_launder
+using std::launder;
+#else
+template <typename T>
+constexpr T* launder(T* p) noexcept {
+  return p;
+}
+#endif
+
+}  // namespace internal
+}  // namespace arrow
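Before moving on to the logging utilities, here is a short caller-side sketch of the KeyValueMetadata API declared above. It is not part of the vendored headers; the keys and values are hypothetical and the snippet relies only on the declarations shown:

#include <cassert>

#include "arrow/util/key_value_metadata.h"

void KeyValueMetadataExample() {
  // Keys and values are stored as parallel vectors, in insertion order.
  auto metadata = arrow::KeyValueMetadata::Make({"origin", "version"},
                                                {"sensor-1", "2"});
  metadata->Append("comment", "calibrated");
  assert(metadata->size() == 3);
  assert(metadata->Contains("origin"));

  // Get() returns arrow::Result<std::string>; FindKey() returns -1 if absent.
  auto version = metadata->Get("version");
  assert(version.ok() && *version == "2");
  assert(metadata->FindKey("missing") == -1);
}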
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h
new file mode 100644
index 0000000000000000000000000000000000000000..2baa560563bb4e8ac798a9ce5e8f4d392b40ec5f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h
@@ -0,0 +1,259 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#ifdef GANDIVA_IR
+
+// The LLVM IR code doesn't have an NDEBUG mode, and it shouldn't include
+// references to streams or the C++ standard library. So the DCHECK calls are
+// made void in that case.
+
+#define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+#define DCHECK(condition) ARROW_IGNORE_EXPR(condition)
+#define DCHECK_OK(status) ARROW_IGNORE_EXPR(status)
+#define DCHECK_EQ(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_NE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_LE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_LT(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_GE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_GT(val1, val2) ARROW_IGNORE_EXPR(val1)
+
+#else  // !GANDIVA_IR
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+enum class ArrowLogLevel : int {
+  ARROW_DEBUG = -1,
+  ARROW_INFO = 0,
+  ARROW_WARNING = 1,
+  ARROW_ERROR = 2,
+  ARROW_FATAL = 3
+};
+
+#define ARROW_LOG_INTERNAL(level) ::arrow::util::ArrowLog(__FILE__, __LINE__, level)
+#define ARROW_LOG(level) ARROW_LOG_INTERNAL(::arrow::util::ArrowLogLevel::ARROW_##level)
+
+#define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+#define ARROW_CHECK_OR_LOG(condition, level) \
+  ARROW_PREDICT_TRUE(condition)              \
+  ? ARROW_IGNORE_EXPR(0)                     \
+  : ::arrow::util::Voidify() & ARROW_LOG(level) << " Check failed: " #condition " "
+
+#define ARROW_CHECK(condition) ARROW_CHECK_OR_LOG(condition, FATAL)
+
+// If 'to_call' returns a bad status, CHECK immediately with a logged message
+// of 'msg' followed by the status.
+#define ARROW_CHECK_OK_PREPEND(to_call, msg, level)                 \
+  do {                                                              \
+    ::arrow::Status _s = (to_call);                                 \
+    ARROW_CHECK_OR_LOG(_s.ok(), level)                              \
+        << "Operation failed: " << ARROW_STRINGIFY(to_call) << "\n" \
+        << (msg) << ": " << _s.ToString();                          \
+  } while (false)
+
+// If the status is bad, CHECK immediately, appending the status to the
+// logged message.
+#define ARROW_CHECK_OK(s) ARROW_CHECK_OK_PREPEND(s, "Bad status", FATAL)
+
+#define ARROW_CHECK_EQ(val1, val2) ARROW_CHECK((val1) == (val2))
+#define ARROW_CHECK_NE(val1, val2) ARROW_CHECK((val1) != (val2))
+#define ARROW_CHECK_LE(val1, val2) ARROW_CHECK((val1) <= (val2))
+#define ARROW_CHECK_LT(val1, val2) ARROW_CHECK((val1) < (val2))
+#define ARROW_CHECK_GE(val1, val2) ARROW_CHECK((val1) >= (val2))
+#define ARROW_CHECK_GT(val1, val2) ARROW_CHECK((val1) > (val2))
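// Illustrative sketch, not from the Arrow sources: the CHECK macros above
// abort the process with a FATAL log record when the condition fails, and any
// streamed text is appended to the failure message. DivideExact and DoIo are
// hypothetical.
//
//   int DivideExact(int a, int b) {
//     ARROW_CHECK_NE(b, 0) << "denominator must be non-zero";
//     ARROW_CHECK_EQ(a % b, 0) << a << " is not divisible by " << b;
//     return a / b;
//   }
//
//   ARROW_CHECK_OK(DoIo());  // aborts with "Bad status: ..." if DoIo() fails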
+#ifdef NDEBUG
+#define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_WARNING
+
+// CAUTION: DCHECK_OK() always evaluates its argument, but other DCHECK*() macros
+// only do so in debug mode.
+
+#define ARROW_DCHECK(condition)               \
+  while (false) ARROW_IGNORE_EXPR(condition); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_OK(s) \
+  ARROW_IGNORE_EXPR(s);    \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_EQ(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_NE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_LE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_LT(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_GE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_GT(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+
+#else
+#define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_FATAL
+
+#define ARROW_DCHECK ARROW_CHECK
+#define ARROW_DCHECK_OK ARROW_CHECK_OK
+#define ARROW_DCHECK_EQ ARROW_CHECK_EQ
+#define ARROW_DCHECK_NE ARROW_CHECK_NE
+#define ARROW_DCHECK_LE ARROW_CHECK_LE
+#define ARROW_DCHECK_LT ARROW_CHECK_LT
+#define ARROW_DCHECK_GE ARROW_CHECK_GE
+#define ARROW_DCHECK_GT ARROW_CHECK_GT
+
+#endif  // NDEBUG
+
+#define DCHECK ARROW_DCHECK
+#define DCHECK_OK ARROW_DCHECK_OK
+#define DCHECK_EQ ARROW_DCHECK_EQ
+#define DCHECK_NE ARROW_DCHECK_NE
+#define DCHECK_LE ARROW_DCHECK_LE
+#define DCHECK_LT ARROW_DCHECK_LT
+#define DCHECK_GE ARROW_DCHECK_GE
+#define DCHECK_GT ARROW_DCHECK_GT
+
+// This code is adapted from
+// https://github.com/ray-project/ray/blob/master/src/ray/util/logging.h.
+
+// To make the logging lib pluggable with other logging libs and keep the
+// implementation opaque to the user, ArrowLog is only a declaration; the
+// implementation is hidden in the logging.cc file.
+// In logging.cc, we can choose different log libs using different macros.
+
+// This is also a null log which does not output anything.
+class ARROW_EXPORT ArrowLogBase {
+ public:
+  virtual ~ArrowLogBase() {}
+
+  virtual bool IsEnabled() const { return false; }
+
+  template <typename T>
+  ArrowLogBase& operator<<(const T& t) {
+    if (IsEnabled()) {
+      Stream() << t;
+    }
+    return *this;
+  }
+
+ protected:
+  virtual std::ostream& Stream() = 0;
+};
+class ARROW_EXPORT ArrowLog : public ArrowLogBase {
+ public:
+  ArrowLog(const char* file_name, int line_number, ArrowLogLevel severity);
+  ~ArrowLog() override;
+
+  /// Return whether or not the current logging instance is enabled.
+  ///
+  /// \return True if logging is enabled and false otherwise.
+  bool IsEnabled() const override;
+
+  /// The initialization function of arrow log for a program; it should be
+  /// called only once.
+  ///
+  /// \param appName The app name which starts the log.
+  /// \param severity_threshold Logging threshold for the program.
+  /// \param logDir Logging output file name. If empty, the log won't output to file.
+  static void StartArrowLog(const std::string& appName,
+                            ArrowLogLevel severity_threshold = ArrowLogLevel::ARROW_INFO,
+                            const std::string& logDir = "");
+
+  /// The shutdown function of arrow log; it should be used with StartArrowLog as a pair.
+  static void ShutDownArrowLog();
+
+  /// Install the failure signal handler to output the call stack on a crash.
+  /// If glog is not installed, this function won't do anything.
+  static void InstallFailureSignalHandler();
+
+  /// Uninstall the signal actions installed by InstallFailureSignalHandler.
+  static void UninstallSignalAction();
+
+  /// Return whether or not the log level is enabled in the current setting.
+  ///
+  /// \param log_level The input log level to test.
+  /// \return True if the input log level is not lower than the threshold.
+  static bool IsLevelEnabled(ArrowLogLevel log_level);
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(ArrowLog);
+
+  // Hide the implementation of the log provider behind a void*.
+  // Otherwise, a lib user may define the same macro to use the correct header file.
+  void* logging_provider_;
+  /// True if log messages should be logged and false if they should be ignored.
+  bool is_enabled_;
+
+  static ArrowLogLevel severity_threshold_;
+
+ protected:
+  std::ostream& Stream() override;
+};
+
+// This class makes ARROW_CHECK compilation pass by changing the << operator to void.
+// This class is copied from glog.
+class ARROW_EXPORT Voidify {
+ public:
+  Voidify() {}
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(ArrowLogBase&) {}
+};
+
+namespace detail {
+
+/// @brief A helper for the nil log sink.
+///
+/// Using this helper is analogous to sending log messages to /dev/null:
+/// nothing gets logged.
+class NullLog {
+ public:
+  /// The no-op output operator.
+  ///
+  /// @param [in] t
+  ///   The object to send into the nil sink.
+  /// @return Reference to the updated object.
+  template <class T>
+  NullLog& operator<<(const T& t) {
+    return *this;
+  }
+};
+
+}  // namespace detail
+}  // namespace util
+}  // namespace arrow
+
+#endif  // GANDIVA_IR
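A minimal sketch of the logging lifecycle declared above; it is not part of the vendored headers, and the app name and threshold are arbitrary. The documented contract is that StartArrowLog() and ShutDownArrowLog() are called as a pair, once per program:

#include "arrow/util/logging.h"

int main() {
  // Initialize once; records below the threshold are dropped.
  arrow::util::ArrowLog::StartArrowLog("my-app",
                                       arrow::util::ArrowLogLevel::ARROW_WARNING);

  ARROW_LOG(INFO) << "suppressed: below the WARNING threshold";
  ARROW_LOG(WARNING) << "emitted: at or above the threshold";

  arrow::util::ArrowLog::ShutDownArrowLog();
  return 0;
}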
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5675faa143dbf7124e56c23c3cbc9f922cad2de
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h
@@ -0,0 +1,191 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#define ARROW_EXPAND(x) x
+#define ARROW_STRINGIFY(x) #x
+#define ARROW_CONCAT(x, y) x##y
+
+// From Google gutil
+#ifndef ARROW_DISALLOW_COPY_AND_ASSIGN
+#define ARROW_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete;            \
+  void operator=(const TypeName&) = delete
+#endif
+
+#ifndef ARROW_DEFAULT_MOVE_AND_ASSIGN
+#define ARROW_DEFAULT_MOVE_AND_ASSIGN(TypeName) \
+  TypeName(TypeName&&) = default;               \
+  TypeName& operator=(TypeName&&) = default
+#endif
+
+#define ARROW_UNUSED(x) (void)(x)
+#define ARROW_ARG_UNUSED(x)
+//
+// GCC can be told that a certain branch is not likely to be taken (for
+// instance, a CHECK failure), and use that information in static analysis.
+// Giving it this information can help it optimize for the common case in
+// the absence of better information (ie. -fprofile-arcs).
+//
+#if defined(__GNUC__)
+#define ARROW_PREDICT_FALSE(x) (__builtin_expect(!!(x), 0))
+#define ARROW_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#define ARROW_NORETURN __attribute__((noreturn))
+#define ARROW_NOINLINE __attribute__((noinline))
+#define ARROW_PREFETCH(addr) __builtin_prefetch(addr)
+#elif defined(_MSC_VER)
+#define ARROW_NORETURN __declspec(noreturn)
+#define ARROW_NOINLINE __declspec(noinline)
+#define ARROW_PREDICT_FALSE(x) (x)
+#define ARROW_PREDICT_TRUE(x) (x)
+#define ARROW_PREFETCH(addr)
+#else
+#define ARROW_NORETURN
+#define ARROW_PREDICT_FALSE(x) (x)
+#define ARROW_PREDICT_TRUE(x) (x)
+#define ARROW_PREFETCH(addr)
+#endif
+
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
+#define ARROW_RESTRICT __restrict
+#else
+#define ARROW_RESTRICT
+#endif
+
+// ----------------------------------------------------------------------
+// C++/CLI support macros (see ARROW-1134)
+
+#ifndef NULLPTR
+
+#ifdef __cplusplus_cli
+#define NULLPTR __nullptr
+#else
+#define NULLPTR nullptr
+#endif
+
+#endif  // ifndef NULLPTR
+
+// ----------------------------------------------------------------------
+
+// clang-format off
+// [[deprecated]] is only available in C++14, use this for the time being
+// This macro takes an optional deprecation message
+#ifdef __COVERITY__
+# define ARROW_DEPRECATED(...)
+#else
+# define ARROW_DEPRECATED(...) [[deprecated(__VA_ARGS__)]]
+#endif
+
+#ifdef __COVERITY__
+# define ARROW_DEPRECATED_ENUM_VALUE(...)
+#else
+# define ARROW_DEPRECATED_ENUM_VALUE(...) [[deprecated(__VA_ARGS__)]]
+#endif
+
+// clang-format on
+
+// Macros to disable deprecation warnings
+
+#ifdef __clang__
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  _Pragma("clang diagnostic push");        \
+  _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("clang diagnostic pop")
+#elif defined(__GNUC__)
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  _Pragma("GCC diagnostic push");          \
+  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  __pragma(warning(push)) __pragma(warning(disable : 4996))
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING __pragma(warning(pop))
+#else
+#define ARROW_SUPPRESS_DEPRECATION_WARNING
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING
+#endif
+
+// ----------------------------------------------------------------------
+
+// macros to disable padding
+// these macros are portable across different compilers and platforms
+//[https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flatbuffers.h#L1355]
+#if !defined(MANUALLY_ALIGNED_STRUCT)
+#if defined(_MSC_VER)
+#define MANUALLY_ALIGNED_STRUCT(alignment) \
+  __pragma(pack(1));                       \
+  struct __declspec(align(alignment))
+#define STRUCT_END(name, size) \
+  __pragma(pack());            \
+  static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#elif defined(__GNUC__) || defined(__clang__)
+#define MANUALLY_ALIGNED_STRUCT(alignment) \
+  _Pragma("pack(1)") struct __attribute__((aligned(alignment)))
+#define STRUCT_END(name, size) \
+  _Pragma("pack()") static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#else
+#error Unknown compiler, please define structure alignment macros
+#endif
+#endif  // !defined(MANUALLY_ALIGNED_STRUCT)
+
+// ----------------------------------------------------------------------
+// Convenience macro disabling a particular UBSan check in a function
+
+#if defined(__clang__)
+#define ARROW_DISABLE_UBSAN(feature) __attribute__((no_sanitize(feature)))
+#else
+#define ARROW_DISABLE_UBSAN(feature)
+#endif
+
+// ----------------------------------------------------------------------
+// Machine information
+
+#if INTPTR_MAX == INT64_MAX
+#define ARROW_BITNESS 64
+#elif INTPTR_MAX == INT32_MAX
+#define ARROW_BITNESS 32
+#else
+#error Unexpected INTPTR_MAX
+#endif
+
+// ----------------------------------------------------------------------
+// From googletest
+// (also in parquet-cpp)
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+//   // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+//   // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name) \
+  friend class test_case_name##_##test_name##_Test
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h
new file mode 100644
index 0000000000000000000000000000000000000000..5523909061d4c096b03c4853584ec9abc0f39a14
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <utility>
+
+#include "arrow/result.h"
+
+namespace arrow {
+namespace internal {
+
+/// Helper providing single-lookup conditional insertion into std::map or
+/// std::unordered_map. If `key` exists in the container, an iterator to that pair
+/// will be returned. If `key` does not exist in the container, `gen(key)` will be
+/// invoked and its return value inserted.
+template <typename Map, typename Gen>
+auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen)
+    -> decltype(map->begin()->second = gen(map->begin()->first), map->begin()) {
+  decltype(gen(map->begin()->first)) placeholder{};
+
+  auto it_success = map->emplace(std::move(key), std::move(placeholder));
+  if (it_success.second) {
+    // insertion of placeholder succeeded, overwrite it with gen()
+    const auto& inserted_key = it_success.first->first;
+    auto* value = &it_success.first->second;
+    *value = gen(inserted_key);
+  }
+  return it_success.first;
+}
+
+template <typename Map, typename Gen>
+auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen)
+    -> Result<decltype(map->begin()->second = gen(map->begin()->first).ValueOrDie(),
+                       map->begin())> {
+  decltype(gen(map->begin()->first).ValueOrDie()) placeholder{};
+
+  auto it_success = map->emplace(std::move(key), std::move(placeholder));
+  if (it_success.second) {
+    // insertion of placeholder succeeded, overwrite it with gen()
+    const auto& inserted_key = it_success.first->first;
+    auto* value = &it_success.first->second;
+    ARROW_ASSIGN_OR_RAISE(*value, gen(inserted_key));
+  }
+  return it_success.first;
+}
+
+}  // namespace internal
+}  // namespace arrow
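The two overloads above differ only in whether the generator returns a plain value or an arrow::Result. A small caller-side sketch of the plain-value overload (not part of the vendored headers; all names are hypothetical):

#include <cassert>
#include <string>
#include <unordered_map>

#include "arrow/util/map.h"

void GetOrInsertGeneratedExample() {
  std::unordered_map<int, std::string> cache;
  int calls = 0;
  auto gen = [&](const int& key) {
    ++calls;
    return std::to_string(key * key);
  };

  // First lookup misses: gen() runs and its result is inserted.
  auto it = arrow::internal::GetOrInsertGenerated(&cache, 4, gen);
  assert(it->second == "16" && calls == 1);

  // Second lookup hits: gen() is not invoked again.
  it = arrow::internal::GetOrInsertGenerated(&cache, 4, gen);
  assert(it->second == "16" && calls == 1);
}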
The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cmath>
+
+// Not provided by default in MSVC,
+// and _USE_MATH_DEFINES is not reliable with unity builds
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#ifndef M_PI_2
+#define M_PI_2 1.57079632679489661923
+#endif
+#ifndef M_PI_4
+#define M_PI_4 0.785398163397448309616
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac63cf70cd9ae9c05189f89e2f96c4d216d09573
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/mutex.h
@@ -0,0 +1,85 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+/// A wrapper around std::mutex since we can't use it directly in
+/// public headers due to C++/CLI.
+/// https://docs.microsoft.com/en-us/cpp/standard-library/mutex#remarks
+class ARROW_EXPORT Mutex {
+ public:
+  Mutex();
+  Mutex(Mutex&&) = default;
+  Mutex& operator=(Mutex&&) = default;
+
+  /// A Guard is falsy if a lock could not be acquired.
+  class ARROW_EXPORT Guard {
+   public:
+    Guard() : locked_(NULLPTR, [](Mutex* mutex) {}) {}
+    Guard(Guard&&) = default;
+    Guard& operator=(Guard&&) = default;
+
+    explicit operator bool() const { return bool(locked_); }
+
+    void Unlock() { locked_.reset(); }
+
+   private:
+    explicit Guard(Mutex* locked);
+
+    std::unique_ptr<Mutex, void (*)(Mutex*)> locked_;
+    friend Mutex;
+  };
+
+  Guard TryLock();
+  Guard Lock();
+
+ private:
+  struct Impl;
+  std::unique_ptr<Impl, void (*)(Impl*)> impl_;
+};
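// Illustrative sketch, not from the Arrow sources: TryLock() returns a falsy
// Guard instead of blocking when the mutex is already held.
//
//   Mutex mutex;
//   if (auto guard = mutex.TryLock()) {
//     // lock acquired; released when `guard` goes out of scope (or on Unlock())
//   } else {
//     // lock not acquired; fall back to mutex.Lock() to block instead
//   }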
+#ifndef _WIN32
+/// Return a pointer to a process-wide, process-specific Mutex that can be used
+/// at any point in a child process. NULL is returned when called in the parent.
+///
+/// The rule is to first check that getpid() corresponds to the parent process pid
+/// and, if not, call this function to lock any after-fork reinitialization code.
+/// Like this:
+///
+///   std::atomic<pid_t> pid{getpid()};
+///   ...
+///   if (pid.load() != getpid()) {
+///     // In child process
+///     auto lock = GlobalForkSafeMutex()->Lock();
+///     if (pid.load() != getpid()) {
+///       // Reinitialize internal structures after fork
+///       ...
+///       pid.store(getpid());
+///     }
+///   }
+ARROW_EXPORT
+Mutex* GlobalForkSafeMutex();
+#endif
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h
new file mode 100644
index 0000000000000000000000000000000000000000..82cea473c5b277323772c6914ee28b1903b5240d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/print.h
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <tuple>
+#include "arrow/util/string.h"
+
+using arrow::internal::ToChars;
+
+namespace arrow {
+namespace internal {
+
+namespace detail {
+
+template <typename OStream, typename Tuple, size_t N>
+struct TuplePrinter {
+  static void Print(OStream* os, const Tuple& t) {
+    TuplePrinter<OStream, Tuple, N - 1>::Print(os, t);
+    *os << std::get<N - 1>(t);
+  }
+};
+
+template <typename OStream, typename Tuple>
+struct TuplePrinter<OStream, Tuple, 0> {
+  static void Print(OStream* os, const Tuple& t) {}
+};
+
+}  // namespace detail
+
+// Print elements from a tuple to a stream, in order.
+// Typical use is to pack a bunch of existing values with std::forward_as_tuple()
+// before passing it to this function.
+template <typename OStream, typename... Args>
+void PrintTuple(OStream* os, const std::tuple<Args...>& tup) {
+  detail::TuplePrinter<OStream, std::tuple<Args...>, sizeof...(Args)>::Print(os, tup);
+}
+
+template <typename Range, typename Separator>
+struct PrintVector {
+  const Range& range_;
+  const Separator& separator_;
+
+  template <typename Os>  // template to dodge inclusion of <ostream>
+  friend Os& operator<<(Os& os, PrintVector l) {
+    bool first = true;
+    os << "[";
+    for (const auto& element : l.range_) {
+      if (first) {
+        first = false;
+      } else {
+        os << l.separator_;
+      }
+      os << ToChars(element);  // use ToChars to avoid locale dependence
+    }
+    os << "]";
+    return os;
+  }
+};
+template <typename Range, typename Separator>
+PrintVector(const Range&, const Separator&) -> PrintVector<Range, Separator>;
+}  // namespace internal
+}  // namespace arrow
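A short sketch of the two printing helpers above (not part of the vendored headers; expected output is shown in the comments):

#include <sstream>
#include <vector>

#include "arrow/util/print.h"

void PrintHelpersExample() {
  std::ostringstream os;

  // PrintVector wraps a range plus a separator; ToChars() keeps the output
  // locale-independent.
  std::vector<int> values = {1, 2, 3};
  os << arrow::internal::PrintVector{values, ", "};  // appends "[1, 2, 3]"

  // PrintTuple streams each tuple element in order, with no separator.
  auto tup = std::forward_as_tuple(42, " packed");
  arrow::internal::PrintTuple(&os, tup);  // appends "42 packed"
}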
The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+
+#include "arrow/array/data.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace ree_util {
+
+/// \brief Get the child array holding the run ends from an REE array
+inline const ArraySpan& RunEndsArray(const ArraySpan& span) { return span.child_data[0]; }
+
+/// \brief Get the child array holding the data values from an REE array
+inline const ArraySpan& ValuesArray(const ArraySpan& span) { return span.child_data[1]; }
+
+/// \brief Get a pointer to run ends values of an REE array
+template <typename RunEndCType>
+const RunEndCType* RunEnds(const ArraySpan& span) {
+  assert(RunEndsArray(span).type->id() == CTypeTraits<RunEndCType>::ArrowType::type_id);
+  return RunEndsArray(span).GetValues<RunEndCType>(1);
+}
+
+/// \brief Perform basic validations on the parameters of an REE array
+/// and its two children arrays
+///
+/// All the checks complete in O(1) time. Consequently, this function:
+/// - DOES NOT check that run_ends is sorted and all-positive
+/// - DOES NOT check the actual contents of the run_ends and values arrays
+Status ValidateRunEndEncodedChildren(const RunEndEncodedType& type,
+                                     int64_t logical_length,
+                                     const std::shared_ptr<ArrayData>& run_ends_data,
+                                     const std::shared_ptr<ArrayData>& values_data,
+                                     int64_t null_count, int64_t logical_offset);
+
+/// \brief Compute the logical null count of an REE array
+int64_t LogicalNullCount(const ArraySpan& span);
+
+namespace internal {
+
+/// \brief Uses binary-search to find the physical offset given a logical offset
+/// and run-end values
+///
+/// \return the physical offset or run_ends_size if the physical offset is not
+/// found in run_ends
+template <typename RunEndCType>
+int64_t FindPhysicalIndex(const RunEndCType* run_ends, int64_t run_ends_size, int64_t i,
+                          int64_t absolute_offset) {
+  assert(absolute_offset + i >= 0);
+  auto it = std::upper_bound(run_ends, run_ends + run_ends_size, absolute_offset + i);
+  int64_t result = std::distance(run_ends, it);
+  assert(result <= run_ends_size);
+  return result;
+}
+
+/// \brief Uses binary-search to calculate the range of physical values (and
+/// run-ends) necessary to represent the logical range of values from
+/// offset to length
+///
+/// \return a pair of physical offset and physical length
+template <typename RunEndCType>
+std::pair<int64_t, int64_t> FindPhysicalRange(const RunEndCType* run_ends,
+                                              int64_t run_ends_size, int64_t length,
+                                              int64_t offset) {
+  const int64_t physical_offset =
+      FindPhysicalIndex(run_ends, run_ends_size, 0, offset);
+  // The physical length is calculated by finding the offset of the last element
+  // and adding 1 to it, so first we ensure there is at least one element.
+  if (length == 0) {
+    return {physical_offset, 0};
+  }
+  const int64_t physical_index_of_last = FindPhysicalIndex(
+      run_ends + physical_offset, run_ends_size - physical_offset, length - 1, offset);
+
+  assert(physical_index_of_last < run_ends_size - physical_offset);
+  return {physical_offset, physical_index_of_last + 1};
+}
+
+/// \brief Uses binary-search to calculate the number of physical values (and
+/// run-ends) necessary to represent the logical range of values from
+/// offset to length
+template <typename RunEndCType>
+int64_t FindPhysicalLength(const RunEndCType* run_ends, int64_t run_ends_size,
+                           int64_t length, int64_t offset) {
+  auto [_, physical_length] =
+      FindPhysicalRange(run_ends, run_ends_size, length, offset);
+  // GH-37107: This is a workaround for GCC 7. GCC 7 doesn't ignore
+  // variables in structured binding automatically from unused
+  // variables when one of these variables are used.
+  // See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81767
+  ARROW_UNUSED(_);
+  return physical_length;
+}
+
+/// \brief Find the physical index into the values array of the REE ArraySpan
+///
+/// This function uses binary-search, so it has a O(log N) cost.
+template <typename RunEndCType>
+int64_t FindPhysicalIndex(const ArraySpan& span, int64_t i, int64_t absolute_offset) {
+  const int64_t run_ends_size = RunEndsArray(span).length;
+  return FindPhysicalIndex(RunEnds<RunEndCType>(span), run_ends_size, i,
+                           absolute_offset);
+}
+
+/// \brief Find the physical length of an REE ArraySpan
+///
+/// The physical length of an REE is the number of physical values (and
+/// run-ends) necessary to represent the logical range of values from
+/// offset to length.
+///
+/// Avoid calling this function if the physical length can be established in
+/// some other way (e.g. when iterating over the runs sequentially until the
+/// end). This function uses binary-search, so it has a O(log N) cost.
+template <typename RunEndCType>
+int64_t FindPhysicalLength(const ArraySpan& span) {
+  return FindPhysicalLength(
+      /*run_ends=*/RunEnds<RunEndCType>(span),
+      /*run_ends_size=*/RunEndsArray(span).length,
+      /*length=*/span.length,
+      /*offset=*/span.offset);
+}
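// Worked example (not from the Arrow sources): for run_ends = {2, 5, 9}, i.e.
// a logical array with runs covering [0, 2), [2, 5) and [5, 9):
//
//   FindPhysicalIndex(run_ends, /*run_ends_size=*/3, /*i=*/6, /*absolute_offset=*/0)
//
// computes upper_bound({2, 5, 9}, 6), which points at 9, so the physical index
// is 2: logical position 6 falls inside the third run. A query at or past the
// logical end (i >= 9) returns run_ends_size (3), the "not found" value
// documented above.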
+template <typename RunEndCType>
+struct PhysicalIndexFinder;
+
+// non-inline implementations for each run-end type
+ARROW_EXPORT int64_t FindPhysicalIndexImpl16(PhysicalIndexFinder<int16_t>& self,
+                                             int64_t i);
+ARROW_EXPORT int64_t FindPhysicalIndexImpl32(PhysicalIndexFinder<int32_t>& self,
+                                             int64_t i);
+ARROW_EXPORT int64_t FindPhysicalIndexImpl64(PhysicalIndexFinder<int64_t>& self,
+                                             int64_t i);
+
+/// \brief Stateful version of FindPhysicalIndex() that caches the result of
+/// the previous search and uses it to optimize the next search.
+///
+/// When new queries for the physical index of a logical index come in,
+/// binary search is performed again but the first candidate checked is the
+/// result of the previous search (cached physical index) instead of the
+/// midpoint of the run-ends array.
+///
+/// If that test fails, internal::FindPhysicalIndex() is called with one of the
+/// partitions defined by the cached index. If the queried logical indices
+/// follow an increasing or decreasing pattern, this first test is much more
+/// effective in (1) finding the answer right away (close logical indices belong
+/// to the same runs) or (2) discarding many more candidates than probing
+/// the midpoint would.
+///
+/// The most adversarial case (i.e. alternating between 0 and length-1 queries)
+/// only adds one extra binary search probe when compared to always starting
+/// binary search from the midpoint without any of these optimizations.
+///
+/// \tparam RunEndCType The numeric type of the run-ends array.
+template <typename RunEndCType>
+struct PhysicalIndexFinder {
+  const ArraySpan array_span;
+  const RunEndCType* run_ends;
+  int64_t last_physical_index = 0;
+
+  explicit PhysicalIndexFinder(const ArrayData& data)
+      : array_span(data),
+        run_ends(RunEndsArray(array_span).template GetValues<RunEndCType>(1)) {
+    assert(CTypeTraits<RunEndCType>::ArrowType::type_id ==
+           ::arrow::internal::checked_cast<const RunEndEncodedType&>(*data.type)
+               .run_end_type()
+               ->id());
+  }
+
+  /// \brief Find the physical index into the values array of the REE array.
+  ///
+  /// \pre 0 <= i < array_span.length()
+  /// \param i the logical index into the REE array
+  /// \return the physical index into the values array
+  int64_t FindPhysicalIndex(int64_t i) {
+    if constexpr (std::is_same_v<RunEndCType, int16_t>) {
+      return FindPhysicalIndexImpl16(*this, i);
+    } else if constexpr (std::is_same_v<RunEndCType, int32_t>) {
+      return FindPhysicalIndexImpl32(*this, i);
+    } else {
+      static_assert(std::is_same_v<RunEndCType, int64_t>, "Unsupported RunEndCType.");
+      return FindPhysicalIndexImpl64(*this, i);
+    }
+  }
+};
+
+}  // namespace internal
+
+/// \brief Find the physical index into the values array of the REE ArraySpan
+///
+/// This function uses binary-search, so it has a O(log N) cost.
+ARROW_EXPORT int64_t FindPhysicalIndex(const ArraySpan& span, int64_t i,
+                                       int64_t absolute_offset);
+
+/// \brief Find the physical length of an REE ArraySpan
+///
+/// The physical length of an REE is the number of physical values (and
+/// run-ends) necessary to represent the logical range of values from
+/// offset to length.
+///
+/// Avoid calling this function if the physical length can be established in
+/// some other way (e.g. when iterating over the runs sequentially until the
+/// end). This function uses binary-search, so it has a O(log N) cost.
+ARROW_EXPORT int64_t FindPhysicalLength(const ArraySpan& span);
+
+/// \brief Find the physical range of physical values referenced by the REE in
+/// the logical range from offset to offset + length
+///
+/// \return a pair of physical offset and physical length
+ARROW_EXPORT std::pair<int64_t, int64_t> FindPhysicalRange(const ArraySpan& span,
+                                                           int64_t offset,
+                                                           int64_t length);
+
+// Publish PhysicalIndexFinder outside of the internal namespace.
+template <typename RunEndCType>
+using PhysicalIndexFinder = internal::PhysicalIndexFinder<RunEndCType>;
+template <typename RunEndCType>
+class RunEndEncodedArraySpan {
+ private:
+  struct PrivateTag {};
+
+ public:
+  /// \brief Iterator representing the current run during iteration over a
+  /// run-end encoded array
+  class Iterator {
+   public:
+    Iterator(PrivateTag, const RunEndEncodedArraySpan& span, int64_t logical_pos,
+             int64_t physical_pos)
+        : span(span), logical_pos_(logical_pos), physical_pos_(physical_pos) {}
+
+    /// \brief Return the physical index of the run
+    ///
+    /// The values array can be addressed with this index to get the value
+    /// that makes up the run.
+    ///
+    /// NOTE: if this Iterator is equal to RunEndEncodedArraySpan::end(),
+    /// the value returned is undefined.
+    int64_t index_into_array() const { return physical_pos_; }
+
+    /// \brief Return the initial logical position of the run
+    ///
+    /// If this Iterator is equal to RunEndEncodedArraySpan::end(), this is
+    /// the same as RunEndEncodedArraySpan::length().
+    int64_t logical_position() const { return logical_pos_; }
+
+    /// \brief Return the logical position immediately after the run.
+    ///
+    /// Pre-condition: *this != RunEndEncodedArraySpan::end()
+    int64_t run_end() const { return span.run_end(physical_pos_); }
+
+    /// \brief Returns the logical length of the run.
+    ///
+    /// Pre-condition: *this != RunEndEncodedArraySpan::end()
+    int64_t run_length() const { return run_end() - logical_pos_; }
+
+    /// \brief Check if the iterator is at the end of the array.
+    ///
+    /// This can be used to avoid paying the cost of a call to
+    /// RunEndEncodedArraySpan::end().
+    ///
+    /// \return true if the iterator is at the end of the array
+    bool is_end(const RunEndEncodedArraySpan& span) const {
+      return logical_pos_ >= span.length();
+    }
+
+    Iterator& operator++() {
+      logical_pos_ = span.run_end(physical_pos_);
+      physical_pos_ += 1;
+      return *this;
+    }
+
+    Iterator operator++(int) {
+      const Iterator prev = *this;
+      ++(*this);
+      return prev;
+    }
+
+    Iterator& operator--() {
+      physical_pos_ -= 1;
+      logical_pos_ = (physical_pos_ > 0) ? span.run_end(physical_pos_ - 1) : 0;
+      return *this;
+    }
+
+    Iterator operator--(int) {
+      const Iterator prev = *this;
+      --(*this);
+      return prev;
+    }
+
+    bool operator==(const Iterator& other) const {
+      return logical_pos_ == other.logical_pos_;
+    }
+
+    bool operator!=(const Iterator& other) const {
+      return logical_pos_ != other.logical_pos_;
+    }
+
+   public:
+    const RunEndEncodedArraySpan& span;
+
+   private:
+    int64_t logical_pos_;
+    int64_t physical_pos_;
+  };
+
+  // Prevent implicit ArrayData -> ArraySpan conversion in
+  // RunEndEncodedArraySpan instantiation.
+  explicit RunEndEncodedArraySpan(const ArrayData& data) = delete;
+
+  /// \brief Construct a RunEndEncodedArraySpan from an ArraySpan and new
+  /// absolute offset and length.
+  ///
+  /// RunEndEncodedArraySpan{span, off, len} is equivalent to:
+  ///
+  ///   span.SetSlice(off, len);
+  ///   RunEndEncodedArraySpan{span}
+  ///
+  /// ArraySpan::SetSlice() updates the null_count to kUnknownNullCount, but
+  /// we don't need that here as REE arrays have null_count set to 0 by
+  /// convention.
+  explicit RunEndEncodedArraySpan(const ArraySpan& array_span, int64_t offset,
+                                  int64_t length)
+      : array_span_{array_span},
+        run_ends_(RunEnds<RunEndCType>(array_span_)),
+        length_(length),
+        offset_(offset) {
+    assert(array_span_.type->id() == Type::RUN_END_ENCODED);
+  }
+
+  explicit RunEndEncodedArraySpan(const ArraySpan& array_span)
+      : RunEndEncodedArraySpan(array_span, array_span.offset, array_span.length) {}
+
+  int64_t offset() const { return offset_; }
+  int64_t length() const { return length_; }
+
+  int64_t PhysicalIndex(int64_t logical_pos) const {
+    return internal::FindPhysicalIndex(run_ends_, RunEndsArray(array_span_).length,
+                                       logical_pos, offset_);
+  }
+
+  /// \brief Create an iterator from a logical position and its
+  /// pre-computed physical offset into the run ends array
+  ///
+  /// \param logical_pos is an index in the [0, length()] range
+  /// \param physical_offset the pre-calculated PhysicalIndex(logical_pos)
+  Iterator iterator(int64_t logical_pos, int64_t physical_offset) const {
+    return Iterator{PrivateTag{}, *this, logical_pos, physical_offset};
+  }
+
+  /// \brief Create an iterator from a logical position
+  ///
+  /// \param logical_pos is an index in the [0, length()] range
+  Iterator iterator(int64_t logical_pos) const {
+    if (logical_pos < length()) {
+      return iterator(logical_pos, PhysicalIndex(logical_pos));
+    }
+    // If logical_pos is above the valid range, use length() as the logical
+    // position and calculate the physical address right after the last valid
+    // physical position. Which is the physical index of the last logical
+    // position, plus 1.
+    return (length() == 0) ? iterator(0, PhysicalIndex(0))
+                           : iterator(length(), PhysicalIndex(length() - 1) + 1);
+  }
+  /// \brief Create an iterator representing the logical begin of the run-end
+  /// encoded array
+  Iterator begin() const { return iterator(0, PhysicalIndex(0)); }
+
+  /// \brief Create an iterator representing the first invalid logical position
+  /// of the run-end encoded array
+  ///
+  /// \warning Avoid calling end() in a loop, as it will recompute the physical
+  /// length of the array on each call (O(log N) cost per call).
+  ///
+  /// \par You can write your loops like this instead:
+  /// \code
+  /// for (auto it = array.begin(), end = array.end(); it != end; ++it) {
+  ///   // ...
+  /// }
+  /// \endcode
+  ///
+  /// \par Or this version that does not look like idiomatic C++, but removes
+  /// the need for calling end() completely:
+  /// \code
+  /// for (auto it = array.begin(); !it.is_end(array); ++it) {
+  ///   // ...
+  /// }
+  /// \endcode
+  Iterator end() const {
+    return iterator(length(),
+                    (length() == 0) ? PhysicalIndex(0) : PhysicalIndex(length() - 1) + 1);
+  }
+
+  // Pre-condition: physical_pos < RunEndsArray(array_span_).length
+  inline int64_t run_end(int64_t physical_pos) const {
+    assert(physical_pos < RunEndsArray(array_span_).length);
+    // Logical index of the end of the run at physical_pos with offset applied
+    const int64_t logical_run_end =
+        std::max<int64_t>(static_cast<int64_t>(run_ends_[physical_pos]) - offset(), 0);
+    // The current run may go further than the logical length, cap it
+    return std::min(logical_run_end, length());
+  }
+
+ private:
+  const ArraySpan& array_span_;
+  const RunEndCType* run_ends_;
+  const int64_t length_;
+  const int64_t offset_;
+};
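// Illustrative sketch, not from the Arrow sources: iterating the runs of a
// run-end encoded array with the class above, using the is_end() loop form
// recommended in the end() docstring. ProcessRun is hypothetical.
//
//   RunEndEncodedArraySpan<int32_t> ree_span(span);
//   for (auto it = ree_span.begin(); !it.is_end(ree_span); ++it) {
//     // it.index_into_array() addresses the values child array, and
//     // [it.logical_position(), it.run_end()) is the run's logical extent.
//     ProcessRun(it.index_into_array(), it.logical_position(), it.run_length());
//   }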
+/// \brief Iterate over two run-end encoded arrays in runs or sub-runs that are
+/// inside run boundaries on both inputs
+///
+/// Both RunEndEncodedArraySpan should have the same logical length. Instances
+/// of this iterator only hold references to the RunEndEncodedArraySpan inputs.
+template <typename Left, typename Right = Left>
+class MergedRunsIterator {
+ private:
+  using LeftIterator = typename Left::Iterator;
+  using RightIterator = typename Right::Iterator;
+
+  MergedRunsIterator(LeftIterator left_it, RightIterator right_it,
+                     int64_t common_logical_length, int64_t common_logical_pos)
+      : ree_iterators_{std::move(left_it), std::move(right_it)},
+        logical_length_(common_logical_length),
+        logical_pos_(common_logical_pos) {}
+
+ public:
+  /// \brief Construct a MergedRunsIterator positioned at logical position 0.
+  ///
+  /// Pre-condition: left.length() == right.length()
+  MergedRunsIterator(const Left& left, const Right& right)
+      : MergedRunsIterator(left.begin(), right.begin(), left.length(), 0) {
+    assert(left.length() == right.length());
+  }
+
+  static Result<MergedRunsIterator> MakeBegin(const Left& left, const Right& right) {
+    if (left.length() != right.length()) {
+      return Status::Invalid(
+          "MergedRunsIterator expects RunEndEncodedArraySpans of the same length");
+    }
+    return MergedRunsIterator(left, right);
+  }
+
+  static Result<MergedRunsIterator> MakeEnd(const Left& left, const Right& right) {
+    if (left.length() != right.length()) {
+      return Status::Invalid(
+          "MergedRunsIterator expects RunEndEncodedArraySpans of the same length");
+    }
+    return MergedRunsIterator(left.end(), right.end(), left.length(), left.length());
+  }
+
+  /// \brief Return the left RunEndEncodedArraySpan child
+  const Left& left() const { return std::get<0>(ree_iterators_).span; }
+
+  /// \brief Return the right RunEndEncodedArraySpan child
+  const Right& right() const { return std::get<1>(ree_iterators_).span; }
+
+  /// \brief Return the initial logical position of the run
+  ///
+  /// If is_end(), this is the same as length().
+  int64_t logical_position() const { return logical_pos_; }
+
+  /// \brief Whether the iterator is at logical position 0.
+  bool is_begin() const { return logical_pos_ == 0; }
+
+  /// \brief Whether the iterator has reached the end of both arrays
+  bool is_end() const { return logical_pos_ == logical_length_; }
+
+  /// \brief Return the logical position immediately after the run.
+  ///
+  /// Pre-condition: !is_end()
+  int64_t run_end() const {
+    const auto& left_it = std::get<0>(ree_iterators_);
+    const auto& right_it = std::get<1>(ree_iterators_);
+    return std::min(left_it.run_end(), right_it.run_end());
+  }
+
+  /// \brief Returns the logical length of the current run
+  ///
+  /// Pre-condition: !is_end()
+  int64_t run_length() const { return run_end() - logical_pos_; }
+
+  /// \brief Return a physical index into the values array of a given input,
+  /// pointing to the value of the current run
+  template <size_t input_id>
+  int64_t index_into_array() const {
+    return std::get<input_id>(ree_iterators_).index_into_array();
+  }
+
+  int64_t index_into_left_array() const { return index_into_array<0>(); }
+  int64_t index_into_right_array() const { return index_into_array<1>(); }
+
+  MergedRunsIterator& operator++() {
+    auto& left_it = std::get<0>(ree_iterators_);
+    auto& right_it = std::get<1>(ree_iterators_);
+
+    const int64_t left_run_end = left_it.run_end();
+    const int64_t right_run_end = right_it.run_end();
+
+    if (left_run_end < right_run_end) {
+      logical_pos_ = left_run_end;
+      ++left_it;
+    } else if (left_run_end > right_run_end) {
+      logical_pos_ = right_run_end;
+      ++right_it;
+    } else {
+      logical_pos_ = left_run_end;
+      ++left_it;
+      ++right_it;
+    }
+    return *this;
+  }
+
+  MergedRunsIterator operator++(int) {
+    MergedRunsIterator prev = *this;
+    ++(*this);
+    return prev;
+  }
+  MergedRunsIterator& operator--() {
+    auto& left_it = std::get<0>(ree_iterators_);
+    auto& right_it = std::get<1>(ree_iterators_);
+
+    // The logical position of each iterator is the run_end() of the previous run.
+    const int64_t left_logical_pos = left_it.logical_position();
+    const int64_t right_logical_pos = right_it.logical_position();
+
+    if (left_logical_pos < right_logical_pos) {
+      --right_it;
+      logical_pos_ = std::max(left_logical_pos, right_it.logical_position());
+    } else if (left_logical_pos > right_logical_pos) {
+      --left_it;
+      logical_pos_ = std::max(left_it.logical_position(), right_logical_pos);
+    } else {
+      --left_it;
+      --right_it;
+      logical_pos_ = std::max(left_it.logical_position(), right_it.logical_position());
+    }
+    return *this;
+  }
+
+  MergedRunsIterator operator--(int) {
+    MergedRunsIterator prev = *this;
+    --(*this);
+    return prev;
+  }
+
+  bool operator==(const MergedRunsIterator& other) const {
+    return logical_pos_ == other.logical_position();
+  }
+
+  bool operator!=(const MergedRunsIterator& other) const { return !(*this == other); }
+
+ private:
+  std::tuple<LeftIterator, RightIterator> ree_iterators_;
+  const int64_t logical_length_;
+  int64_t logical_pos_;
+};
+
+}  // namespace ree_util
+}  // namespace arrow
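A hedged sketch of how the merged iterator above can be driven (not part of the vendored headers; `process` is a stand-in for caller logic, and both inputs must have the same logical length):

#include "arrow/result.h"
#include "arrow/util/ree_util.h"

template <typename ProcessSubRun>
arrow::Status MergeExample(const arrow::ArraySpan& left_span,
                           const arrow::ArraySpan& right_span,
                           ProcessSubRun&& process) {
  using Span = arrow::ree_util::RunEndEncodedArraySpan<int32_t>;
  Span left(left_span);
  Span right(right_span);

  ARROW_ASSIGN_OR_RAISE(
      auto it, (arrow::ree_util::MergedRunsIterator<Span>::MakeBegin(left, right)));
  for (; !it.is_end(); ++it) {
    // Each step covers a sub-run that lies inside a single run of *both* inputs.
    process(it.index_into_left_array(), it.index_into_right_array(),
            it.run_length());
  }
  return arrow::Status::OK();
}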
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h
new file mode 100644
index 0000000000000000000000000000000000000000..590fbac7153889129e7bca7652125980cb4457cd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <initializer_list>
+#include <regex>
+#include <string_view>
+#include <utility>
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// Match regex against target and produce string_views out of matches.
+inline bool RegexMatch(const std::regex& regex, std::string_view target,
+                       std::initializer_list<std::string_view*> out_matches) {
+  assert(regex.mark_count() == out_matches.size());
+
+  std::match_results<std::string_view::const_iterator> match;
+  if (!std::regex_match(target.begin(), target.end(), match, regex)) {
+    return false;
+  }
+
+  // Match #0 is the whole matched sequence
+  assert(regex.mark_count() + 1 == match.size());
+  auto out_it = out_matches.begin();
+  for (size_t i = 1; i < match.size(); ++i) {
+    **out_it++ = target.substr(match.position(i), match.length(i));
+  }
+  return true;
+}
+
+}  // namespace internal
+}  // namespace arrow
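A small sketch of RegexMatch() above: the caller passes one output string_view pointer per capturing group. Not part of the vendored headers; the pattern and input are arbitrary:

#include <cassert>
#include <regex>
#include <string_view>

#include "arrow/util/regex.h"

void RegexMatchExample() {
  static const std::regex kVersionRegex("([0-9]+)\\.([0-9]+)");
  std::string_view major, minor;
  bool matched =
      arrow::internal::RegexMatch(kVersionRegex, "15.0", {&major, &minor});
  assert(matched && major == "15" && minor == "0");
}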
encode +/// in groups of 8), so that no matter the bit-width of the value, the sequence will end +/// on a byte boundary without padding. +/// Given that we know it is a multiple of 8, we store the number of 8-groups rather than +/// the actual number of encoded ints. (This means that the total number of encoded values +/// cannot be determined from the encoded data, since the number of values in the last +/// group may not be a multiple of 8). For the last group of literal runs, we pad +/// the group to 8 with zeros. This allows for 8 at a time decoding on the read side +/// without the need for additional checks. +// +/// There is a break-even point when it is more storage efficient to do run length +/// encoding. For 1 bit-width values, that point is 8 values. They require 2 bytes +/// for both the repeated encoding or the literal encoding. This value can always +/// be computed based on the bit-width. +/// TODO: think about how to use this for strings. The bit packing isn't quite the same. +// +/// Examples with bit-width 1 (eg encoding booleans): +/// ---------------------------------------- +/// 100 1s followed by 100 0s: +/// <1, padded to 1 byte> <0, padded to 1 byte> +/// - (total 4 bytes) +// +/// alternating 1s and 0s (200 total): +/// 200 ints = 25 groups of 8 +/// <25 bytes of values, bitpacked> +/// (total 26 bytes, 1 byte overhead) +// + +/// Decoder class for RLE encoded data. +class RleDecoder { + public: + /// Create a decoder object. buffer/buffer_len is the decoded data. + /// bit_width is the width of each value (before encoding). + RleDecoder(const uint8_t* buffer, int buffer_len, int bit_width) + : bit_reader_(buffer, buffer_len), + bit_width_(bit_width), + current_value_(0), + repeat_count_(0), + literal_count_(0) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + } + + RleDecoder() : bit_width_(-1) {} + + void Reset(const uint8_t* buffer, int buffer_len, int bit_width) { + DCHECK_GE(bit_width, 0); + DCHECK_LE(bit_width, 64); + bit_reader_.Reset(buffer, buffer_len); + bit_width_ = bit_width; + current_value_ = 0; + repeat_count_ = 0; + literal_count_ = 0; + } + + /// Gets the next value. Returns false if there are no more. + template + bool Get(T* val); + + /// Gets a batch of values. Returns the number of decoded elements. + template + int GetBatch(T* values, int batch_size); + + /// Like GetBatch but add spacing for null entries + template + int GetBatchSpaced(int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset, T* out); + + /// Like GetBatch but the values are then decoded using the provided dictionary + template + int GetBatchWithDict(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size); + + /// Like GetBatchWithDict but add spacing for null entries + /// + /// Null entries will be zero-initialized in `values` to avoid leaking + /// private data. + template + int GetBatchWithDictSpaced(const T* dictionary, int32_t dictionary_length, T* values, + int batch_size, int null_count, const uint8_t* valid_bits, + int64_t valid_bits_offset); + + protected: + ::arrow::bit_util::BitReader bit_reader_; + /// Number of bits needed to encode the value. Must be between 0 and 64. + int bit_width_; + uint64_t current_value_; + int32_t repeat_count_; + int32_t literal_count_; + + private: + /// Fills literal_count_ and repeat_count_ with next values. Returns false if there + /// are no more. + template + bool NextCounts(); + + /// Utility methods for retrieving spaced values. 
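+  /// The Converter policy maps decoded run values to output values; it is
+  /// expected to provide IsValid, Fill, FillZero and Copy, as the converter
+  /// structs defined further below do.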
+ template + int GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, T* out); +}; + +/// Class to incrementally build the rle data. This class does not allocate any memory. +/// The encoding has two modes: encoding repeated runs and literal runs. +/// If the run is sufficiently short, it is more efficient to encode as a literal run. +/// This class does so by buffering 8 values at a time. If they are not all the same +/// they are added to the literal run. If they are the same, they are added to the +/// repeated run. When we switch modes, the previous run is flushed out. +class RleEncoder { + public: + /// buffer/buffer_len: preallocated output buffer. + /// bit_width: max number of bits for value. + /// TODO: consider adding a min_repeated_run_length so the caller can control + /// when values should be encoded as repeated runs. Currently this is derived + /// based on the bit_width, which can determine a storage optimal choice. + /// TODO: allow 0 bit_width (and have dict encoder use it) + RleEncoder(uint8_t* buffer, int buffer_len, int bit_width) + : bit_width_(bit_width), bit_writer_(buffer, buffer_len) { + DCHECK_GE(bit_width_, 0); + DCHECK_LE(bit_width_, 64); + max_run_byte_size_ = MinBufferSize(bit_width); + DCHECK_GE(buffer_len, max_run_byte_size_) << "Input buffer not big enough."; + Clear(); + } + + /// Returns the minimum buffer size needed to use the encoder for 'bit_width' + /// This is the maximum length of a single run for 'bit_width'. + /// It is not valid to pass a buffer less than this length. + static int MinBufferSize(int bit_width) { + /// 1 indicator byte and MAX_VALUES_PER_LITERAL_RUN 'bit_width' values. + int max_literal_run_size = 1 + static_cast(::arrow::bit_util::BytesForBits( + MAX_VALUES_PER_LITERAL_RUN * bit_width)); + /// Up to kMaxVlqByteLength indicator and a single 'bit_width' value. + int max_repeated_run_size = + ::arrow::bit_util::BitReader::kMaxVlqByteLength + + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + return std::max(max_literal_run_size, max_repeated_run_size); + } + + /// Returns the maximum byte size it could take to encode 'num_values'. + static int MaxBufferSize(int bit_width, int num_values) { + // For a bit_width > 1, the worst case is the repetition of "literal run of length 8 + // and then a repeated run of length 8". + // 8 values per smallest run, 8 bits per byte + int bytes_per_run = bit_width; + int num_runs = static_cast(::arrow::bit_util::CeilDiv(num_values, 8)); + int literal_max_size = num_runs + num_runs * bytes_per_run; + + // In the very worst case scenario, the data is a concatenation of repeated + // runs of 8 values. Repeated run has a 1 byte varint followed by the + // bit-packed repeated value + int min_repeated_run_size = + 1 + static_cast(::arrow::bit_util::BytesForBits(bit_width)); + int repeated_max_size = num_runs * min_repeated_run_size; + + return std::max(literal_max_size, repeated_max_size); + } + + /// Encode value. Returns true if the value fits in buffer, false otherwise. + /// This value must be representable with bit_width_ bits. + bool Put(uint64_t value); + + /// Flushes any pending values to the underlying buffer. + /// Returns the total number of bytes written + int Flush(); + + /// Resets all the state in the encoder. 
+ void Clear(); + + /// Returns pointer to underlying buffer + uint8_t* buffer() { return bit_writer_.buffer(); } + int32_t len() { return bit_writer_.bytes_written(); } + + private: + /// Flushes any buffered values. If this is part of a repeated run, this is largely + /// a no-op. + /// If it is part of a literal run, this will call FlushLiteralRun, which writes + /// out the buffered literal values. + /// If 'done' is true, the current run would be written even if it would normally + /// have been buffered more. This should only be called at the end, when the + /// encoder has received all values even if it would normally continue to be + /// buffered. + void FlushBufferedValues(bool done); + + /// Flushes literal values to the underlying buffer. If update_indicator_byte, + /// then the current literal run is complete and the indicator byte is updated. + void FlushLiteralRun(bool update_indicator_byte); + + /// Flushes a repeated run to the underlying buffer. + void FlushRepeatedRun(); + + /// Checks and sets buffer_full_. This must be called after flushing a run to + /// make sure there are enough bytes remaining to encode the next run. + void CheckBufferFull(); + + /// The maximum number of values in a single literal run + /// (number of groups encodable by a 1-byte indicator * 8) + static const int MAX_VALUES_PER_LITERAL_RUN = (1 << 6) * 8; + + /// Number of bits needed to encode the value. Must be between 0 and 64. + const int bit_width_; + + /// Underlying buffer. + ::arrow::bit_util::BitWriter bit_writer_; + + /// If true, the buffer is full and subsequent Put()'s will fail. + bool buffer_full_; + + /// The maximum byte size a single run can take. + int max_run_byte_size_; + + /// We need to buffer at most 8 values for literals. This happens when the + /// bit_width is 1 (so 8 values fit in one byte). + /// TODO: generalize this to other bit widths + int64_t buffered_values_[8]; + + /// Number of values in buffered_values_ + int num_buffered_values_; + + /// The current (also last) value that was written and the count of how + /// many times in a row that value has been seen. This is maintained even + /// if we are in a literal run. If the repeat_count_ get high enough, we switch + /// to encoding repeated runs. + uint64_t current_value_; + int repeat_count_; + + /// Number of literals in the current run. This does not include the literals + /// that might be in buffered_values_. Only after we've got a group big enough + /// can we decide if they should part of the literal_count_ or repeat_count_ + int literal_count_; + + /// Pointer to a byte in the underlying buffer that stores the indicator byte. + /// This is reserved as soon as we need a literal run but the value is written + /// when the literal run is complete. + uint8_t* literal_indicator_byte_; +}; + +template +inline bool RleDecoder::Get(T* val) { + return GetBatch(val, 1) == 1; +} + +template +inline int RleDecoder::GetBatch(T* values, int batch_size) { + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { // Repeated value case. 
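+      // A repeated run can be materialized without touching the bit reader:
+      // emit up to repeat_count_ copies of current_value_ in one shot.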
+ int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, static_cast(current_value_)); + + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(remaining, literal_count_); + int actual_read = bit_reader_.GetBatch(bit_width_, out, literal_batch); + if (actual_read != literal_batch) { + return values_read; + } + + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetSpaced(Converter converter, int batch_size, int null_count, + const uint8_t* valid_bits, int64_t valid_bits_offset, + T* out) { + if (ARROW_PREDICT_FALSE(null_count == batch_size)) { + converter.FillZero(out, out + batch_size); + return batch_size; + } + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + int values_remaining = batch_size - null_count; + + // Assume no bits to start. + arrow::internal::BitRunReader bit_reader(valid_bits, valid_bits_offset, + /*length=*/batch_size); + arrow::internal::BitRun valid_run = bit_reader.NextRun(); + while (values_read < batch_size) { + if (ARROW_PREDICT_FALSE(valid_run.length == 0)) { + valid_run = bit_reader.NextRun(); + } + + DCHECK_GT(batch_size, 0); + DCHECK_GT(valid_run.length, 0); + + if (valid_run.set) { + if ((repeat_count_ == 0) && (literal_count_ == 0)) { + if (!NextCounts()) return values_read; + DCHECK((repeat_count_ > 0) ^ (literal_count_ > 0)); + } + + if (repeat_count_ > 0) { + int repeat_batch = 0; + // Consume the entire repeat counts incrementing repeat_batch to + // be the total of nulls + values consumed, we only need to + // get the total count because we can fill in the same value for + // nulls and non-nulls. This proves to be a big efficiency win. + while (repeat_count_ > 0 && (values_read + repeat_batch) < batch_size) { + DCHECK_GT(valid_run.length, 0); + if (valid_run.set) { + int update_size = std::min(static_cast(valid_run.length), repeat_count_); + repeat_count_ -= update_size; + repeat_batch += update_size; + valid_run.length -= update_size; + values_remaining -= update_size; + } else { + // We can consume all nulls here because we would do so on + // the next loop anyways. 
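+          // (the repeated value is also written over the null slots below,
+          // which is harmless and cheaper than branching on validity)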
+ repeat_batch += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + RunType current_value = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!converter.IsValid(current_value))) { + return values_read; + } + converter.Fill(out, out + repeat_batch, current_value); + out += repeat_batch; + values_read += repeat_batch; + } else if (literal_count_ > 0) { + int literal_batch = std::min(values_remaining, literal_count_); + DCHECK_GT(literal_batch, 0); + + // Decode the literals + constexpr int kBufferSize = 1024; + RunType indices[kBufferSize]; + literal_batch = std::min(literal_batch, kBufferSize); + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (!converter.IsValid(indices, /*length=*/actual_read)) { + return values_read; + } + int skipped = 0; + int literals_read = 0; + while (literals_read < literal_batch) { + if (valid_run.set) { + int update_size = std::min(literal_batch - literals_read, + static_cast(valid_run.length)); + converter.Copy(out, indices + literals_read, update_size); + literals_read += update_size; + out += update_size; + valid_run.length -= update_size; + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + skipped += static_cast(valid_run.length); + valid_run.length = 0; + } + if (valid_run.length == 0) { + valid_run = bit_reader.NextRun(); + } + } + literal_count_ -= literal_batch; + values_remaining -= literal_batch; + values_read += literal_batch + skipped; + } + } else { + converter.FillZero(out, out + valid_run.length); + out += valid_run.length; + values_read += static_cast(valid_run.length); + valid_run.length = 0; + } + } + DCHECK_EQ(valid_run.length, 0); + DCHECK_EQ(values_remaining, 0); + return values_read; +} + +// Converter for GetSpaced that handles runs that get returned +// directly as output. 
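+// That is, decoded values are copied through unchanged, in contrast to
+// DictionaryConverter below, which maps decoded indices through a dictionary.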
+template <typename T>
+struct PlainRleConverter {
+  T kZero = {};
+  inline bool IsValid(const T& values) const { return true; }
+  inline bool IsValid(const T* values, int32_t length) const { return true; }
+  inline void Fill(T* begin, T* end, const T& run_value) const {
+    std::fill(begin, end, run_value);
+  }
+  inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); }
+  inline void Copy(T* out, const T* values, int length) const {
+    std::memcpy(out, values, length * sizeof(T));
+  }
+};
+
+template <typename T>
+inline int RleDecoder::GetBatchSpaced(int batch_size, int null_count,
+                                      const uint8_t* valid_bits,
+                                      int64_t valid_bits_offset, T* out) {
+  if (null_count == 0) {
+    return GetBatch(out, batch_size);
+  }
+
+  PlainRleConverter<T> converter;
+  arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset,
+                                                 batch_size);
+
+  int total_processed = 0;
+  int processed = 0;
+  arrow::internal::BitBlockCount block;
+
+  do {
+    block = block_counter.NextFourWords();
+    if (block.length == 0) {
+      break;
+    }
+    if (block.AllSet()) {
+      processed = GetBatch(out, block.length);
+    } else if (block.NoneSet()) {
+      converter.FillZero(out, out + block.length);
+      processed = block.length;
+    } else {
+      processed = GetSpaced<T, /*RunType=*/T, PlainRleConverter<T>>(
+          converter, block.length, block.length - block.popcount, valid_bits,
+          valid_bits_offset, out);
+    }
+    total_processed += processed;
+    out += block.length;
+    valid_bits_offset += block.length;
+  } while (processed == block.length);
+  return total_processed;
+}
+
+static inline bool IndexInRange(int32_t idx, int32_t dictionary_length) {
+  return idx >= 0 && idx < dictionary_length;
+}
+
+// Converter for GetSpaced that handles runs of returned dictionary
+// indices.
+template <typename T>
+struct DictionaryConverter {
+  T kZero = {};
+  const T* dictionary;
+  int32_t dictionary_length;
+
+  inline bool IsValid(int32_t value) { return IndexInRange(value, dictionary_length); }
+
+  inline bool IsValid(const int32_t* values, int32_t length) const {
+    using IndexType = int32_t;
+    IndexType min_index = std::numeric_limits<IndexType>::max();
+    IndexType max_index = std::numeric_limits<IndexType>::min();
+    for (int x = 0; x < length; x++) {
+      min_index = std::min(values[x], min_index);
+      max_index = std::max(values[x], max_index);
+    }
+
+    return IndexInRange(min_index, dictionary_length) &&
+           IndexInRange(max_index, dictionary_length);
+  }
+  inline void Fill(T* begin, T* end, const int32_t& run_value) const {
+    std::fill(begin, end, dictionary[run_value]);
+  }
+  inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); }
+
+  inline void Copy(T* out, const int32_t* values, int length) const {
+    for (int x = 0; x < length; x++) {
+      out[x] = dictionary[values[x]];
+    }
+  }
+};
+
+template <typename T>
+inline int RleDecoder::GetBatchWithDict(const T* dictionary, int32_t dictionary_length,
+                                        T* values, int batch_size) {
+  // Per https://github.com/apache/parquet-format/blob/master/Encodings.md,
+  // the maximum dictionary index width in Parquet is 32 bits.
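+  // An int32_t index type is therefore always wide enough here.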
+ using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + converter.dictionary_length = dictionary_length; + + DCHECK_GE(bit_width_, 0); + int values_read = 0; + + auto* out = values; + + while (values_read < batch_size) { + int remaining = batch_size - values_read; + + if (repeat_count_ > 0) { + auto idx = static_cast(current_value_); + if (ARROW_PREDICT_FALSE(!IndexInRange(idx, dictionary_length))) { + return values_read; + } + T val = dictionary[idx]; + + int repeat_batch = std::min(remaining, repeat_count_); + std::fill(out, out + repeat_batch, val); + + /* Upkeep counters */ + repeat_count_ -= repeat_batch; + values_read += repeat_batch; + out += repeat_batch; + } else if (literal_count_ > 0) { + constexpr int kBufferSize = 1024; + IndexType indices[kBufferSize]; + + int literal_batch = std::min(remaining, literal_count_); + literal_batch = std::min(literal_batch, kBufferSize); + + int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch); + if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) { + return values_read; + } + if (ARROW_PREDICT_FALSE(!converter.IsValid(indices, /*length=*/literal_batch))) { + return values_read; + } + converter.Copy(out, indices, literal_batch); + + /* Upkeep counters */ + literal_count_ -= literal_batch; + values_read += literal_batch; + out += literal_batch; + } else { + if (!NextCounts()) return values_read; + } + } + + return values_read; +} + +template +inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary, + int32_t dictionary_length, T* out, + int batch_size, int null_count, + const uint8_t* valid_bits, + int64_t valid_bits_offset) { + if (null_count == 0) { + return GetBatchWithDict(dictionary, dictionary_length, out, batch_size); + } + arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset, + batch_size); + using IndexType = int32_t; + DictionaryConverter converter; + converter.dictionary = dictionary; + converter.dictionary_length = dictionary_length; + + int total_processed = 0; + int processed = 0; + arrow::internal::BitBlockCount block; + do { + block = block_counter.NextFourWords(); + if (block.length == 0) { + break; + } + if (block.AllSet()) { + processed = GetBatchWithDict(dictionary, dictionary_length, out, block.length); + } else if (block.NoneSet()) { + converter.FillZero(out, out + block.length); + processed = block.length; + } else { + processed = GetSpaced>( + converter, block.length, block.length - block.popcount, valid_bits, + valid_bits_offset, out); + } + total_processed += processed; + out += block.length; + valid_bits_offset += block.length; + } while (processed == block.length); + return total_processed; +} + +template +bool RleDecoder::NextCounts() { + // Read the next run's indicator int, it could be a literal or repeated run. + // The int is encoded as a vlq-encoded value. 
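+  // For example, an indicator byte of 0x17 (0b10111) has LSB 1 and therefore
+  // starts a literal run of 0x17 >> 1 = 11 groups (88 values), while 0x0C
+  // (0b1100) has LSB 0 and starts a repeated run of 0x0C >> 1 = 6 repetitions.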
+ uint32_t indicator_value = 0; + if (!bit_reader_.GetVlqInt(&indicator_value)) return false; + + // lsb indicates if it is a literal run or repeated run + bool is_literal = indicator_value & 1; + uint32_t count = indicator_value >> 1; + if (is_literal) { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX) / 8)) { + return false; + } + literal_count_ = count * 8; + } else { + if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast(INT32_MAX))) { + return false; + } + repeat_count_ = count; + T value = {}; + if (!bit_reader_.GetAligned( + static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8)), &value)) { + return false; + } + current_value_ = static_cast(value); + } + return true; +} + +/// This function buffers input values 8 at a time. After seeing all 8 values, +/// it decides whether they should be encoded as a literal or repeated run. +inline bool RleEncoder::Put(uint64_t value) { + DCHECK(bit_width_ == 64 || value < (1ULL << bit_width_)); + if (ARROW_PREDICT_FALSE(buffer_full_)) return false; + + if (ARROW_PREDICT_TRUE(current_value_ == value)) { + ++repeat_count_; + if (repeat_count_ > 8) { + // This is just a continuation of the current run, no need to buffer the + // values. + // Note that this is the fast path for long repeated runs. + return true; + } + } else { + if (repeat_count_ >= 8) { + // We had a run that was long enough but it has ended. Flush the + // current repeated run. + DCHECK_EQ(literal_count_, 0); + FlushRepeatedRun(); + } + repeat_count_ = 1; + current_value_ = value; + } + + buffered_values_[num_buffered_values_] = value; + if (++num_buffered_values_ == 8) { + DCHECK_EQ(literal_count_ % 8, 0); + FlushBufferedValues(false); + } + return true; +} + +inline void RleEncoder::FlushLiteralRun(bool update_indicator_byte) { + if (literal_indicator_byte_ == NULL) { + // The literal indicator byte has not been reserved yet, get one now. + literal_indicator_byte_ = bit_writer_.GetNextBytePtr(); + DCHECK(literal_indicator_byte_ != NULL); + } + + // Write all the buffered values as bit packed literals + for (int i = 0; i < num_buffered_values_; ++i) { + bool success = bit_writer_.PutValue(buffered_values_[i], bit_width_); + DCHECK(success) << "There is a bug in using CheckBufferFull()"; + } + num_buffered_values_ = 0; + + if (update_indicator_byte) { + // At this point we need to write the indicator byte for the literal run. + // We only reserve one byte, to allow for streaming writes of literal values. + // The logic makes sure we flush literal runs often enough to not overrun + // the 1 byte. + DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + int32_t indicator_value = (num_groups << 1) | 1; + DCHECK_EQ(indicator_value & 0xFFFFFF00, 0); + *literal_indicator_byte_ = static_cast(indicator_value); + literal_indicator_byte_ = NULL; + literal_count_ = 0; + CheckBufferFull(); + } +} + +inline void RleEncoder::FlushRepeatedRun() { + DCHECK_GT(repeat_count_, 0); + bool result = true; + // The lsb of 0 indicates this is a repeated run + int32_t indicator_value = repeat_count_ << 1 | 0; + result &= bit_writer_.PutVlqInt(static_cast(indicator_value)); + result &= bit_writer_.PutAligned( + current_value_, static_cast(::arrow::bit_util::CeilDiv(bit_width_, 8))); + DCHECK(result); + num_buffered_values_ = 0; + repeat_count_ = 0; + CheckBufferFull(); +} + +/// Flush the values that have been buffered. At this point we decide whether +/// we need to switch between the run types or continue the current one. 
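+/// Concretely: if the eight buffered values were all identical
+/// (repeat_count_ >= 8), they become (or extend) a repeated run and any open
+/// literal run is closed; otherwise they are appended to the current literal
+/// run.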
+inline void RleEncoder::FlushBufferedValues(bool done) { + if (repeat_count_ >= 8) { + // Clear the buffered values. They are part of the repeated run now and we + // don't want to flush them out as literals. + num_buffered_values_ = 0; + if (literal_count_ != 0) { + // There was a current literal run. All the values in it have been flushed + // but we still need to update the indicator byte. + DCHECK_EQ(literal_count_ % 8, 0); + DCHECK_EQ(repeat_count_, 8); + FlushLiteralRun(true); + } + DCHECK_EQ(literal_count_, 0); + return; + } + + literal_count_ += num_buffered_values_; + DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + if (num_groups + 1 >= (1 << 6)) { + // We need to start a new literal run because the indicator byte we've reserved + // cannot store more values. + DCHECK(literal_indicator_byte_ != NULL); + FlushLiteralRun(true); + } else { + FlushLiteralRun(done); + } + repeat_count_ = 0; +} + +inline int RleEncoder::Flush() { + if (literal_count_ > 0 || repeat_count_ > 0 || num_buffered_values_ > 0) { + bool all_repeat = literal_count_ == 0 && (repeat_count_ == num_buffered_values_ || + num_buffered_values_ == 0); + // There is something pending, figure out if it's a repeated or literal run + if (repeat_count_ > 0 && all_repeat) { + FlushRepeatedRun(); + } else { + DCHECK_EQ(literal_count_ % 8, 0); + // Buffer the last group of literals to 8 by padding with 0s. + for (; num_buffered_values_ != 0 && num_buffered_values_ < 8; + ++num_buffered_values_) { + buffered_values_[num_buffered_values_] = 0; + } + literal_count_ += num_buffered_values_; + FlushLiteralRun(true); + repeat_count_ = 0; + } + } + bit_writer_.Flush(); + DCHECK_EQ(num_buffered_values_, 0); + DCHECK_EQ(literal_count_, 0); + DCHECK_EQ(repeat_count_, 0); + + return bit_writer_.bytes_written(); +} + +inline void RleEncoder::CheckBufferFull() { + int bytes_written = bit_writer_.bytes_written(); + if (bytes_written + max_run_byte_size_ > bit_writer_.buffer_len()) { + buffer_full_ = true; + } +} + +inline void RleEncoder::Clear() { + buffer_full_ = false; + current_value_ = 0; + repeat_count_ = 0; + num_buffered_values_ = 0; + literal_count_ = 0; + literal_indicator_byte_ = NULL; + bit_writer_.Clear(); +} + +} // namespace util +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h new file mode 100644 index 0000000000000000000000000000000000000000..ee9105d5f4beb431f155f8b47b7efdcc72452bc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/simd.h @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#ifdef _MSC_VER +// MSVC x86_64/arm64 + +#if defined(_M_AMD64) || defined(_M_X64) +#include +#endif + +#else +// gcc/clang (possibly others) + +#if defined(ARROW_HAVE_BMI2) +#include +#endif + +#if defined(ARROW_HAVE_AVX2) || defined(ARROW_HAVE_AVX512) +#include +#elif defined(ARROW_HAVE_SSE4_2) +#include +#endif + +#ifdef ARROW_HAVE_NEON +#include +#endif + +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..52e191c4c07846b922a5bd830c2cbbde50538eba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h @@ -0,0 +1,511 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/aligned_storage.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + void destroy() noexcept {} +}; + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + ~StaticVectorStorageBase() noexcept { destroy(); } + + void destroy() noexcept { storage_type::destroy_several(static_data_, size_); } +}; + +template ::value> +struct StaticVectorStorage : public StaticVectorStorageBase { + using Base = StaticVectorStorageBase; + using typename Base::storage_type; + + using Base::size_; + using Base::static_data_; + + StaticVectorStorage() noexcept = default; + + constexpr storage_type* storage_ptr() { return static_data_; } + + constexpr const storage_type* const_storage_ptr() const { return static_data_; } + + // Adjust storage size, but don't initialize any objects + void bump_size(size_t addend) { + assert(size_ + addend <= N); + size_ += addend; + } + + void ensure_capacity(size_t min_capacity) { assert(min_capacity <= N); } + + // Adjust storage size, but don't destroy any objects + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + // Move objects from another storage, but don't destroy any objects currently + // stored in *this. + // You need to call destroy() first if necessary (e.g. in a + // move assignment operator). 
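+  // (StaticVectorImpl's move assignment operator below does exactly that:
+  // destroy() followed by move_construct().)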
+ void move_construct(StaticVectorStorage&& other) noexcept { + size_ = other.size_; + if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return N; } + + constexpr size_t max_size() const { return N; } + + void reserve(size_t n) {} + + void clear() { + storage_type::destroy_several(static_data_, size_); + size_ = 0; + } +}; + +template +struct SmallVectorStorage { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + storage_type* data_ = static_data_; + size_t dynamic_capacity_ = 0; + + SmallVectorStorage() noexcept = default; + + ~SmallVectorStorage() { destroy(); } + + constexpr storage_type* storage_ptr() { return data_; } + + constexpr const storage_type* const_storage_ptr() const { return data_; } + + void bump_size(size_t addend) { + const size_t new_size = size_ + addend; + ensure_capacity(new_size); + size_ = new_size; + } + + void ensure_capacity(size_t min_capacity) { + if (dynamic_capacity_) { + // Grow dynamic storage if necessary + if (min_capacity > dynamic_capacity_) { + size_t new_capacity = std::max(dynamic_capacity_ * 2, min_capacity); + reallocate_dynamic(new_capacity); + } + } else if (min_capacity > N) { + switch_to_dynamic(min_capacity); + } + } + + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + void destroy() noexcept { + storage_type::destroy_several(data_, size_); + if (dynamic_capacity_) { + delete[] data_; + } + } + + void move_construct(SmallVectorStorage&& other) noexcept { + size_ = other.size_; + dynamic_capacity_ = other.dynamic_capacity_; + if (dynamic_capacity_) { + data_ = other.data_; + other.data_ = other.static_data_; + other.dynamic_capacity_ = 0; + other.size_ = 0; + } else if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return dynamic_capacity_ ? 
dynamic_capacity_ : N; } + + constexpr size_t max_size() const { return std::numeric_limits::max(); } + + void reserve(size_t n) { + if (dynamic_capacity_) { + if (n > dynamic_capacity_) { + reallocate_dynamic(n); + } + } else if (n > N) { + switch_to_dynamic(n); + } + } + + void clear() { + storage_type::destroy_several(data_, size_); + size_ = 0; + } + + private: + void switch_to_dynamic(size_t new_capacity) { + dynamic_capacity_ = new_capacity; + data_ = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(static_data_, data_, size_); + } + + void reallocate_dynamic(size_t new_capacity) { + assert(new_capacity >= size_); + auto new_data = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(data_, new_data, size_); + delete[] data_; + dynamic_capacity_ = new_capacity; + data_ = new_data; + } +}; + +template +class StaticVectorImpl { + private: + Storage storage_; + + T* data_ptr() { return storage_.storage_ptr()->get(); } + + constexpr const T* const_data_ptr() const { + return storage_.const_storage_ptr()->get(); + } + + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + constexpr StaticVectorImpl() noexcept = default; + + // Move and copy constructors + StaticVectorImpl(StaticVectorImpl&& other) noexcept { + storage_.move_construct(std::move(other.storage_)); + } + + StaticVectorImpl& operator=(StaticVectorImpl&& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + // TODO move_assign? 
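+      // For now destroy-then-move-construct is equivalent, though a dedicated
+      // move_assign could reuse already-constructed elements.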
+ storage_.destroy(); + storage_.move_construct(std::move(other.storage_)); + } + return *this; + } + + StaticVectorImpl(const StaticVectorImpl& other) { + init_by_copying(other.storage_.size_, other.const_data_ptr()); + } + + StaticVectorImpl& operator=(const StaticVectorImpl& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + assign_by_copying(other.storage_.size_, other.data()); + } + return *this; + } + + // Automatic conversion from std::vector, for convenience + StaticVectorImpl(const std::vector& other) { // NOLINT: explicit + init_by_copying(other.size(), other.data()); + } + + StaticVectorImpl(std::vector&& other) noexcept { // NOLINT: explicit + init_by_moving(other.size(), other.data()); + } + + StaticVectorImpl& operator=(const std::vector& other) { + assign_by_copying(other.size(), other.data()); + return *this; + } + + StaticVectorImpl& operator=(std::vector&& other) noexcept { + assign_by_moving(other.size(), other.data()); + return *this; + } + + // Constructing from count and optional initialization value + explicit StaticVectorImpl(size_t count) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(); + } + } + + StaticVectorImpl(size_t count, const T& value) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(value); + } + } + + StaticVectorImpl(std::initializer_list values) { + storage_.bump_size(values.size()); + auto* p = storage_.storage_ptr(); + for (auto&& v : values) { + // Unfortunately, cannot move initializer values + p++->construct(v); + } + } + + // Size inspection + + constexpr bool empty() const { return storage_.size_ == 0; } + + constexpr size_t size() const { return storage_.size_; } + + constexpr size_t capacity() const { return storage_.capacity(); } + + constexpr size_t max_size() const { return storage_.max_size(); } + + // Data access + + T& operator[](size_t i) { return data_ptr()[i]; } + + constexpr const T& operator[](size_t i) const { return const_data_ptr()[i]; } + + T& front() { return data_ptr()[0]; } + + constexpr const T& front() const { return const_data_ptr()[0]; } + + T& back() { return data_ptr()[storage_.size_ - 1]; } + + constexpr const T& back() const { return const_data_ptr()[storage_.size_ - 1]; } + + T* data() { return data_ptr(); } + + constexpr const T* data() const { return const_data_ptr(); } + + // Iterators + + iterator begin() { return iterator(data_ptr()); } + + constexpr const_iterator begin() const { return const_iterator(const_data_ptr()); } + + constexpr const_iterator cbegin() const { return const_iterator(const_data_ptr()); } + + iterator end() { return iterator(data_ptr() + storage_.size_); } + + constexpr const_iterator end() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + constexpr const_iterator cend() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + constexpr const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + constexpr const_reverse_iterator crbegin() const { + return const_reverse_iterator(end()); + } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + constexpr const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + constexpr const_reverse_iterator crend() const { + return const_reverse_iterator(begin()); + } + + // Mutations + + void reserve(size_t n) { 
storage_.reserve(n); } + + void clear() { storage_.clear(); } + + void push_back(const T& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(value); + } + + void push_back(T&& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::move(value)); + } + + template + void emplace_back(Args&&... args) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::forward(args)...); + } + + template + iterator insert(const_iterator insert_at, InputIt first, InputIt last) { + const size_t n = storage_.size_; + const size_t it_size = static_cast(last - first); // XXX might be O(n)? + const size_t pos = static_cast(insert_at - const_data_ptr()); + storage_.bump_size(it_size); + auto* p = storage_.storage_ptr(); + if (it_size == 0) { + return p[pos].get(); + } + const size_t end_pos = pos + it_size; + + // Move [pos; n) to [end_pos; end_pos + n - pos) + size_t i = n; + size_t j = end_pos + n - pos; + while (j > std::max(n, end_pos)) { + p[--j].move_construct(&p[--i]); + } + while (j > end_pos) { + p[--j].move_assign(&p[--i]); + } + assert(j == end_pos); + // Copy [first; last) to [pos; end_pos) + j = pos; + while (j < std::min(n, end_pos)) { + p[j++].assign(*first++); + } + while (j < end_pos) { + p[j++].construct(*first++); + } + assert(first == last); + return p[pos].get(); + } + + void resize(size_t n) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(T{}); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + void resize(size_t n, const T& value) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(value); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + private: + template + void init_by_copying(size_t n, InputIt src) { + storage_.bump_size(n); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } + + template + void init_by_moving(size_t n, InputIt src) { + init_by_copying(n, std::make_move_iterator(src)); + } + + template + void assign_by_copying(size_t n, InputIt src) { + const size_t old_size = storage_.size_; + if (n > old_size) { + storage_.bump_size(n - old_size); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < old_size; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = old_size; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } else { + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = n; i < old_size; ++i) { + dest[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + template + void assign_by_moving(size_t n, InputIt src) { + assign_by_copying(n, std::make_move_iterator(src)); + } +}; + +template +using StaticVector = StaticVectorImpl>; + +template +using SmallVector = StaticVectorImpl>; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h new file mode 100644 index 0000000000000000000000000000000000000000..cdffe0b2317e5ba555c37ec16e5294bc912a49d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace arrow { +namespace internal { + +template > +std::vector ArgSort(const std::vector& values, Cmp&& cmp = {}) { + std::vector indices(values.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), + [&](int64_t i, int64_t j) -> bool { return cmp(values[i], values[j]); }); + return indices; +} + +template +size_t Permute(const std::vector& indices, std::vector* values) { + if (indices.size() <= 1) { + return indices.size(); + } + + // mask indicating which of values are in the correct location + std::vector sorted(indices.size(), false); + + size_t cycle_count = 0; + + for (auto cycle_start = sorted.begin(); cycle_start != sorted.end(); + cycle_start = std::find(cycle_start, sorted.end(), false)) { + ++cycle_count; + + // position in which an element belongs WRT sort + auto sort_into = static_cast(cycle_start - sorted.begin()); + + if (indices[sort_into] == sort_into) { + // trivial cycle + sorted[sort_into] = true; + continue; + } + + // resolve this cycle + const auto end = sort_into; + for (int64_t take_from = indices[sort_into]; take_from != end; + take_from = indices[sort_into]) { + std::swap(values->at(sort_into), values->at(take_from)); + sorted[sort_into] = true; + sort_into = take_from; + } + sorted[sort_into] = true; + } + + return cycle_count; +} + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db4e67f59ed6e3afb5c90cb758b7998dd9d510f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +namespace arrow { +namespace internal { + +class StopWatch { + // This clock should give us wall clock time + using ClockType = std::chrono::steady_clock; + + public: + StopWatch() {} + + void Start() { start_ = ClockType::now(); } + + // Returns time in nanoseconds. + uint64_t Stop() { + auto stop = ClockType::now(); + std::chrono::nanoseconds d = stop - start_; + assert(d.count() >= 0); + return static_cast(d.count()); + } + + private: + std::chrono::time_point start_; +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e377773f62f810d330c40e565d5acda0aabd4c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/string.h @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +#include +#endif + +#include "arrow/result.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +ARROW_EXPORT std::string HexEncode(const uint8_t* data, size_t length); + +ARROW_EXPORT std::string Escape(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(const char* data, size_t length); + +ARROW_EXPORT std::string HexEncode(std::string_view str); + +ARROW_EXPORT std::string Escape(std::string_view str); + +ARROW_EXPORT Status ParseHexValue(const char* hex_pair, uint8_t* out); + +ARROW_EXPORT Status ParseHexValues(std::string_view hex_string, uint8_t* out); + +namespace internal { + +/// Like std::string_view::starts_with in C++20 +inline bool StartsWith(std::string_view s, std::string_view prefix) { + return s.length() >= prefix.length() && + (s.empty() || s.substr(0, prefix.length()) == prefix); +} + +/// Like std::string_view::ends_with in C++20 +inline bool EndsWith(std::string_view s, std::string_view suffix) { + return s.length() >= suffix.length() && + (s.empty() || s.substr(s.length() - suffix.length()) == suffix); +} + +/// \brief Split a string with a delimiter +ARROW_EXPORT +std::vector SplitString(std::string_view v, char delim, + int64_t limit = 0); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Join strings with a delimiter +ARROW_EXPORT +std::string JoinStrings(const std::vector& strings, + std::string_view delimiter); + +/// \brief Trim whitespace from left and right sides of string +ARROW_EXPORT +std::string TrimString(std::string value); + +ARROW_EXPORT +bool AsciiEqualsCaseInsensitive(std::string_view left, std::string_view right); + +ARROW_EXPORT +std::string AsciiToLower(std::string_view value); + +ARROW_EXPORT +std::string AsciiToUpper(std::string_view value); + +/// \brief Search for the first instance of a token and replace it or return nullopt if +/// the token is not found. +ARROW_EXPORT +std::optional Replace(std::string_view s, std::string_view token, + std::string_view replacement); + +/// \brief Get boolean value from string +/// +/// If "1", "true" (case-insensitive), returns true +/// If "0", "false" (case-insensitive), returns false +/// Otherwise, returns Status::Invalid +ARROW_EXPORT +arrow::Result ParseBoolean(std::string_view value); + +#if __has_include() + +namespace detail { +template +struct can_to_chars : public std::false_type {}; + +template +struct can_to_chars< + T, std::void_t(), std::declval(), + std::declval>()))>> + : public std::true_type {}; +} // namespace detail + +/// \brief Whether std::to_chars exists for the current value type. +/// +/// This is useful as some C++ libraries do not implement all specified overloads +/// for std::to_chars. +template +inline constexpr bool have_to_chars = detail::can_to_chars::value; + +/// \brief An ergonomic wrapper around std::to_chars, returning a std::string +/// +/// For most inputs, the std::string result will not incur any heap allocation +/// thanks to small string optimization. +/// +/// Compared to std::to_string, this function gives locale-agnostic results +/// and might also be faster. +template +std::string ToChars(T value, Args&&... args) { + if constexpr (!have_to_chars) { + // Some C++ standard libraries do not yet implement std::to_chars for all types, + // in which case we have to fallback to std::string. 
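+    // (for instance, libstdc++ only gained the floating-point std::to_chars
+    // overloads in GCC 11)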
+ return std::to_string(value); + } else { + // According to various sources, the GNU libstdc++ and Microsoft's C++ STL + // allow up to 15 bytes of small string optimization, while clang's libc++ + // goes up to 22 bytes. Choose the pessimistic value. + std::string out(15, 0); + auto res = std::to_chars(&out.front(), &out.back(), value, args...); + while (res.ec != std::errc{}) { + assert(res.ec == std::errc::value_too_large); + out.resize(out.capacity() * 2); + res = std::to_chars(&out.front(), &out.back(), value, args...); + } + const auto length = res.ptr - out.data(); + assert(length <= static_cast(out.length())); + out.resize(length); + return out; + } +} + +#else // !__has_include() + +template +inline constexpr bool have_to_chars = false; + +template +std::string ToChars(T value, Args&&... args) { + return std::to_string(value); +} + +#endif + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h new file mode 100644 index 0000000000000000000000000000000000000000..3bb72f0d9cb7d7bb8b9ce8f2a65cc9f954924ca3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/task_group.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief A group of related tasks +/// +/// A TaskGroup executes tasks with the signature `Status()`. +/// Execution can be serial or parallel, depending on the TaskGroup +/// implementation. When Finish() returns, it is guaranteed that all +/// tasks have finished, or at least one has errored. +/// +/// Once an error has occurred any tasks that are submitted to the task group +/// will not run. The call to Append will simply return without scheduling the +/// task. +/// +/// If the task group is parallel it is possible that multiple tasks could be +/// running at the same time and one of those tasks fails. This will put the +/// task group in a failure state (so additional tasks cannot be run) however +/// it will not interrupt running tasks. Finish will not complete +/// until all running tasks have finished, even if one task fails. +/// +/// Once a task group has finished new tasks may not be added to it. If you need to start +/// a new batch of work then you should create a new task group. 
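+///
+/// A minimal usage sketch (illustrative; `executor` and `DoWork` are
+/// hypothetical placeholders):
+///
+///   std::shared_ptr<TaskGroup> group = TaskGroup::MakeThreaded(executor);
+///   group->Append([] { return DoWork(); });  // any Status-returning callable
+///   ARROW_RETURN_NOT_OK(group->Finish());    // waits for all tasks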
+class ARROW_EXPORT TaskGroup : public std::enable_shared_from_this { + public: + /// Add a Status-returning function to execute. Execution order is + /// undefined. The function may be executed immediately or later. + template + void Append(Function&& func) { + return AppendReal(std::forward(func)); + } + + /// Wait for execution of all tasks (and subgroups) to be finished, + /// or for at least one task (or subgroup) to error out. + /// The returned Status propagates the error status of the first failing + /// task (or subgroup). + virtual Status Finish() = 0; + + /// Returns a future that will complete the first time all tasks are finished. + /// This should be called only after all top level tasks + /// have been added to the task group. + /// + /// If you are using a TaskGroup asynchronously there are a few considerations to keep + /// in mind. The tasks should not block on I/O, etc (defeats the purpose of using + /// futures) and should not be doing any nested locking or you run the risk of the tasks + /// getting stuck in the thread pool waiting for tasks which cannot get scheduled. + /// + /// Primarily this call is intended to help migrate existing work written with TaskGroup + /// in mind to using futures without having to do a complete conversion on the first + /// pass. + virtual Future<> FinishAsync() = 0; + + /// The current aggregate error Status. Non-blocking, useful for stopping early. + virtual Status current_status() = 0; + + /// Whether some tasks have already failed. Non-blocking, useful for stopping early. + virtual bool ok() const = 0; + + /// How many tasks can typically be executed in parallel. + /// This is only a hint, useful for testing or debugging. + virtual int parallelism() = 0; + + static std::shared_ptr MakeSerial(StopToken = StopToken::Unstoppable()); + static std::shared_ptr MakeThreaded(internal::Executor*, + StopToken = StopToken::Unstoppable()); + + virtual ~TaskGroup() = default; + + protected: + TaskGroup() = default; + ARROW_DISALLOW_COPY_AND_ASSIGN(TaskGroup); + + virtual void AppendReal(FnOnce task) = 0; +}; + +} // namespace internal +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h new file mode 100644 index 0000000000000000000000000000000000000000..308df468840eb299ac35f1e308a643df4b8e0e4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/tdigest.h @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+// approximate quantiles from arbitrary length dataset with O(1) space
+// based on 'Computing Extremely Accurate Quantiles Using t-Digests' from Dunning & Ertl
+// - https://arxiv.org/abs/1902.04023
+// - https://github.com/tdunning/t-digest
+
+#pragma once
+
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "arrow/util/logging.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Status;
+
+namespace internal {
+
+class ARROW_EXPORT TDigest {
+ public:
+  explicit TDigest(uint32_t delta = 100, uint32_t buffer_size = 500);
+  ~TDigest();
+  TDigest(TDigest&&);
+  TDigest& operator=(TDigest&&);
+
+  // reset and re-use this tdigest
+  void Reset();
+
+  // validate data integrity
+  Status Validate() const;
+
+  // dump internal data, only for debug
+  void Dump() const;
+
+  // buffer a single data point, consume internal buffer if full
+  // this function is intensively called and performance critical
+  // call it only if you are sure no NAN exists in input data
+  void Add(double value) {
+    DCHECK(!std::isnan(value)) << "cannot add NAN";
+    if (ARROW_PREDICT_FALSE(input_.size() == input_.capacity())) {
+      MergeInput();
+    }
+    input_.push_back(value);
+  }
+
+  // skip NAN on adding
+  template <typename T>
+  typename std::enable_if<std::is_floating_point<T>::value>::type NanAdd(T value) {
+    if (!std::isnan(value)) Add(value);
+  }
+
+  template <typename T>
+  typename std::enable_if<std::is_integral<T>::value>::type NanAdd(T value) {
+    Add(static_cast<double>(value));
+  }
+
+  // merge with other t-digests, called infrequently
+  void Merge(const std::vector<TDigest>& others);
+  void Merge(const TDigest& other);
+
+  // calculate quantile
+  double Quantile(double q) const;
+
+  double Min() const { return Quantile(0); }
+  double Max() const { return Quantile(1); }
+  double Mean() const;
+
+  // check if this tdigest contains no valid data points
+  bool is_empty() const;
+
+ private:
+  // merge input data with current tdigest
+  void MergeInput() const;
+
+  // input buffer, size = buffer_size * sizeof(double)
+  mutable std::vector<double> input_;
+
+  // hide other members with pimpl
+  class TDigestImpl;
+  std::unique_ptr<TDigestImpl> impl_;
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..511daed1ecaac688b6d444349bf1c63fb6c53ad6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
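A small sketch of how the TDigest class above is typically driven, using only the API declared in the header (input values are illustrative):

// Hypothetical sketch: stream values into a t-digest, then query quantiles.
#include <iostream>
#include "arrow/util/tdigest.h"

int main() {
  arrow::internal::TDigest digest;  // defaults: delta = 100, buffer_size = 500
  for (int i = 0; i < 100000; ++i) {
    digest.NanAdd(i * 0.001);  // NanAdd() silently skips NaN inputs
  }
  // Approximate median and 99th percentile of everything streamed so far.
  std::cout << digest.Quantile(0.5) << " " << digest.Quantile(0.99) << std::endl;
  return 0;
}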
+
+#pragma once
+
+#include <ostream>
+
+#include "arrow/testing/gtest_util.h"
+#include "arrow/util/iterator.h"
+
+namespace arrow {
+
+struct TestInt {
+  TestInt();
+  TestInt(int i);  // NOLINT runtime/explicit
+  int value;
+
+  bool operator==(const TestInt& other) const;
+
+  friend std::ostream& operator<<(std::ostream& os, const TestInt& v);
+};
+
+template <>
+struct IterationTraits<TestInt> {
+  static TestInt End() { return TestInt(); }
+  static bool IsEnd(const TestInt& val) { return val == IterationTraits<TestInt>::End(); }
+};
+
+struct TestStr {
+  TestStr();
+  TestStr(const std::string& s);  // NOLINT runtime/explicit
+  TestStr(const char* s);         // NOLINT runtime/explicit
+  explicit TestStr(const TestInt& test_int);
+  std::string value;
+
+  bool operator==(const TestStr& other) const;
+
+  friend std::ostream& operator<<(std::ostream& os, const TestStr& v);
+};
+
+template <>
+struct IterationTraits<TestStr> {
+  static TestStr End() { return TestStr(); }
+  static bool IsEnd(const TestStr& val) { return val == IterationTraits<TestStr>::End(); }
+};
+
+std::vector<TestInt> RangeVector(unsigned int max, unsigned int step = 1);
+
+template <typename T>
+inline Iterator<T> VectorIt(std::vector<T> v) {
+  return MakeVectorIterator<T>(std::move(v));
+}
+
+template <typename T>
+inline Iterator<T> PossiblySlowVectorIt(std::vector<T> v, bool slow = false) {
+  auto iterator = MakeVectorIterator<T>(std::move(v));
+  if (slow) {
+    return MakeTransformedIterator<T, T>(std::move(iterator),
+                                         [](T item) -> Result<TransformFlow<T>> {
+                                           SleepABit();
+                                           return TransformYield(item);
+                                         });
+  } else {
+    return iterator;
+  }
+}
+
+template <typename T>
+inline void AssertIteratorExhausted(Iterator<T>& it) {
+  ASSERT_OK_AND_ASSIGN(T next, it.Next());
+  ASSERT_TRUE(IsIterationEnd(next));
+}
+
+Transformer<TestInt, TestInt> MakeFilter(std::function<bool(TestInt&)> filter);
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..44b1e227b0e5fac7ed104df5c487bdc223e44f26
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h
@@ -0,0 +1,620 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
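The TestInt/TestStr specializations above follow the general recipe for making a type usable with arrow::Iterator: provide an End() sentinel and an IsEnd() predicate. A sketch of the same pattern for a hypothetical user-defined type:

// Hypothetical sketch: IterationTraits for a custom record type.
#include <cstdint>
#include <string>
#include "arrow/util/iterator.h"

struct Row {
  int64_t id = -1;  // -1 doubles as the end-of-iteration sentinel
  std::string payload;
};

namespace arrow {
template <>
struct IterationTraits<Row> {
  static Row End() { return Row{}; }
  static bool IsEnd(const Row& val) { return val.id == -1; }
};
}  // namespace arrow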
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <queue>
+#include <type_traits>
+#include <unordered_set>
+#include <utility>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/cancel.h"
+#include "arrow/util/config.h"
+#include "arrow/util/functional.h"
+#include "arrow/util/future.h"
+#include "arrow/util/iterator.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+#if defined(_MSC_VER)
+// Disable harmless warning for decorated name length limit
+#pragma warning(disable : 4503)
+#endif
+
+namespace arrow {
+
+/// \brief Get the capacity of the global thread pool
+///
+/// Return the number of worker threads in the thread pool to which
+/// Arrow dispatches various CPU-bound tasks. This is an ideal number,
+/// not necessarily the exact number of threads at a given point in time.
+///
+/// You can change this number using SetCpuThreadPoolCapacity().
+ARROW_EXPORT int GetCpuThreadPoolCapacity();
+
+/// \brief Set the capacity of the global thread pool
+///
+/// Set the number of worker threads in the thread pool to which
+/// Arrow dispatches various CPU-bound tasks.
+///
+/// The current number is returned by GetCpuThreadPoolCapacity().
+ARROW_EXPORT Status SetCpuThreadPoolCapacity(int threads);
+
+namespace internal {
+
+// Hints about a task that may be used by an Executor.
+// They are ignored by the provided ThreadPool implementation.
+struct TaskHints {
+  // The lower, the more urgent
+  int32_t priority = 0;
+  // The IO transfer size in bytes
+  int64_t io_size = -1;
+  // The approximate CPU cost in number of instructions
+  int64_t cpu_cost = -1;
+  // An application-specific ID
+  int64_t external_id = -1;
+};
+
+class ARROW_EXPORT Executor {
+ public:
+  using StopCallback = internal::FnOnce<void(const Status&)>;
+
+  virtual ~Executor();
+
+  // Spawn a fire-and-forget task.
+  template <typename Function>
+  Status Spawn(Function&& func) {
+    return SpawnReal(TaskHints{}, std::forward<Function>(func), StopToken::Unstoppable(),
+                     StopCallback{});
+  }
+  template <typename Function>
+  Status Spawn(Function&& func, StopToken stop_token) {
+    return SpawnReal(TaskHints{}, std::forward<Function>(func), std::move(stop_token),
+                     StopCallback{});
+  }
+  template <typename Function>
+  Status Spawn(TaskHints hints, Function&& func) {
+    return SpawnReal(hints, std::forward<Function>(func), StopToken::Unstoppable(),
+                     StopCallback{});
+  }
+  template <typename Function>
+  Status Spawn(TaskHints hints, Function&& func, StopToken stop_token) {
+    return SpawnReal(hints, std::forward<Function>(func), std::move(stop_token),
+                     StopCallback{});
+  }
+  template <typename Function>
+  Status Spawn(TaskHints hints, Function&& func, StopToken stop_token,
+               StopCallback stop_callback) {
+    return SpawnReal(hints, std::forward<Function>(func), std::move(stop_token),
+                     std::move(stop_callback));
+  }
+
+  // Transfers a future to this executor. Any continuations added to the
+  // returned future will run in this executor. Otherwise they would run
+  // on the same thread that called MarkFinished.
+  //
+  // This is necessary when (for example) an I/O task is completing a future.
+  // The continuations of that future should run on the CPU thread pool, keeping
+  // CPU-heavy work off the I/O thread pool. So the I/O task should transfer
+  // the future to the CPU executor before returning.
+  //
+  // By default this method will only transfer if the future is not already completed. If
+  // the future is already completed then any callback would be run synchronously and so
+  // no transfer is typically necessary. However, in cases where you want to force a
+  // transfer (e.g. to help the scheduler break up units of work across multiple cores)
+  // then you can override this behavior with `always_transfer`.
+  template <typename T>
+  Future<T> Transfer(Future<T> future) {
+    return DoTransfer(std::move(future), false);
+  }
+
+  // Overload of Transfer which will always schedule callbacks on new threads even if the
+  // future is finished when the callback is added.
+  //
+  // This can be useful in cases where you want to ensure parallelism.
+  template <typename T>
+  Future<T> TransferAlways(Future<T> future) {
+    return DoTransfer(std::move(future), true);
+  }
+
+  // Submit a callable and arguments for execution. Return a future that
+  // will return the callable's result value once it has executed.
+  // The callable's arguments are copied before execution.
+  template <typename Function, typename... Args,
+            typename FutureType = typename ::arrow::detail::ContinueFuture::ForSignature<
+                Function && (Args && ...)>>
+  Result<FutureType> Submit(TaskHints hints, StopToken stop_token, Function&& func,
+                            Args&&... args) {
+    using ValueType = typename FutureType::ValueType;
+
+    auto future = FutureType::Make();
+    auto task = std::bind(::arrow::detail::ContinueFuture{}, future,
+                          std::forward<Function>(func), std::forward<Args>(args)...);
+    struct {
+      WeakFuture<ValueType> weak_fut;
+
+      void operator()(const Status& st) {
+        auto fut = weak_fut.get();
+        if (fut.is_valid()) {
+          fut.MarkFinished(st);
+        }
+      }
+    } stop_callback{WeakFuture<ValueType>(future)};
+    ARROW_RETURN_NOT_OK(SpawnReal(hints, std::move(task), std::move(stop_token),
+                                  std::move(stop_callback)));
+
+    return future;
+  }
+
+  template <typename Function, typename... Args,
+            typename FutureType = typename ::arrow::detail::ContinueFuture::ForSignature<
+                Function && (Args && ...)>>
+  Result<FutureType> Submit(StopToken stop_token, Function&& func, Args&&... args) {
+    return Submit(TaskHints{}, stop_token, std::forward<Function>(func),
+                  std::forward<Args>(args)...);
+  }
+
+  template <typename Function, typename... Args,
+            typename FutureType = typename ::arrow::detail::ContinueFuture::ForSignature<
+                Function && (Args && ...)>>
+  Result<FutureType> Submit(TaskHints hints, Function&& func, Args&&... args) {
+    return Submit(std::move(hints), StopToken::Unstoppable(),
+                  std::forward<Function>(func), std::forward<Args>(args)...);
+  }
+
+  template <typename Function, typename... Args,
+            typename FutureType = typename ::arrow::detail::ContinueFuture::ForSignature<
+                Function && (Args && ...)>>
+  Result<FutureType> Submit(Function&& func, Args&&... args) {
+    return Submit(TaskHints{}, StopToken::Unstoppable(), std::forward<Function>(func),
+                  std::forward<Args>(args)...);
+  }
+
+  // Return the level of parallelism (the number of tasks that may be executed
+  // concurrently). This may be an approximate number.
+  virtual int GetCapacity() = 0;
+
+  // Return true if the thread from which this function is called is owned by this
+  // Executor. Returns false if this Executor does not support this property.
+  virtual bool OwnsThisThread() { return false; }
+
+  // Return true if this is the current executor being called
+  // n.b. this defaults to just calling OwnsThisThread
+  // unless the threadpool is disabled
+  virtual bool IsCurrentExecutor() { return OwnsThisThread(); }
+
+  /// \brief An interface to represent something with a custom destructor
+  ///
+  /// \see KeepAlive
+  class ARROW_EXPORT Resource {
+   public:
+    virtual ~Resource() = default;
+  };
+
+  /// \brief Keep a resource alive until all executor threads have terminated
+  ///
+  /// Executors may have static storage duration. In particular, the CPU and I/O
+  /// executors are currently implemented this way. These threads may access other
+  /// objects with static storage duration such as the OpenTelemetry runtime context,
+  /// the default memory pool, or other static executors.
+  ///
+  /// The order in which these objects are destroyed is difficult to control. In order
+  /// to ensure those objects remain alive until all threads have finished, those objects
+  /// should be wrapped in a Resource object and passed into this method. The given
+  /// shared_ptr will be kept alive until all threads have finished their worker loops.
+  virtual void KeepAlive(std::shared_ptr<Resource> resource);
+
+ protected:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Executor);
+
+  Executor() = default;
+
+  template <typename T, typename FT = Future<T>, typename FTSync = typename FT::SyncType>
+  Future<T> DoTransfer(Future<T> future, bool always_transfer = false) {
+    auto transferred = Future<T>::Make();
+    if (always_transfer) {
+      CallbackOptions callback_options = CallbackOptions::Defaults();
+      callback_options.should_schedule = ShouldSchedule::Always;
+      callback_options.executor = this;
+      auto sync_callback = [transferred](const FTSync& result) mutable {
+        transferred.MarkFinished(result);
+      };
+      future.AddCallback(sync_callback, callback_options);
+      return transferred;
+    }
+
+    // We could use AddCallback's ShouldSchedule::IfUnfinished but we can save a bit of
+    // work by doing the test here.
+    auto callback = [this, transferred](const FTSync& result) mutable {
+      auto spawn_status =
+          Spawn([transferred, result]() mutable { transferred.MarkFinished(result); });
+      if (!spawn_status.ok()) {
+        transferred.MarkFinished(spawn_status);
+      }
+    };
+    auto callback_factory = [&callback]() { return callback; };
+    if (future.TryAddCallback(callback_factory)) {
+      return transferred;
+    }
+    // If the future is already finished and we aren't going to force spawn a thread
+    // then we don't need to add another layer of callback and can return the original
+    // future
+    return future;
+  }
+
+  // Subclassing API
+  virtual Status SpawnReal(TaskHints hints, FnOnce<void()> task, StopToken,
+                           StopCallback&&) = 0;
+};
+
+/// \brief An executor implementation that runs all tasks on a single thread using an
+/// event loop.
+///
+/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are
+/// fine but if one task needs to wait for another task it must be expressed as an
+/// asynchronous continuation.
+class ARROW_EXPORT SerialExecutor : public Executor {
+ public:
+  template <typename T = ::arrow::internal::Empty>
+  using TopLevelTask = internal::FnOnce<Future<T>(Executor*)>;
+
+  ~SerialExecutor() override;
+
+  int GetCapacity() override { return 1; }
+  bool OwnsThisThread() override;
+  Status SpawnReal(TaskHints hints, FnOnce<void()> task, StopToken,
+                   StopCallback&&) override;
+
+  // Return the number of tasks either running or in the queue.
+  int GetNumTasks();
+
+  /// \brief Runs the TopLevelTask and any scheduled tasks
+  ///
+  /// The TopLevelTask (or one of the tasks it schedules) must either return an invalid
+  /// status or call the finish signal. Failure to do this will result in a deadlock. For
+  /// this reason it is preferable (if possible) to use the helper methods (below)
+  /// RunSynchronously/RunSerially, which delegate the responsibility onto a Future
+  /// producer's existing responsibility to always mark a future finished (which can
+  /// someday be aided by ARROW-12207).
+  template <typename T = internal::Empty, typename FT = Future<T>,
+            typename FTSync = typename FT::SyncType>
+  static FTSync RunInSerialExecutor(TopLevelTask<T> initial_task) {
+    Future<T> fut = SerialExecutor().Run<T>(std::move(initial_task));
+    return FutureToSync(fut);
+  }
+
+  /// \brief Transform an AsyncGenerator into an Iterator
+  ///
+  /// An event loop will be created and each call to Next will power the event loop with
+  /// the calling thread until the next item is ready to be delivered.
+  ///
+  /// Note: The iterator's destructor will run until the given generator is fully
+  /// exhausted. If you wish to abandon iteration before completion then the correct
+  /// approach is to use a stop token to cause the generator to exhaust early.
+  template <typename T>
+  static Iterator<T> IterateGenerator(
+      internal::FnOnce<Result<std::function<Future<T>()>>(Executor*)> initial_task) {
+    auto serial_executor = std::unique_ptr<SerialExecutor>(new SerialExecutor());
+    auto maybe_generator = std::move(initial_task)(serial_executor.get());
+    if (!maybe_generator.ok()) {
+      return MakeErrorIterator<T>(maybe_generator.status());
+    }
+    auto generator = maybe_generator.MoveValueUnsafe();
+    struct SerialIterator {
+      SerialIterator(std::unique_ptr<SerialExecutor> executor,
+                     std::function<Future<T>()> generator)
+          : executor(std::move(executor)), generator(std::move(generator)) {}
+      ARROW_DISALLOW_COPY_AND_ASSIGN(SerialIterator);
+      ARROW_DEFAULT_MOVE_AND_ASSIGN(SerialIterator);
+      ~SerialIterator() {
+        // A serial iterator must be consumed before it can be destroyed. Allowing it to
+        // do otherwise would lead to resource leakage. There will likely be deadlocks at
+        // this spot in the future but these will be the result of other bugs and not the
+        // fact that we are forcing consumption here.
+
+        // If a streaming API needs to support early abandonment then it should do so
+        // with a cancellation token and not simply discard the iterator and expect
+        // the underlying work to clean up correctly.
+        if (executor && !executor->IsFinished()) {
+          while (true) {
+            Result<T> maybe_next = Next();
+            if (!maybe_next.ok() || IsIterationEnd(*maybe_next)) {
+              break;
+            }
+          }
+        }
+      }
+
+      Result<T> Next() {
+        executor->Unpause();
+        // This call may lead to tasks being scheduled in the serial executor
+        Future<T> next_fut = generator();
+        next_fut.AddCallback([this](const Result<T>& res) {
+          // If we're done iterating we should drain the rest of the tasks in the executor
+          if (!res.ok() || IsIterationEnd(*res)) {
+            executor->Finish();
+            return;
+          }
+          // Otherwise we will break out immediately, leaving the remaining tasks for
+          // the next call.
+          executor->Pause();
+        });
+#ifdef ARROW_ENABLE_THREADING
+        // future must run on this thread
+        // Borrow this thread and run tasks until the future is finished
+        executor->RunLoop();
+#else
+        next_fut.Wait();
+#endif
+        if (!next_fut.is_finished()) {
+          // Not clear this is possible since RunLoop wouldn't generally exit
+          // unless we paused/finished, which would imply next_fut has been
+          // finished.
+          return Status::Invalid(
+              "Serial executor terminated before next result computed");
+        }
+        // At this point we may still have tasks in the executor; that is ok.
+        // We will run those tasks the next time through.
+        return next_fut.result();
+      }
+
+      std::unique_ptr<SerialExecutor> executor;
+      std::function<Future<T>()> generator;
+    };
+    return Iterator<T>(SerialIterator{std::move(serial_executor), std::move(generator)});
+  }
+
+#ifndef ARROW_ENABLE_THREADING
+  // run a pending task from loop
+  // returns true if any tasks were run in the last go round the loop (i.e. if it
+  // returns false, all executors are waiting)
+  static bool RunTasksOnAllExecutors();
+  static SerialExecutor* GetCurrentExecutor();
+
+  bool IsCurrentExecutor() override;
+
+#endif
+
+ protected:
+  virtual void RunLoop();
+
+  // State uses mutex
+  struct State;
+  std::shared_ptr<State> state_;
+
+  SerialExecutor();
+
+  // We mark the serial executor "finished" when there should be
+  // no more tasks scheduled on it. It's not strictly needed but
+  // can help catch bugs where we are trying to use the executor
+  // after we are done with it.
+  void Finish();
+  bool IsFinished();
+  // We pause the executor when we are running an async generator
+  // and we have received an item that we can deliver.
+  void Pause();
+  void Unpause();
+
+  template <typename T, typename FTSync = typename Future<T>::SyncType>
+  Future<T> Run(TopLevelTask<T> initial_task) {
+    auto final_fut = std::move(initial_task)(this);
+    final_fut.AddCallback([this](const FTSync&) { Finish(); });
+    RunLoop();
+    return final_fut;
+  }
+
+#ifndef ARROW_ENABLE_THREADING
+  // we have to run tasks from all live executors
+  // during RunLoop if we don't have threading
+  static std::unordered_set<SerialExecutor*> all_executors;
+  // a pointer to the last one called by the loop
+  // so all tasks get spawned equally
+  // on multiple calls to RunTasksOnAllExecutors
+  static SerialExecutor* last_called_executor;
+  // without threading we can't tell which executor called the
+  // current process - so we set it in spawning the task
+  static SerialExecutor* current_executor;
+#endif  // ARROW_ENABLE_THREADING
+};
+
+#ifdef ARROW_ENABLE_THREADING
+
+/// An Executor implementation spawning tasks in FIFO manner on a fixed-size
+/// pool of worker threads.
+///
+/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are
+/// fine but if one task needs to wait for another task it must be expressed as an
+/// asynchronous continuation.
+class ARROW_EXPORT ThreadPool : public Executor {
+ public:
+  // Construct a thread pool with the given number of worker threads
+  static Result<std::shared_ptr<ThreadPool>> Make(int threads);
+
+  // Like Make(), but takes care that the returned ThreadPool is compatible
+  // with destruction late at process exit.
+  static Result<std::shared_ptr<ThreadPool>> MakeEternal(int threads);
+
+  // Destroy thread pool; the pool will first be shut down
+  ~ThreadPool() override;
+
+  // Return the desired number of worker threads.
+  // The actual number of workers may lag a bit before being adjusted to
+  // match this value.
+  int GetCapacity() override;
+
+  // Return the number of tasks either running or in the queue.
+  int GetNumTasks();
+
+  bool OwnsThisThread() override;
+  // Dynamically change the number of worker threads.
+  //
+  // This function always returns immediately.
+  // If fewer threads are running than this number, new threads are spawned
+  // on-demand when needed for task execution.
+  // If more threads are running than this number, excess threads are reaped
+  // as soon as possible.
+  Status SetCapacity(int threads);
+
+  // Heuristic for the default capacity of a thread pool for CPU-bound tasks.
+  // This is exposed as a static method to help with testing.
+  static int DefaultCapacity();
+
+  // Shutdown the pool. Once the pool starts shutting down, new tasks
+  // cannot be submitted anymore.
+  // If "wait" is true, shutdown waits for all pending tasks to be finished.
+  // If "wait" is false, workers are stopped as soon as currently executing
+  // tasks are finished.
+  Status Shutdown(bool wait = true);
+
+  // Wait for the thread pool to become idle
+  //
+  // This is useful for sequencing tests
+  void WaitForIdle();
+
+  void KeepAlive(std::shared_ptr<Resource> resource) override;
+
+  struct State;
+
+ protected:
+  FRIEND_TEST(TestThreadPool, SetCapacity);
+  FRIEND_TEST(TestGlobalThreadPool, Capacity);
+  ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool();
+
+  ThreadPool();
+
+  Status SpawnReal(TaskHints hints, FnOnce<void()> task, StopToken,
+                   StopCallback&&) override;
+
+  // Collect finished worker threads, making sure the OS threads have exited
+  void CollectFinishedWorkersUnlocked();
+  // Launch a given number of additional workers
+  void LaunchWorkersUnlocked(int threads);
+  // Get the current actual capacity
+  int GetActualCapacity();
+
+  static std::shared_ptr<ThreadPool> MakeCpuThreadPool();
+
+  std::shared_ptr<State> sp_state_;
+  State* state_;
+  bool shutdown_on_destroy_;
+};
+#else  // ARROW_ENABLE_THREADING
+// an executor implementation which pretends to be a thread pool but runs everything
+// on the main thread using a static queue (shared between all thread pools, otherwise
+// cross-threadpool dependencies will break everything)
+class ARROW_EXPORT ThreadPool : public SerialExecutor {
+ public:
+  ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool();
+
+  static Result<std::shared_ptr<ThreadPool>> Make(int threads);
+
+  // Like Make(), but takes care that the returned ThreadPool is compatible
+  // with destruction late at process exit.
+  static Result<std::shared_ptr<ThreadPool>> MakeEternal(int threads);
+
+  // Destroy thread pool; the pool will first be shut down
+  ~ThreadPool() override;
+
+  // Return the desired number of worker threads.
+  // The actual number of workers may lag a bit before being adjusted to
+  // match this value.
+  int GetCapacity() override;
+
+  virtual int GetActualCapacity();
+
+  bool OwnsThisThread() override { return true; }
+
+  // Dynamically change the number of worker threads.
+  // Without threading this is equal to the
+  // number of tasks that can be running at once
+  // (inside each other).
+  Status SetCapacity(int threads);
+
+  static int DefaultCapacity() { return 8; }
+
+  // Shutdown the pool. Once the pool starts shutting down, new tasks
+  // cannot be submitted anymore.
+  // If "wait" is true, shutdown waits for all pending tasks to be finished.
+  // If "wait" is false, workers are stopped as soon as currently executing
+  // tasks are finished.
+  Status Shutdown(bool wait = true);
+
+  // Wait for the thread pool to become idle
+  //
+  // This is useful for sequencing tests
+  void WaitForIdle();
+
+ protected:
+  static std::shared_ptr<ThreadPool> MakeCpuThreadPool();
+  ThreadPool();
+};
+
+#endif  // ARROW_ENABLE_THREADING
+
+// Return the process-global thread pool for CPU-bound tasks.
+ARROW_EXPORT ThreadPool* GetCpuThreadPool();
+
+/// \brief Potentially run an async operation serially (if use_threads is false)
+/// \see RunSerially
+///
+/// If `use_threads` is true, the global CPU executor is used.
+/// If `use_threads` is false, a temporary SerialExecutor is used.
+/// `get_future` is called (from this thread) with the chosen executor and must
+/// return a future that will eventually finish. This function returns once the
+/// future has finished.
+template <typename Fut, typename ValueType = typename Fut::ValueType>
+typename Fut::SyncType RunSynchronously(FnOnce<Fut(Executor*)> get_future,
+                                        bool use_threads) {
+  if (use_threads) {
+    auto fut = std::move(get_future)(GetCpuThreadPool());
+    return FutureToSync(fut);
+  } else {
+    return SerialExecutor::RunInSerialExecutor<ValueType>(std::move(get_future));
+  }
+}
+
+/// \brief Potentially iterate an async generator serially (if use_threads is false)
+/// \see IterateGenerator
+///
+/// If `use_threads` is true, the global CPU executor will be used. Each call to
+/// the iterator will simply wait until the next item is available. Tasks may run in
+/// the background between calls.
+///
+/// If `use_threads` is false, only the calling thread will be used. Each call to
+/// the iterator will use the calling thread to do enough work to generate one item.
+/// Tasks will be left in a queue until the next call and no work will be done between
+/// calls.
+template <typename T>
+Iterator<T> IterateSynchronously(
+    FnOnce<Result<std::function<Future<T>()>>(Executor*)> get_gen, bool use_threads) {
+  if (use_threads) {
+    auto maybe_gen = std::move(get_gen)(GetCpuThreadPool());
+    if (!maybe_gen.ok()) {
+      return MakeErrorIterator<T>(maybe_gen.status());
+    }
+    return MakeGeneratorIterator(*maybe_gen);
+  } else {
+    return SerialExecutor::IterateGenerator(std::move(get_gen));
+  }
+}
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h
new file mode 100644
index 0000000000000000000000000000000000000000..981eab59676ada65656a6c5dbfbe2c26b332d804
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <utility>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+enum DivideOrMultiply {
+  MULTIPLY,
+  DIVIDE,
+};
+
+ARROW_EXPORT
+std::pair<DivideOrMultiply, int64_t> GetTimestampConversion(TimeUnit::type in_unit,
+                                                            TimeUnit::type out_unit);
+
+// Converts a Timestamp value into another Timestamp value.
+//
+// This function takes care of properly transforming from one unit to another.
+//
+// \param[in] in the input type. Must be TimestampType.
+// \param[in] out the output type. Must be TimestampType.
+// \param[in] value the input value.
+//
+// \return The converted value, or an error.
+ARROW_EXPORT Result<int64_t> ConvertTimestampValue(const std::shared_ptr<DataType>& in,
+                                                   const std::shared_ptr<DataType>& out,
+                                                   int64_t value);
+
+template <typename Visitor, typename... Args>
+decltype(std::declval<Visitor&&>()(std::chrono::seconds{}, std::declval<Args&&>()...))
+VisitDuration(TimeUnit::type unit, Visitor&& visitor, Args&&... args) {
+  switch (unit) {
+    default:
+    case TimeUnit::SECOND:
+      break;
+    case TimeUnit::MILLI:
+      return visitor(std::chrono::milliseconds{}, std::forward<Args>(args)...);
+    case TimeUnit::MICRO:
+      return visitor(std::chrono::microseconds{}, std::forward<Args>(args)...);
+    case TimeUnit::NANO:
+      return visitor(std::chrono::nanoseconds{}, std::forward<Args>(args)...);
+  }
+  return visitor(std::chrono::seconds{}, std::forward<Args>(args)...);
+}
+
+/// Convert a count of seconds to the corresponding count in a different TimeUnit
+struct CastSecondsToUnitImpl {
+  template <typename Duration>
+  int64_t operator()(Duration, int64_t seconds) {
+    auto duration = std::chrono::duration_cast<Duration>(std::chrono::seconds{seconds});
+    return static_cast<int64_t>(duration.count());
+  }
+};
+
+inline int64_t CastSecondsToUnit(TimeUnit::type unit, int64_t seconds) {
+  return VisitDuration(unit, CastSecondsToUnitImpl{}, seconds);
+}
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d904f19b11b5cce5e86ce7c4a7cbfc92825da06
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+namespace arrow {
+
+namespace internal {
+struct Empty;
+}  // namespace internal
+
+template <typename T>
+class WeakFuture;
+class FutureWaiter;
+
+class TimestampParser;
+
+namespace internal {
+
+class Executor;
+class TaskGroup;
+class ThreadPool;
+class CpuInfo;
+
+namespace tracing {
+
+struct Scope;
+
+}  // namespace tracing
+}  // namespace internal
+
+struct Compression {
+  /// \brief Compression algorithm
+  enum type {
+    UNCOMPRESSED,
+    SNAPPY,
+    GZIP,
+    BROTLI,
+    ZSTD,
+    LZ4,
+    LZ4_FRAME,
+    LZO,
+    BZ2,
+    LZ4_HADOOP
+  };
+};
+
+namespace util {
+class AsyncTaskScheduler;
+class Compressor;
+class Decompressor;
+class Codec;
+}  // namespace util
+
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h
new file mode 100644
index 0000000000000000000000000000000000000000..900d8011dfd69506ec7ee546f6f32109c448e5f5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Contains utilities for making UBSan happy.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace util {
+
+namespace internal {
+
+constexpr uint8_t kNonNullFiller = 0;
+
+}  // namespace internal
+
+/// \brief Returns maybe_null if not null, or a non-null pointer to arbitrary memory
+/// that shouldn't be dereferenced.
+///
+/// Memset/Memcpy are undefined when a nullptr is passed as an argument; use this utility
+/// method to wrap locations where this could happen.
+///
+/// Note: Flatbuffers has UBSan warnings if a zero length vector is passed.
+/// https://github.com/google/flatbuffers/pull/5355 is trying to resolve
+/// them.
+template <typename T>
+inline T* MakeNonNull(T* maybe_null = NULLPTR) {
+  if (ARROW_PREDICT_TRUE(maybe_null != NULLPTR)) {
+    return maybe_null;
+  }
+
+  return const_cast<T*>(reinterpret_cast<const T*>(&internal::kNonNullFiller));
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, T> SafeLoadAs(
+    const uint8_t* unaligned) {
+  std::remove_const_t<T> ret;
+  std::memcpy(&ret, unaligned, sizeof(T));
+  return ret;
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, T> SafeLoad(const T* unaligned) {
+  std::remove_const_t<T> ret;
+  std::memcpy(&ret, unaligned, sizeof(T));
+  return ret;
+}
+
+template <typename U, typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T> &&
+                            std::is_trivially_copyable_v<U> && sizeof(T) == sizeof(U),
+                        U>
+SafeCopy(T value) {
+  std::remove_const_t<U> ret;
+  std::memcpy(&ret, &value, sizeof(T));
+  return ret;
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, void> SafeStore(void* unaligned,
+                                                                         T value) {
+  std::memcpy(unaligned, &value, sizeof(T));
+}
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f30d5a32781924a3c64904a203a03d9d3d48d79
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
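The SafeLoad/SafeStore family above routes unaligned and type-punning accesses through std::memcpy, which is the defined-behavior way to express such loads in C++. A sketch of the typical call pattern, with a hypothetical buffer layout:

// Hypothetical sketch: read a possibly unaligned 32-bit length prefix.
#include <cstdint>
#include "arrow/util/ubsan.h"

uint32_t ReadLengthPrefix(const uint8_t* buf, int64_t offset) {
  // A reinterpret_cast-and-dereference here would be undefined behavior
  // for unaligned data; SafeLoadAs performs the load via std::memcpy.
  return arrow::util::SafeLoadAs<uint32_t>(buf + offset);
}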
+ +#include +#include "arrow/array/data.h" + +namespace arrow { +namespace union_util { + +/// \brief Compute the number of of logical nulls in a sparse union array +int64_t LogicalSparseUnionNullCount(const ArraySpan& span); + +/// \brief Compute the number of of logical nulls in a dense union array +int64_t LogicalDenseUnionNullCount(const ArraySpan& span); + +} // namespace union_util +} // namespace arrow diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h new file mode 100644 index 0000000000000000000000000000000000000000..855a61408da995fc0e6aeeab3aa01e9fea050c0f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +/// \brief A parsed URI +class ARROW_EXPORT Uri { + public: + Uri(); + ~Uri(); + Uri(Uri&&); + Uri& operator=(Uri&&); + + // XXX Should we use std::string_view instead? These functions are + // not performance-critical. + + /// The URI scheme, such as "http", or the empty string if the URI has no + /// explicit scheme. + std::string scheme() const; + + /// Convenience function that returns true if the scheme() is "file" + bool is_file_scheme() const; + + /// Whether the URI has an explicit host name. This may return true if + /// the URI has an empty host (e.g. "file:///tmp/foo"), while it returns + /// false is the URI has not host component at all (e.g. "file:/tmp/foo"). + bool has_host() const; + /// The URI host name, such as "localhost", "127.0.0.1" or "::1", or the empty + /// string is the URI does not have a host component. + std::string host() const; + + /// The URI port number, as a string such as "80", or the empty string is the URI + /// does not have a port number component. + std::string port_text() const; + /// The URI port parsed as an integer, or -1 if the URI does not have a port + /// number component. + int32_t port() const; + + /// The username specified in the URI. + std::string username() const; + /// The password specified in the URI. + std::string password() const; + + /// The URI path component. + std::string path() const; + + /// The URI query string + std::string query_string() const; + + /// The URI query items + /// + /// Note this API doesn't allow differentiating between an empty value + /// and a missing value, such in "a&b=1" vs. "a=&b=1". + Result>> query_items() const; + + /// Get the string representation of this URI. 
+  const std::string& ToString() const;
+
+  /// Factory function to parse a URI from its string representation.
+  Status Parse(const std::string& uri_string);
+
+ private:
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+/// Percent-encode the input string, for use e.g. as a URI query parameter.
+///
+/// This will escape directory separators, making this function unsuitable
+/// for encoding URI paths directly. See UriFromAbsolutePath() instead.
+ARROW_EXPORT
+std::string UriEscape(std::string_view s);
+
+ARROW_EXPORT
+std::string UriUnescape(std::string_view s);
+
+/// Encode a host for use within a URI, such as "localhost",
+/// "127.0.0.1", or "[::1]".
+ARROW_EXPORT
+std::string UriEncodeHost(std::string_view host);
+
+/// Whether the string is a syntactically valid URI scheme according to RFC 3986.
+ARROW_EXPORT
+bool IsValidUriScheme(std::string_view s);
+
+/// Create a file URI from a given absolute path
+ARROW_EXPORT
+Result<std::string> UriFromAbsolutePath(std::string_view path);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca93fab5b9f4e1f43d451689f0e75cb5572ce983
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+// Convert a UTF8 string to a wstring (either UTF16 or UTF32, depending
+// on the wchar_t width).
+ARROW_EXPORT Result<std::wstring> UTF8ToWideString(std::string_view source);
+
+// Similarly, convert a wstring to a UTF8 string.
+ARROW_EXPORT Result<std::string> WideStringToUTF8(const std::wstring& source);
+
+// Convert UTF8 string to a UTF16 string.
+ARROW_EXPORT Result<std::u16string> UTF8StringToUTF16(std::string_view source);
+
+// Convert UTF16 string to a UTF8 string.
+ARROW_EXPORT Result<std::string> UTF16StringToUTF8(std::u16string_view source);
+
+// This function needs to be called before doing UTF8 validation.
+ARROW_EXPORT void InitializeUTF8();
+
+ARROW_EXPORT bool ValidateUTF8(const uint8_t* data, int64_t size);
+
+ARROW_EXPORT bool ValidateUTF8(std::string_view str);
+
+// Skip UTF8 byte order mark, if any.
+ARROW_EXPORT
+Result<const uint8_t*> SkipUTF8BOM(const uint8_t* data, int64_t size);
+
+static constexpr uint32_t kMaxUnicodeCodepoint = 0x110000;
+
+}  // namespace util
+}  // namespace arrow
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea0d0167569e8272b1b56da5c344fd83b17e6013
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_compatibility.h
@@ -0,0 +1,40 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifdef _WIN32
+
+// Windows defines min and max macros that mess up std::min/max
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+
+// Set Windows 7 as a conservative minimum for Apache Arrow
+#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x601
+#undef _WIN32_WINNT
+#endif
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x601
+#endif
+
+#include <winsock2.h>
+#include <windows.h>
+
+#include "arrow/util/windows_fixup.h"
+
+#endif  // _WIN32
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h
new file mode 100644
index 0000000000000000000000000000000000000000..2949ac4ab768890d866be6133babbe6f92459ab3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This header needs to be included multiple times.
+
+#ifdef _WIN32
+
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+
+// The Windows API defines macros from *File resolving to either
+// *FileA or *FileW. Need to undo them.
+#ifdef CopyFile
+#undef CopyFile
+#endif
+#ifdef CreateFile
+#undef CreateFile
+#endif
+#ifdef DeleteFile
+#undef DeleteFile
+#endif
+
+// Other annoying Windows macro definitions...
+#ifdef IN
+#undef IN
+#endif
+#ifdef OUT
+#undef OUT
+#endif
+
+// Note that we can't undefine OPTIONAL, because it can be used in other
+// Windows headers...
+
+#endif  // _WIN32
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500
new file mode 100644
index 0000000000000000000000000000000000000000..85ea63048402cb79f72f2212592c8d7640ba1352
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b328f397e22675cd21a4d8776a128568a4a31f4178e20f8903a60ecc211e6f2
+size 2753896
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so
new file mode 100644
index 0000000000000000000000000000000000000000..24d70446582bf4bdf9c5b99c6f257e248cadf5bd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70b3b4ea40841939c029331b153eb7eab8a10bc32b03c242b674c456b75c2d0f
+size 2705736
diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 b/env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500
new file mode 100644
index 0000000000000000000000000000000000000000..4e7d0d44b45f31292b643f0374d38341c0962fe3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16d0d85de2070d4490c6cb02ffa5e994e6305b3e97c54a4c04914ad2fbc6c669
+size 10851312
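Finally, a usage sketch for the Uri parser from arrow/util/uri.h above (hypothetical input; error handling shortened):

// Hypothetical sketch: parse a URI and read out its components.
#include <iostream>
#include "arrow/util/uri.h"

int main() {
  arrow::internal::Uri uri;
  // Parse() reports malformed input through Status rather than throwing.
  arrow::Status st = uri.Parse("https://user:pass@localhost:8080/path?k=v");
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
    return 1;
  }
  // Expected output: https localhost 8080 /path
  std::cout << uri.scheme() << " " << uri.host() << " " << uri.port() << " "
            << uri.path() << std::endl;
  return 0;
}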