diff --git a/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9116173e532d5fbffd7fc596f478495c3ef0dec2
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42787a47c14038affaa6997674816506ae6d10dd7e8b3dfc35c90a52026fc982
+size 33555627
diff --git a/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..acd6cba11d6e689e5627d4503792358c96a1b84b
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5dbb776214980e309ca63828409e3f40bbc566805152479a4e13703a48fd21f
+size 33555612
diff --git a/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8d1a6520bc9196798883acb46855d2a9e65004ad
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b67284a4e140d271ba46aba22965ed84ae00a8e87ce91f82f3cc8589c90b46ff
+size 33555627
diff --git a/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3b6c20366b0e7b6fe61c7f0d9ab767d65e15331b
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3d24f58bab28f666390d91d4f38b3f780aa4bf48b071b64ce8a911c47b71332
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e5210b073ee4218145646bc512e06a9a0d3df6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
@@ -0,0 +1,466 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Eager evaluation convenience APIs for invoking common functions, including
+// necessary memory allocations
+
+#pragma once
+
+#include <vector>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+
+namespace compute {
+
+class ExecContext;
+
+// ----------------------------------------------------------------------
+// Aggregate functions
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+/// \brief Control general scalar aggregate kernel behavior
+///
+/// By default, null values are ignored (skip_nulls = true).
+class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
+ public:
+  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
+  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
+  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }
+
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control count aggregate kernel behavior.
+///
+/// By default, only non-null values are counted.
+class ARROW_EXPORT CountOptions : public FunctionOptions {
+ public:
+  enum CountMode {
+    /// Count only non-null values.
+    ONLY_VALID = 0,
+    /// Count only null values.
+    ONLY_NULL,
+    /// Count both non-null and null values.
+    ALL,
+  };
+  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
+  static constexpr char const kTypeName[] = "CountOptions";
+  static CountOptions Defaults() { return CountOptions{}; }
+
+  CountMode mode;
+};
+
+/// \brief Control Mode kernel behavior
+///
+/// Returns top-n common values and counts.
+/// By default, returns the most common value and count.
+class ARROW_EXPORT ModeOptions : public FunctionOptions {
+ public:
+  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "ModeOptions";
+  static ModeOptions Defaults() { return ModeOptions{}; }
+
+  int64_t n = 1;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
+///
+/// The divisor used in calculations is N - ddof, where N is the number of elements.
+/// By default, ddof is zero, and population variance or stddev is returned.
+class ARROW_EXPORT VarianceOptions : public FunctionOptions {
+ public:
+  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "VarianceOptions";
+  static VarianceOptions Defaults() { return VarianceOptions{}; }
+
+  int ddof = 0;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
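+
+// A minimal usage sketch for the aggregate options above (illustrative only;
+// assumes `arr` is a std::shared_ptr<arrow::Array> of int64 values containing
+// some nulls):
+//
+//   arrow::compute::ScalarAggregateOptions opts(/*skip_nulls=*/false);
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Sum(arr, opts));
+//   // With skip_nulls=false, any null in `arr` makes `sum` a null scalar;
+//   // the default (skip_nulls=true) ignores nulls instead.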
+
+/// \brief Control Quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT QuantileOptions : public FunctionOptions {
+ public:
+  /// Interpolation method to use when quantile lies between two data points
+  enum Interpolation {
+    LINEAR = 0,
+    LOWER,
+    HIGHER,
+    NEAREST,
+    MIDPOINT,
+  };
+
+  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  explicit QuantileOptions(std::vector<double> q,
+                           enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  static constexpr char const kTypeName[] = "QuantileOptions";
+  static QuantileOptions Defaults() { return QuantileOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  enum Interpolation interpolation;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control TDigest approximate quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT TDigestOptions : public FunctionOptions {
+ public:
+  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "TDigestOptions";
+  static TDigestOptions Defaults() { return TDigestOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  /// compression parameter, default 100
+  uint32_t delta;
+  /// input buffer size, default 500
+  uint32_t buffer_size;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Index kernel behavior
+class ARROW_EXPORT IndexOptions : public FunctionOptions {
+ public:
+  explicit IndexOptions(std::shared_ptr<Scalar> value);
+  // Default constructor for serialization
+  IndexOptions();
+  static constexpr char const kTypeName[] = "IndexOptions";
+
+  std::shared_ptr<Scalar> value;
+};
+
+/// \brief Configure a grouped aggregation
+struct ARROW_EXPORT Aggregate {
+  Aggregate() = default;
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            std::vector<FieldRef> target, std::string name = "")
+      : function(std::move(function)),
+        options(std::move(options)),
+        target(std::move(target)),
+        name(std::move(name)) {}
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            FieldRef target, std::string name = "")
+      : Aggregate(std::move(function), std::move(options),
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, FieldRef target, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  /*target=*/std::vector<FieldRef>{}, std::move(name)) {}
+
+  /// the name of the aggregation function
+  std::string function;
+
+  /// options for the aggregation function
+  std::shared_ptr<FunctionOptions> options;
+
+  /// zero or more fields to which aggregations will be applied
+  std::vector<FieldRef> target;
+
+  /// optional output field name for aggregations
+  std::string name;
+};
+
+/// @}
+
+/// \brief Count values in an array.
+///
+/// \param[in] options counting options, see CountOptions for more information
+/// \param[in] datum to count
+/// \param[in] ctx the function execution context, optional
+/// \return out resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Count(const Datum& datum,
+                    const CountOptions& options = CountOptions::Defaults(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the mean of a numeric array.
+///
+/// \param[in] value datum to compute the mean, expecting Array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed mean as a DoubleScalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mean(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the product of values of a numeric array.
+///
+/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed product as a Scalar
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Product(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
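+
+// Hedged sketch of the eager aggregate entry points above (assumes the same
+// illustrative `arr` as before):
+//
+//   arrow::compute::CountOptions all(arrow::compute::CountOptions::ALL);
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum n, arrow::compute::Count(arr, all));
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum mean, arrow::compute::Mean(arr));
+//   // `n` counts nulls and non-nulls alike; `mean` is a DoubleScalar.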
+
+/// \brief Sum values of a numeric array.
+///
+/// \param[in] value datum to sum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed sum as a Scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Sum(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the first value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed first as Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> First(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the last value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed last as a Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Last(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the min / max of a numeric array
+///
+/// This function returns both the min and max as a struct scalar, with type
+/// struct<min: T, max: T>, where T is the input type
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a struct scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> MinMax(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
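+
+// Illustrative sketch of consuming the MinMax struct scalar (field order is
+// min, then max, per the declared struct<min: T, max: T> type):
+//
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum mm, arrow::compute::MinMax(arr));
+//   const auto& st = mm.scalar_as<arrow::StructScalar>();
+//   // st.value[0] is the minimum, st.value[1] the maximum of `arr`.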
+
+/// \brief Test whether any element in a boolean array evaluates to true.
+///
+/// This function returns true if any of the elements in the array evaluates
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false then Kleene logic is used.
+/// See KleeneOr for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+///
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Any(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Test whether all elements in a boolean array evaluate to true.
+///
+/// This function returns true if all of the elements in the array evaluate
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false then Kleene logic is used.
+/// See KleeneAnd for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+///
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> All(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the modal (most common) value of a numeric array
+///
+/// This function returns top-n most common values and number of times they occur as
+/// an array of `struct<mode: T, count: int64>`, where T is the input type.
+/// Values with larger counts are returned before smaller ones.
+/// If there is more than one value with the same count, the smaller value is
+/// returned first.
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ModeOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array of struct<mode: T, count: int64>
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mode(const Datum& value,
+                   const ModeOptions& options = ModeOptions::Defaults(),
+                   ExecContext* ctx = NULLPTR);
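+
+// Minimal sketch for Mode (illustrative): requesting the two most common
+// values of `arr`:
+//
+//   arrow::compute::ModeOptions opts(/*n=*/2);
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum modes, arrow::compute::Mode(arr, opts));
+//   // `modes` is an array of struct<mode: T, count: int64>, ordered by
+//   // descending count, with ties broken by the smaller value first.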
+
+/// \brief Calculate the standard deviation of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed standard deviation as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Stddev(const Datum& value,
+                     const VarianceOptions& options = VarianceOptions::Defaults(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the variance of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed variance as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Variance(const Datum& value,
+                       const VarianceOptions& options = VarianceOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the quantiles of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see QuantileOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Quantile(const Datum& value,
+                       const QuantileOptions& options = QuantileOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the approximate quantiles of a numeric array with T-Digest algorithm
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see TDigestOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> TDigest(const Datum& value,
+                      const TDigestOptions& options = TDigestOptions::Defaults(),
+                      ExecContext* ctx = NULLPTR);
+
+/// \brief Find the first index of a value in an array.
+///
+/// \param[in] value The array to search.
+/// \param[in] options The value to search for. See IndexOptions.
+/// \param[in] ctx the function execution context, optional
+/// \return out a Scalar containing the index (or -1 if not found).
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Index(const Datum& value, const IndexOptions& options,
+                    ExecContext* ctx = NULLPTR);
+
+}  // namespace compute
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h
new file mode 100644
index 0000000000000000000000000000000000000000..bad34f4a37881e82b3b0787f2d2c9c7c8d4a0461
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h
@@ -0,0 +1,1717 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Eager evaluation convenience APIs for invoking common functions, including
+// necessary memory allocations
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \addtogroup compute-concrete-options
+///
+/// @{
+
+class ARROW_EXPORT ArithmeticOptions : public FunctionOptions {
+ public:
+  explicit ArithmeticOptions(bool check_overflow = false);
+  static constexpr char const kTypeName[] = "ArithmeticOptions";
+  bool check_overflow;
+};
+
+class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions {
+ public:
+  explicit ElementWiseAggregateOptions(bool skip_nulls = true);
+  static constexpr char const kTypeName[] = "ElementWiseAggregateOptions";
+  static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; }
+  bool skip_nulls;
+};
+
+/// Rounding and tie-breaking modes for round compute functions.
+/// Additional details and examples are provided in compute.rst.
+enum class RoundMode : int8_t {
+  /// Round to nearest integer less than or equal in magnitude (aka "floor")
+  DOWN,
+  /// Round to nearest integer greater than or equal in magnitude (aka "ceil")
+  UP,
+  /// Get the integral part without fractional digits (aka "trunc")
+  TOWARDS_ZERO,
+  /// Round negative values with DOWN rule
+  /// and positive values with UP rule (aka "away from zero")
+  TOWARDS_INFINITY,
+  /// Round ties with DOWN rule (also called "round half towards negative infinity")
+  HALF_DOWN,
+  /// Round ties with UP rule (also called "round half towards positive infinity")
+  HALF_UP,
+  /// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity")
+  HALF_TOWARDS_ZERO,
+  /// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero")
+  HALF_TOWARDS_INFINITY,
+  /// Round ties to nearest even integer
+  HALF_TO_EVEN,
+  /// Round ties to nearest odd integer
+  HALF_TO_ODD,
+};
+
+class ARROW_EXPORT RoundOptions : public FunctionOptions {
+ public:
+  explicit RoundOptions(int64_t ndigits = 0,
+                        RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundOptions";
+  static RoundOptions Defaults() { return RoundOptions(); }
+  /// Rounding precision (number of digits to round to)
+  int64_t ndigits;
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions {
+ public:
+  explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundBinaryOptions";
+  static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); }
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
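+
+// A minimal sketch of how the tie-breaking modes above differ (illustrative;
+// assumes a double Datum holding 2.5):
+//
+//   arrow::compute::RoundOptions even(/*ndigits=*/0,
+//                                     arrow::compute::RoundMode::HALF_TO_EVEN);
+//   arrow::compute::RoundOptions up(/*ndigits=*/0,
+//                                   arrow::compute::RoundMode::HALF_UP);
+//   // Round(2.5, even) -> 2.0, while Round(2.5, up) -> 3.0.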
+
+enum class CalendarUnit : int8_t {
+  NANOSECOND,
+  MICROSECOND,
+  MILLISECOND,
+  SECOND,
+  MINUTE,
+  HOUR,
+  DAY,
+  WEEK,
+  MONTH,
+  QUARTER,
+  YEAR
+};
+
+class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions {
+ public:
+  explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY,
+                                bool week_starts_monday = true,
+                                bool ceil_is_strictly_greater = false,
+                                bool calendar_based_origin = false);
+  static constexpr char const kTypeName[] = "RoundTemporalOptions";
+  static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); }
+
+  /// Number of units to round to
+  int multiple;
+  /// The unit used for rounding of time
+  CalendarUnit unit;
+  /// What day does the week start with (Monday=true, Sunday=false)
+  bool week_starts_monday;
+  /// Enable this flag to return a rounded value that is strictly greater than the input.
+  /// For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield 1970-01-01T03:00:00
+  /// if set to true and 1970-01-01T00:00:00 if set to false.
+  /// This applies for ceiling only.
+  bool ceil_is_strictly_greater;
+  /// By default time is rounded to a multiple of units since 1970-01-01T00:00:00.
+  /// By setting calendar_based_origin to true, time will be rounded to a number
+  /// of units since the last greater calendar unit.
+  /// For example: rounding to a multiple of days since the beginning of the month or
+  /// to hours since the beginning of the day.
+  /// Exceptions: week and quarter are not used as greater units, therefore days will
+  /// be rounded to the beginning of the month not week. The greater unit of week is
+  /// year.
+  /// Note that ceiling and rounding might change sorting order of an array near greater
+  /// unit change. For example rounding YYYY-mm-dd 23:00:00 to 5 hours will ceil and
+  /// round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the other hand
+  /// YYYY-mm-dd+1 00:00:00 will ceil, round and floor to YYYY-mm-dd+1 00:00:00. This
+  /// can break the order of an already ordered array.
+  bool calendar_based_origin;
+};
+
+class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions {
+ public:
+  explicit RoundToMultipleOptions(double multiple = 1.0,
+                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  explicit RoundToMultipleOptions(std::shared_ptr<Scalar> multiple,
+                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundToMultipleOptions";
+  static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); }
+  /// Rounding scale (multiple to round to).
+  ///
+  /// Should be a positive numeric scalar of a type compatible with the
+  /// argument to be rounded. The cast kernel is used to convert the rounding
+  /// multiple to match the result type.
+  std::shared_ptr<Scalar> multiple;
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+/// Options for var_args_join.
+class ARROW_EXPORT JoinOptions : public FunctionOptions {
+ public:
+  /// How to handle null values. (A null separator always results in a null output.)
+  enum NullHandlingBehavior {
+    /// A null in any input results in a null in the output.
+    EMIT_NULL,
+    /// Nulls in inputs are skipped.
+    SKIP,
+    /// Nulls in inputs are replaced with the replacement string.
+    REPLACE,
+  };
+  explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL,
+                       std::string null_replacement = "");
+  static constexpr char const kTypeName[] = "JoinOptions";
+  static JoinOptions Defaults() { return JoinOptions(); }
+  NullHandlingBehavior null_handling;
+  std::string null_replacement;
+};
+
+class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions {
+ public:
+  explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false);
+  MatchSubstringOptions();
+  static constexpr char const kTypeName[] = "MatchSubstringOptions";
+
+  /// The exact substring (or regex, depending on kernel) to look for inside input values.
+  std::string pattern;
+  /// Whether to perform a case-insensitive match.
+  bool ignore_case;
+};
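+
+// Illustrative sketch of a substring match via the generic function-call API
+// (assumes `arr` is a std::shared_ptr<arrow::Array> of strings):
+//
+//   arrow::compute::MatchSubstringOptions opts("foo", /*ignore_case=*/true);
+//   ARROW_ASSIGN_OR_RAISE(
+//       arrow::Datum matched,
+//       arrow::compute::CallFunction("match_substring", {arr}, &opts));
+//   // `matched` is a boolean array: true wherever a value contains the
+//   // pattern, matched case-insensitively.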
+
+class ARROW_EXPORT SplitOptions : public FunctionOptions {
+ public:
+  explicit SplitOptions(int64_t max_splits = -1, bool reverse = false);
+  static constexpr char const kTypeName[] = "SplitOptions";
+
+  /// Maximum number of splits allowed, or unlimited when -1
+  int64_t max_splits;
+  /// Start splitting from the end of the string (only relevant when max_splits != -1)
+  bool reverse;
+};
+
+class ARROW_EXPORT SplitPatternOptions : public FunctionOptions {
+ public:
+  explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1,
+                               bool reverse = false);
+  SplitPatternOptions();
+  static constexpr char const kTypeName[] = "SplitPatternOptions";
+
+  /// The exact substring to split on.
+  std::string pattern;
+  /// Maximum number of splits allowed, or unlimited when -1
+  int64_t max_splits;
+  /// Start splitting from the end of the string (only relevant when max_splits != -1)
+  bool reverse;
+};
+
+class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions {
+ public:
+  explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement);
+  ReplaceSliceOptions();
+  static constexpr char const kTypeName[] = "ReplaceSliceOptions";
+
+  /// Index to start slicing at
+  int64_t start;
+  /// Index to stop slicing at
+  int64_t stop;
+  /// String to replace the slice with
+  std::string replacement;
+};
+
+class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions {
+ public:
+  explicit ReplaceSubstringOptions(std::string pattern, std::string replacement,
+                                   int64_t max_replacements = -1);
+  ReplaceSubstringOptions();
+  static constexpr char const kTypeName[] = "ReplaceSubstringOptions";
+
+  /// Pattern to match, literal, or regular expression depending on which kernel is used
+  std::string pattern;
+  /// String to replace the pattern with
+  std::string replacement;
+  /// Max number of substrings to replace (-1 means unbounded)
+  int64_t max_replacements;
+};
+
+class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions {
+ public:
+  explicit ExtractRegexOptions(std::string pattern);
+  ExtractRegexOptions();
+  static constexpr char const kTypeName[] = "ExtractRegexOptions";
+
+  /// Regular expression with named capture fields
+  std::string pattern;
+};
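+
+// Hedged sketch of named-group extraction (assumes `arr` holds strings such
+// as "a1"; the group names `letter` and `digit` are illustrative):
+//
+//   arrow::compute::ExtractRegexOptions opts("(?P<letter>[ab])(?P<digit>\\d)");
+//   ARROW_ASSIGN_OR_RAISE(
+//       arrow::Datum extracted,
+//       arrow::compute::CallFunction("extract_regex", {arr}, &opts));
+//   // `extracted` is a struct array with one field per named capture group.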
+
+/// Options for IsIn and IndexIn functions
+class ARROW_EXPORT SetLookupOptions : public FunctionOptions {
+ public:
+  /// How to handle null values.
+  enum NullMatchingBehavior {
+    /// MATCH, any null in `value_set` is successfully matched in
+    /// the input.
+    MATCH,
+    /// SKIP, any null in `value_set` is ignored and nulls in the input
+    /// produce null (IndexIn) or false (IsIn) values in the output.
+    SKIP,
+    /// EMIT_NULL, any null in `value_set` is ignored and nulls in the
+    /// input produce null (IndexIn and IsIn) values in the output.
+    EMIT_NULL,
+    /// INCONCLUSIVE, null values are regarded as unknown values, which is
+    /// sql-compatible. nulls in the input produce null (IndexIn and IsIn)
+    /// values in the output. Besides, if `value_set` contains a null,
+    /// non-null unmatched values in the input also produce null values
+    /// (IndexIn and IsIn) in the output.
+    INCONCLUSIVE
+  };
+
+  explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH);
+  SetLookupOptions();
+
+  // DEPRECATED(will be removed after removing of skip_nulls)
+  explicit SetLookupOptions(Datum value_set, bool skip_nulls);
+
+  static constexpr char const kTypeName[] = "SetLookupOptions";
+
+  /// The set of values to look up input values into.
+  Datum value_set;
+
+  NullMatchingBehavior null_matching_behavior;
+
+  // DEPRECATED(will be removed after removing of skip_nulls)
+  NullMatchingBehavior GetNullMatchingBehavior() const;
+
+  // DEPRECATED(use null_matching_behavior instead)
+  /// Whether nulls in `value_set` count for lookup.
+  ///
+  /// If true, any null in `value_set` is ignored and nulls in the input
+  /// produce null (IndexIn) or false (IsIn) values in the output.
+  /// If false, any null in `value_set` is successfully matched in
+  /// the input.
+  std::optional<bool> skip_nulls;
+};
+
+/// Options for struct_field function
+class ARROW_EXPORT StructFieldOptions : public FunctionOptions {
+ public:
+  explicit StructFieldOptions(std::vector<int> indices);
+  explicit StructFieldOptions(std::initializer_list<int>);
+  explicit StructFieldOptions(FieldRef field_ref);
+  StructFieldOptions();
+  static constexpr char const kTypeName[] = "StructFieldOptions";
+
+  /// The FieldRef specifying what to extract from struct or union.
+  FieldRef field_ref;
+};
+
+class ARROW_EXPORT StrptimeOptions : public FunctionOptions {
+ public:
+  explicit StrptimeOptions(std::string format, TimeUnit::type unit,
+                           bool error_is_null = false);
+  StrptimeOptions();
+  static constexpr char const kTypeName[] = "StrptimeOptions";
+
+  /// The desired format string.
+  std::string format;
+  /// The desired time resolution
+  TimeUnit::type unit;
+  /// Return null on parsing errors if true or raise if false
+  bool error_is_null;
+};
+
+class ARROW_EXPORT StrftimeOptions : public FunctionOptions {
+ public:
+  explicit StrftimeOptions(std::string format, std::string locale = "C");
+  StrftimeOptions();
+
+  static constexpr char const kTypeName[] = "StrftimeOptions";
+
+  static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S";
+
+  /// The desired format string.
+  std::string format;
+  /// The desired output locale string.
+  std::string locale;
+};
+
+class ARROW_EXPORT PadOptions : public FunctionOptions {
+ public:
+  explicit PadOptions(int64_t width, std::string padding = " ");
+  PadOptions();
+  static constexpr char const kTypeName[] = "PadOptions";
+
+  /// The desired string length.
+  int64_t width;
+  /// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII).
+  std::string padding;
+};
+
+class ARROW_EXPORT TrimOptions : public FunctionOptions {
+ public:
+  explicit TrimOptions(std::string characters);
+  TrimOptions();
+  static constexpr char const kTypeName[] = "TrimOptions";
+
+  /// The individual characters to be trimmed from the string.
+  std::string characters;
+};
+
+class ARROW_EXPORT SliceOptions : public FunctionOptions {
+ public:
+  explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits<int64_t>::max(),
+                        int64_t step = 1);
+  SliceOptions();
+  static constexpr char const kTypeName[] = "SliceOptions";
+  int64_t start, stop, step;
+};
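+
+// Minimal sketch of string slicing with the options above (assumes `arr`
+// holds UTF-8 strings):
+//
+//   arrow::compute::SliceOptions opts(/*start=*/0, /*stop=*/3);
+//   ARROW_ASSIGN_OR_RAISE(
+//       arrow::Datum prefix,
+//       arrow::compute::CallFunction("utf8_slice_codeunits", {arr}, &opts));
+//   // Each output value is the first three characters of the input value.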
+
+class ARROW_EXPORT ListSliceOptions : public FunctionOptions {
+ public:
+  explicit ListSliceOptions(int64_t start, std::optional<int64_t> stop = std::nullopt,
+                            int64_t step = 1,
+                            std::optional<bool> return_fixed_size_list = std::nullopt);
+  ListSliceOptions();
+  static constexpr char const kTypeName[] = "ListSliceOptions";
+  /// The start of list slicing.
+  int64_t start;
+  /// Optional stop of list slicing. If not set, then slice to end. (NotImplemented)
+  std::optional<int64_t> stop;
+  /// Slicing step
+  int64_t step;
+  // Whether to return a FixedSizeListArray. If true _and_ stop is after
+  // a list element's length, nulls will be appended to create the requested slice size.
+  // Default of `nullopt` will return whatever type it got in.
+  std::optional<bool> return_fixed_size_list;
+};
+
+class ARROW_EXPORT NullOptions : public FunctionOptions {
+ public:
+  explicit NullOptions(bool nan_is_null = false);
+  static constexpr char const kTypeName[] = "NullOptions";
+  static NullOptions Defaults() { return NullOptions{}; }
+
+  bool nan_is_null;
+};
+
+enum CompareOperator : int8_t {
+  EQUAL,
+  NOT_EQUAL,
+  GREATER,
+  GREATER_EQUAL,
+  LESS,
+  LESS_EQUAL,
+};
+
+struct ARROW_EXPORT CompareOptions {
+  explicit CompareOptions(CompareOperator op) : op(op) {}
+  CompareOptions() : CompareOptions(CompareOperator::EQUAL) {}
+  enum CompareOperator op;
+};
+
+class ARROW_EXPORT MakeStructOptions : public FunctionOptions {
+ public:
+  MakeStructOptions(std::vector<std::string> n, std::vector<bool> r,
+                    std::vector<std::shared_ptr<const KeyValueMetadata>> m);
+  explicit MakeStructOptions(std::vector<std::string> n);
+  MakeStructOptions();
+  static constexpr char const kTypeName[] = "MakeStructOptions";
+
+  /// Names for wrapped columns
+  std::vector<std::string> field_names;
+
+  /// Nullability bits for wrapped columns
+  std::vector<bool> field_nullability;
+
+  /// Metadata attached to wrapped columns
+  std::vector<std::shared_ptr<const KeyValueMetadata>> field_metadata;
+};
+
+struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions {
+ public:
+  explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1);
+  static constexpr char const kTypeName[] = "DayOfWeekOptions";
+  static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); }
+
+  /// Number days from 0 if true and from 1 if false
+  bool count_from_zero;
+  /// What day does the week start with (Monday=1, Sunday=7).
+  /// The numbering is unaffected by the count_from_zero parameter.
+  uint32_t week_start;
+};
+
+/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent
+/// times.
+struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions {
+ public:
+  /// \brief How to interpret ambiguous local times that can be interpreted as
+  /// multiple instants (normally two) due to DST shifts.
+  ///
+  /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations.
+  /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations.
+  enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST };
+
+  /// \brief How to handle local times that do not exist due to DST shifts.
+  ///
+  /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant
+  /// in the given timestamp precision (for example, for a nanoseconds precision
+  /// timestamp, this is one nanosecond before the DST shift instant).
+  /// NONEXISTENT_LATEST emits the DST shift instant.
+  enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST };
+
+  explicit AssumeTimezoneOptions(std::string timezone,
+                                 Ambiguous ambiguous = AMBIGUOUS_RAISE,
+                                 Nonexistent nonexistent = NONEXISTENT_RAISE);
+  AssumeTimezoneOptions();
+  static constexpr char const kTypeName[] = "AssumeTimezoneOptions";
+
+  /// Timezone to convert timestamps from
+  std::string timezone;
+
+  /// How to interpret ambiguous local times (due to DST shifts)
+  Ambiguous ambiguous;
+  /// How to interpret nonexistent local times (due to DST shifts)
+  Nonexistent nonexistent;
+};
+
+struct ARROW_EXPORT WeekOptions : public FunctionOptions {
+ public:
+  explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false,
+                       bool first_week_is_fully_in_year = false);
+  static constexpr char const kTypeName[] = "WeekOptions";
+  static WeekOptions Defaults() { return WeekOptions{}; }
+  static WeekOptions ISODefaults() {
+    return WeekOptions{/*week_starts_monday*/ true,
+                       /*count_from_zero=*/false,
+                       /*first_week_is_fully_in_year=*/false};
+  }
+  static WeekOptions USDefaults() {
+    return WeekOptions{/*week_starts_monday*/ false,
+                       /*count_from_zero=*/false,
+                       /*first_week_is_fully_in_year=*/false};
+  }
+
+  /// What day does the week start with (Monday=true, Sunday=false)
+  bool week_starts_monday;
+  /// Dates from current year that fall into last ISO week of the previous year return
+  /// 0 if true and 52 or 53 if false.
+  bool count_from_zero;
+  /// Must the first week be fully in January (true), or is a week that begins on
+  /// December 29, 30, or 31 considered to be the first week of the new year (false)?
+  bool first_week_is_fully_in_year;
+};
+
+struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions {
+ public:
+  enum Form { NFC, NFKC, NFD, NFKD };
+
+  explicit Utf8NormalizeOptions(Form form = NFC);
+  static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); }
+  static constexpr char const kTypeName[] = "Utf8NormalizeOptions";
+
+  /// The Unicode normalization form to apply
+  Form form;
+};
+
+class ARROW_EXPORT RandomOptions : public FunctionOptions {
+ public:
+  enum Initializer { SystemRandom, Seed };
+
+  static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; }
+  static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; }
+
+  RandomOptions(Initializer initializer, uint64_t seed);
+  RandomOptions();
+  static constexpr char const kTypeName[] = "RandomOptions";
+  static RandomOptions Defaults() { return RandomOptions(); }
+
+  /// The type of initialization for random number generation - system or provided seed.
+  Initializer initializer;
+  /// The seed value used to initialize the random number generation.
+  uint64_t seed;
+};
+
+/// Options for map_lookup function
+class ARROW_EXPORT MapLookupOptions : public FunctionOptions {
+ public:
+  enum Occurrence {
+    /// Return the first matching value
+    FIRST,
+    /// Return the last matching value
+    LAST,
+    /// Return all matching values
+    ALL
+  };
+
+  explicit MapLookupOptions(std::shared_ptr<Scalar> query_key, Occurrence occurrence);
+  MapLookupOptions();
+
+  constexpr static char const kTypeName[] = "MapLookupOptions";
+
+  /// The key to lookup in the map
+  std::shared_ptr<Scalar> query_key;
+
+  /// Whether to return the first, last, or all matching values
+  Occurrence occurrence;
+};
+
+/// @}
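+
+// Illustrative sketch of applying one of the option classes above through the
+// generic call API (assumes `arr` holds UTF-8 strings):
+//
+//   arrow::compute::Utf8NormalizeOptions opts(
+//       arrow::compute::Utf8NormalizeOptions::NFKC);
+//   ARROW_ASSIGN_OR_RAISE(
+//       arrow::Datum normalized,
+//       arrow::compute::CallFunction("utf8_normalize", {arr}, &opts));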
+
+/// \brief Get the absolute value of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value transformed
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise absolute value
+ARROW_EXPORT
+Result<Datum> AbsoluteValue(const Datum& arg,
+                            ArithmeticOptions options = ArithmeticOptions(),
+                            ExecContext* ctx = NULLPTR);
+
+/// \brief Add two values together. Array values must be the same length. If
+/// either addend is null the result will be null.
+///
+/// \param[in] left the first addend
+/// \param[in] right the second addend
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise sum
+ARROW_EXPORT
+Result<Datum> Add(const Datum& left, const Datum& right,
+                  ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Subtract two values. Array values must be the same length. If the
+/// minuend or subtrahend is null the result will be null.
+///
+/// \param[in] left the value subtracted from (minuend)
+/// \param[in] right the value by which the minuend is reduced (subtrahend)
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise difference
+ARROW_EXPORT
+Result<Datum> Subtract(const Datum& left, const Datum& right,
+                       ArithmeticOptions options = ArithmeticOptions(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Multiply two values. Array values must be the same length. If either
+/// factor is null the result will be null.
+///
+/// \param[in] left the first factor
+/// \param[in] right the second factor
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise product
+ARROW_EXPORT
+Result<Datum> Multiply(const Datum& left, const Datum& right,
+                       ArithmeticOptions options = ArithmeticOptions(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Divide two values. Array values must be the same length. If either
+/// argument is null the result will be null. For integer types, if there is
+/// a zero divisor, an error will be raised.
+///
+/// \param[in] left the dividend
+/// \param[in] right the divisor
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise quotient
+ARROW_EXPORT
+Result<Datum> Divide(const Datum& left, const Datum& right,
+                     ArithmeticOptions options = ArithmeticOptions(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Negate values.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value negated
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise negation
+ARROW_EXPORT
+Result<Datum> Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                     ExecContext* ctx = NULLPTR);
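+
+// A hedged sketch of checked vs. unchecked arithmetic (assumes int64 Datums
+// `a` and `b`):
+//
+//   arrow::compute::ArithmeticOptions checked(/*check_overflow=*/true);
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Add(a, b, checked));
+//   // With check_overflow=true an overflowing addition returns an error
+//   // Status; with the default options integer overflow wraps silently.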
+
+/// \brief Raise the values of base array to the power of the exponent array values.
+/// Array values must be the same length. If either base or exponent is null the result
+/// will be null.
+///
+/// \param[in] left the base
+/// \param[in] right the exponent
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise base value raised to the power of exponent
+ARROW_EXPORT
+Result<Datum> Power(const Datum& left, const Datum& right,
+                    ArithmeticOptions options = ArithmeticOptions(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Raise Euler's number to the power of specified exponent, element-wise.
+/// If the exponent value is null the result will be null.
+///
+/// \param[in] arg the exponent
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise Euler's number raised to the power of exponent
+ARROW_EXPORT
+Result<Datum> Exp(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Left shift the left array by the right array. Array values must be the
+/// same length. If either operand is null, the result will be null.
+///
+/// \param[in] left the value to shift
+/// \param[in] right the value to shift by
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise left value shifted left by the right value
+ARROW_EXPORT
+Result<Datum> ShiftLeft(const Datum& left, const Datum& right,
+                        ArithmeticOptions options = ArithmeticOptions(),
+                        ExecContext* ctx = NULLPTR);
+
+/// \brief Right shift the left array by the right array. Array values must be the
+/// same length. If either operand is null, the result will be null. Performs a
+/// logical shift for unsigned values, and an arithmetic shift for signed values.
+///
+/// \param[in] left the value to shift
+/// \param[in] right the value to shift by
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise left value shifted right by the right value
+ARROW_EXPORT
+Result<Datum> ShiftRight(const Datum& left, const Datum& right,
+                         ArithmeticOptions options = ArithmeticOptions(),
+                         ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the sine of the array values.
+/// \param[in] arg The values to compute the sine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise sine of the values
+ARROW_EXPORT
+Result<Datum> Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the cosine of the array values.
+/// \param[in] arg The values to compute the cosine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise cosine of the values
+ARROW_EXPORT
+Result<Datum> Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse sine (arcsine) of the array values.
+/// \param[in] arg The values to compute the inverse sine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise inverse sine of the values
+ARROW_EXPORT
+Result<Datum> Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse cosine (arccosine) of the array values.
+/// \param[in] arg The values to compute the inverse cosine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise inverse cosine of the values
+ARROW_EXPORT
+Result<Datum> Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the tangent of the array values.
+/// \param[in] arg The values to compute the tangent for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise tangent of the values
+ARROW_EXPORT
+Result<Datum> Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse tangent (arctangent) of the array values.
+/// \param[in] arg The values to compute the inverse tangent for.
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise inverse tangent of the values
+ARROW_EXPORT
+Result<Datum> Atan(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse tangent (arctangent) of y/x, using the
+/// argument signs to determine the correct quadrant.
+/// \param[in] y The y-values to compute the inverse tangent for.
+/// \param[in] x The x-values to compute the inverse tangent for.
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise inverse tangent of the values
+ARROW_EXPORT
+Result<Datum> Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR);
+
+/// \brief Get the natural log of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg The values to compute the logarithm for.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise natural log
+ARROW_EXPORT
+Result<Datum> Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                 ExecContext* ctx = NULLPTR);
+
+/// \brief Get the log base 10 of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg The values to compute the logarithm for.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise log base 10
+ARROW_EXPORT
+Result<Datum> Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Get the log base 2 of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg The values to compute the logarithm for.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise log base 2
+ARROW_EXPORT
+Result<Datum> Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Get the natural log of (1 + value).
+///
+/// If argument is null the result will be null.
+/// This function may be more accurate than Log(1 + value) for values close to zero.
+///
+/// \param[in] arg The values to compute the logarithm for.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise natural log
+ARROW_EXPORT
+Result<Datum> Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Get the log of a value to the given base.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg The values to compute the logarithm for.
+/// \param[in] base The given base.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise log to the given base
+ARROW_EXPORT
+Result<Datum> Logb(const Datum& arg, const Datum& base,
+                   ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Get the square-root of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg The values to compute the square-root for.
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise square-root
+ARROW_EXPORT
+Result<Datum> Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Round to the nearest integer less than or equal in magnitude to the
+/// argument.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value to round
+/// \param[in] ctx the function execution context, optional
+/// \return the rounded value
+ARROW_EXPORT
+Result<Datum> Floor(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Round to the nearest integer greater than or equal in magnitude to the
+/// argument.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value to round
+/// \param[in] ctx the function execution context, optional
+/// \return the rounded value
+ARROW_EXPORT
+Result<Datum> Ceil(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Get the integral part without fractional digits.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value to truncate
+/// \param[in] ctx the function execution context, optional
+/// \return the truncated value
+ARROW_EXPORT
+Result<Datum> Trunc(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Find the element-wise maximum of any number of arrays or scalars.
+/// Array values must be the same length.
+///
+/// \param[in] args arrays or scalars to operate on.
+/// \param[in] options options for handling nulls, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise maximum
+ARROW_EXPORT
+Result<Datum> MaxElementWise(
+    const std::vector<Datum>& args,
+    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
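+
+// Minimal sketch of an element-wise aggregate (assumes equal-length numeric
+// Datums `a` and `b`):
+//
+//   ARROW_ASSIGN_OR_RAISE(arrow::Datum row_max,
+//                         arrow::compute::MaxElementWise({a, b}));
+//   // With the default skip_nulls=true, a null in one input falls back to
+//   // the value in the other input rather than producing a null.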
+
+/// \brief Find the element-wise minimum of any number of arrays or scalars.
+/// Array values must be the same length.
+///
+/// \param[in] args arrays or scalars to operate on.
+/// \param[in] options options for handling nulls, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise minimum
+ARROW_EXPORT
+Result<Datum> MinElementWise(
+    const std::vector<Datum>& args,
+    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Get the sign of a value. Array values can be of arbitrary length. If argument
+/// is null the result will be null.
+///
+/// \param[in] arg the value to extract sign from
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise sign function
+ARROW_EXPORT
+Result<Datum> Sign(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Round a value to a given precision.
+///
+/// If arg is null the result will be null.
+///
+/// \param[in] arg the value to be rounded
+/// \param[in] options rounding options (rounding mode and number of digits), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+ARROW_EXPORT
+Result<Datum> Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Round a value to a given precision.
+///
+/// If arg1 is null the result will be null.
+/// If arg2 is null then the result will be null. If arg2 is negative, then the rounding
+/// place will be shifted to the left (thus -1 would correspond to rounding to the nearest
+/// ten). If positive, the rounding place will shift to the right (and +1 would
+/// correspond to rounding to the nearest tenth).
+///
+/// \param[in] arg1 the value to be rounded
+/// \param[in] arg2 the number of significant digits to round to
+/// \param[in] options rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+ARROW_EXPORT
+Result<Datum> RoundBinary(const Datum& arg1, const Datum& arg2,
+                          RoundBinaryOptions options = RoundBinaryOptions::Defaults(),
+                          ExecContext* ctx = NULLPTR);
+
+/// \brief Round a value to a given multiple.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value to round
+/// \param[in] options rounding options (rounding mode and multiple), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+ARROW_EXPORT
+Result<Datum> RoundToMultiple(
+    const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Ceil a temporal value to a given frequency
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the temporal value to ceil
+/// \param[in] options temporal rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+///
+/// \since 7.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> CeilTemporal(
+    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
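+
+// Hedged sketch of digit-based rounding (assumes a double Datum `x` holding
+// 1234.56):
+//
+//   // RoundBinary(x, -2) rounds to the nearest hundred: 1234.56 -> 1200.0
+//   // RoundBinary(x, 1) rounds to the nearest tenth:    1234.56 -> 1234.6
+//   ARROW_ASSIGN_OR_RAISE(
+//       arrow::Datum rounded,
+//       arrow::compute::RoundBinary(x, arrow::Datum(int32_t{-2})));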
+
+/// \brief Floor a temporal value to a given frequency
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the temporal value to floor
+/// \param[in] options temporal rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+///
+/// \since 7.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> FloorTemporal(
+    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Round a temporal value to a given frequency
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the temporal value to round
+/// \param[in] options temporal rounding options, optional
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise rounded value
+///
+/// \since 7.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> RoundTemporal(
+    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Invert the values of a boolean datum
+/// \param[in] value datum to invert
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Invert(const Datum& value, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise AND of two boolean datums which always propagates nulls
+/// (null and false is null).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise AND of two boolean datums with a Kleene truth table
+/// (null and false is false).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> KleeneAnd(const Datum& left, const Datum& right,
+                        ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise OR of two boolean datums which always propagates nulls
+/// (null or true is null).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise OR of two boolean datums with a Kleene truth table
+/// (null or true is true).
+///
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> KleeneOr(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
+
+/// \brief Element-wise XOR of two boolean datums
+/// \param[in] left left operand
+/// \param[in] right right operand
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
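+
+// Minimal sketch contrasting the regular and Kleene boolean kernels, restating
+// the truth tables documented above (boolean Datums; `null` denotes a null
+// scalar):
+//
+//   // And(null, false)       -> null  (nulls always propagate)
+//   // KleeneAnd(null, false) -> false (false decides regardless of the null)
+//   // Or(null, true)         -> null
+//   // KleeneOr(null, true)   -> true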
+/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR); + +/// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table +/// (false and not null is false, null and not true is false). +/// +/// \param[in] left left operand +/// \param[in] right right operand +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result KleeneAndNot(const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief IsIn returns true for each element of `values` that is contained in +/// `value_set` +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. +/// +/// \param[in] values array-like input to look up in value_set +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IsIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IndexIn examines each slot in the values against a value_set array. +/// If the value is not found in value_set, null will be output. +/// If found, the index of occurrence within value_set (ignoring duplicates) +/// will be output. +/// +/// For example given values = [99, 42, 3, null] and +/// value_set = [3, 3, 99], the output will be = [2, null, 0, null] +/// +/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls. 
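+///
+/// A short sketch of the example above (hedged; `values` and `value_set` are
+/// assumed Datums wrapping the arrays shown):
+///
+///   arrow::compute::SetLookupOptions opts(value_set, /*skip_nulls=*/false);
+///   ARROW_ASSIGN_OR_RAISE(arrow::Datum out,
+///                         arrow::compute::IndexIn(values, opts));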
+/// +/// \param[in] values array-like input +/// \param[in] options SetLookupOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IndexIn(const Datum& values, const SetLookupOptions& options, + ExecContext* ctx = NULLPTR); +ARROW_EXPORT +Result IndexIn(const Datum& values, const Datum& value_set, + ExecContext* ctx = NULLPTR); + +/// \brief IsValid returns true for each element of `values` that is not null, +/// false otherwise +/// +/// \param[in] values input to examine for validity +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsValid(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsNull returns true for each element of `values` that is null, +/// false otherwise +/// +/// \param[in] values input to examine for nullity +/// \param[in] options NullOptions +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief IsNan returns true for each element of `values` that is NaN, +/// false otherwise +/// +/// \param[in] values input to look for NaN +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 3.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsNan(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IfElse returns elements chosen from `left` or `right` +/// depending on `cond`. `null` values in `cond` will be promoted to the result +/// +/// \param[in] cond `Boolean` condition Scalar/ Array +/// \param[in] left Scalar/ Array +/// \param[in] right Scalar/ Array +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IfElse(const Datum& cond, const Datum& left, const Datum& right, + ExecContext* ctx = NULLPTR); + +/// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for +/// each row, select the first value for which the corresponding condition is +/// true, or (if given) select the 'else' value, else emit null. Note that a +/// null condition is the same as false. +/// +/// \param[in] cond Conditions (Boolean) +/// \param[in] cases Values (any type), along with an optional 'else' value. 
+/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result CaseWhen(const Datum& cond, const std::vector& cases, + ExecContext* ctx = NULLPTR); + +/// \brief Year returns year for each element of `values` +/// +/// \param[in] values input to extract year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Year(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief IsLeapYear returns if a year is a leap year for each element of `values` +/// +/// \param[in] values input to extract leap year indicator from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 8.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Month returns month for each element of `values`. +/// Month is encoded as January=1, December=12 +/// +/// \param[in] values input to extract month from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Month(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief Day returns day number for each element of `values` +/// +/// \param[in] values input to extract day from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result Day(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief YearMonthDay returns a struct containing the Year, Month and Day value for +/// each element of `values`. +/// +/// \param[in] values input to extract (year, month, day) struct from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 7.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief DayOfWeek returns number of the day of the week value for each element of +/// `values`. +/// +/// By default week starts on Monday denoted by 0 and ends on Sunday denoted +/// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be +/// set using DayOfWeekOptions +/// +/// \param[in] values input to extract number of the day of the week from +/// \param[in] options for setting start of the week and day numbering +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfWeek(const Datum& values, + DayOfWeekOptions options = DayOfWeekOptions(), + ExecContext* ctx = NULLPTR); + +/// \brief DayOfYear returns number of day of the year for each element of `values`. +/// January 1st maps to day number 1, February 1st to 32, etc. +/// +/// \param[in] values input to extract number of day of the year from +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT Result DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief ISOYear returns ISO year number for each element of `values`. +/// First week of an ISO year has the majority (4 or more) of its days in January. 
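+///
+/// A minimal sketch (illustrative; `ts` is an assumed timestamp Datum):
+///
+///   ARROW_ASSIGN_OR_RAISE(arrow::Datum iso_year,
+///                         arrow::compute::ISOYear(ts));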
+///
+/// \param[in] values input to extract ISO year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> ISOYear(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief USYear returns US epidemiological year number for each element of `values`.
+/// First week of US epidemiological year has the majority (4 or more) of its
+/// days in January. Last week of US epidemiological year has the year's last
+/// Wednesday in it. US epidemiological week starts on Sunday.
+///
+/// \param[in] values input to extract US epidemiological year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> USYear(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief ISOWeek returns ISO week of year number for each element of `values`.
+/// First ISO week has the majority (4 or more) of its days in January.
+/// ISO week starts on Monday. Year can have 52 or 53 weeks.
+/// Week numbering can start with 1.
+///
+/// \param[in] values input to extract ISO week of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief USWeek returns US week of year number for each element of `values`.
+/// First US week has the majority (4 or more) of its days in January.
+/// US week starts on Sunday. Year can have 52 or 53 weeks.
+/// Week numbering starts with 1.
+///
+/// \param[in] values input to extract US week of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> USWeek(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Week returns week of year number for each element of `values`.
+/// First ISO week has the majority (4 or more) of its days in January.
+/// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1
+/// depending on WeekOptions.count_from_zero.
+///
+/// \param[in] values input to extract week of year from
+/// \param[in] options for setting numbering start
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Week(const Datum& values, WeekOptions options = WeekOptions(),
+                                ExecContext* ctx = NULLPTR);
+
+/// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for
+/// each element of `values`.
+/// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7.
+///
+/// \param[in] values input to extract ISO calendar struct from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Quarter returns the quarter of year number for each element of `values`.
+/// First quarter maps to 1 and fourth quarter maps to 4.
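+///
+/// The same kernel can also be invoked by name through the registry; a hedged
+/// sketch, assuming `ts` is a timestamp Datum and "quarter" is the registered
+/// function name:
+///
+///   ARROW_ASSIGN_OR_RAISE(arrow::Datum q,
+///                         arrow::compute::CallFunction("quarter", {ts}));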
+///
+/// \param[in] values input to extract quarter of year from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Quarter(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Hour returns hour value for each element of `values`
+///
+/// \param[in] values input to extract hour from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Hour(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Minute returns minutes value for each element of `values`
+///
+/// \param[in] values input to extract minutes from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Minute(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Second returns seconds value for each element of `values`
+///
+/// \param[in] values input to extract seconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Second(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Millisecond returns number of milliseconds since the last full second
+/// for each element of `values`
+///
+/// \param[in] values input to extract milliseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Millisecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Microsecond returns number of microseconds since the last full millisecond
+/// for each element of `values`
+///
+/// \param[in] values input to extract microseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Microsecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Nanosecond returns number of nanoseconds since the last full microsecond
+/// for each element of `values`
+///
+/// \param[in] values input to extract nanoseconds from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Subsecond returns the fraction of second elapsed since last full second
+/// as a float for each element of `values`
+///
+/// \param[in] values input to extract subsecond from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Subsecond(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Format timestamps according to a format string
+///
+/// Return formatted time strings according to the format string
+/// `StrftimeOptions::format` and to the locale specifier `StrftimeOptions::locale`.
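+///
+/// A minimal sketch (illustrative; `ts` is an assumed timestamp Datum and the
+/// format string is an example, not a required value):
+///
+///   arrow::compute::StrftimeOptions opts("%Y-%m-%dT%H:%M:%S");
+///   ARROW_ASSIGN_OR_RAISE(arrow::Datum formatted,
+///                         arrow::compute::Strftime(ts, opts));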
+///
+/// \param[in] values input timestamps
+/// \param[in] options for setting format string and locale
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Strftime(const Datum& values, StrftimeOptions options,
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Parse timestamps according to a format string
+///
+/// Return parsed timestamps according to the format string
+/// `StrptimeOptions::format` at time resolution `StrptimeOptions::unit`. Parse errors are
+/// raised depending on the `StrptimeOptions::error_is_null` setting.
+///
+/// \param[in] values input strings
+/// \param[in] options for setting format string, unit and error_is_null
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> Strptime(const Datum& values, StrptimeOptions options,
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Converts timestamps from local timestamp without a timezone to a timestamp with
+/// timezone, interpreting the local timestamp as being in the specified timezone for each
+/// element of `values`
+///
+/// \param[in] values input to convert
+/// \param[in] options for setting source timezone, exception and ambiguous timestamp
+/// handling.
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> AssumeTimezone(const Datum& values,
+                                          AssumeTimezoneOptions options,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief IsDaylightSavings extracts if currently observing daylight savings for each
+/// element of `values`
+///
+/// \param[in] values input to extract daylight savings indicator from
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> IsDaylightSavings(const Datum& values,
+                                             ExecContext* ctx = NULLPTR);
+
+/// \brief LocalTimestamp converts timestamp to timezone naive local timestamp
+///
+/// \param[in] values input to convert to local time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 12.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> LocalTimestamp(const Datum& values,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief Years Between finds the number of years between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> YearsBetween(const Datum& left, const Datum& right,
+                                        ExecContext* ctx = NULLPTR);
+
+/// \brief Quarters Between finds the number of quarters between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> QuartersBetween(const Datum& left, const Datum& right,
+                                           ExecContext* ctx = NULLPTR);
+
+/// \brief Months Between finds the number of months between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MonthsBetween(const Datum& left, const Datum& right,
+                                         ExecContext* ctx = NULLPTR);
+
+/// \brief Weeks Between finds the number of weeks between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> WeeksBetween(const Datum& left, const Datum& right,
+                                        ExecContext* ctx = NULLPTR);
+
+/// \brief Month Day Nano Between finds the number of months, days, and nanoseconds
+/// between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MonthDayNanoBetween(const Datum& left, const Datum& right,
+                                               ExecContext* ctx = NULLPTR);
+
+/// \brief DayTime Between finds the number of days and milliseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> DayTimeBetween(const Datum& left, const Datum& right,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief Days Between finds the number of days between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> DaysBetween(const Datum& left, const Datum& right,
+                                       ExecContext* ctx = NULLPTR);
+
+/// \brief Hours Between finds the number of hours between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> HoursBetween(const Datum& left, const Datum& right,
+                                        ExecContext* ctx = NULLPTR);
+
+/// \brief Minutes Between finds the number of minutes between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MinutesBetween(const Datum& left, const Datum& right,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief Seconds Between finds the number of seconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> SecondsBetween(const Datum& left, const Datum& right,
+                                          ExecContext* ctx = NULLPTR);
+
+/// \brief Milliseconds Between finds the number of milliseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MillisecondsBetween(const Datum& left, const Datum& right,
+                                               ExecContext* ctx = NULLPTR);
+
+/// \brief Microseconds Between finds the number of microseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MicrosecondsBetween(const Datum& left, const Datum& right,
+                                               ExecContext* ctx = NULLPTR);
+
+/// \brief Nanoseconds Between finds the number of nanoseconds between two values
+///
+/// \param[in] left input treated as the start time
+/// \param[in] right input treated as the end time
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> NanosecondsBetween(const Datum& left, const Datum& right,
+                                              ExecContext* ctx = NULLPTR);
+
+/// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given
+/// query key in a map.
+///
+/// Returns an array of items for FIRST and LAST, and an array of lists of items for ALL.
+///
+/// \param[in] map to look in
+/// \param[in] options to pass a query key and choose which matching keys to return
+/// (FIRST, LAST or ALL)
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+///
+/// \since 8.0.0
+/// \note API not yet finalized
+ARROW_EXPORT Result<Datum> MapLookup(const Datum& map, MapLookupOptions options,
+                                     ExecContext* ctx = NULLPTR);
+} // namespace compute
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h
new file mode 100644
index 0000000000000000000000000000000000000000..919572f16ee69edaa348f432d36214896b455732
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h
@@ -0,0 +1,697 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/compute/ordering.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace compute {
+
+class ExecContext;
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+class ARROW_EXPORT FilterOptions : public FunctionOptions {
+ public:
+  /// Configure the action taken when a slot of the selection mask is null
+  enum NullSelectionBehavior {
+    /// The corresponding filtered value will be removed in the output.
+    DROP,
+    /// The corresponding filtered value will be null in the output.
+    EMIT_NULL,
+  };
+
+  explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
+  static constexpr char const kTypeName[] = "FilterOptions";
+  static FilterOptions Defaults() { return FilterOptions(); }
+
+  NullSelectionBehavior null_selection_behavior = DROP;
+};
+
+class ARROW_EXPORT TakeOptions : public FunctionOptions {
+ public:
+  explicit TakeOptions(bool boundscheck = true);
+  static constexpr char const kTypeName[] = "TakeOptions";
+  static TakeOptions BoundsCheck() { return TakeOptions(true); }
+  static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
+  static TakeOptions Defaults() { return BoundsCheck(); }
+
+  bool boundscheck = true;
+};
+
+/// \brief Options for the dictionary encode function
+class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
+ public:
+  /// Configure how null values will be encoded
+  enum NullEncodingBehavior {
+    /// The null value will be added to the dictionary with a proper index.
+    ENCODE,
+    /// The null value will be masked in the indices array.
+    MASK
+  };
+
+  explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
+  static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
+  static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }
+
+  NullEncodingBehavior null_encoding_behavior = MASK;
+};
+
+/// \brief Options for the run-end encode function
+class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions {
+ public:
+  explicit RunEndEncodeOptions(std::shared_ptr<DataType> run_end_type = int32());
+  static constexpr char const kTypeName[] = "RunEndEncodeOptions";
+  static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); }
+
+  std::shared_ptr<DataType> run_end_type;
+};
+
+class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
+ public:
+  explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
+                            NullPlacement null_placement = NullPlacement::AtEnd);
+  static constexpr char const kTypeName[] = "ArraySortOptions";
+  static ArraySortOptions Defaults() { return ArraySortOptions(); }
+
+  /// Sorting order
+  SortOrder order;
+  /// Whether nulls and NaNs are placed at the start or at the end
+  NullPlacement null_placement;
+};
+
+class ARROW_EXPORT SortOptions : public FunctionOptions {
+ public:
+  explicit SortOptions(std::vector<SortKey> sort_keys = {},
+                       NullPlacement null_placement = NullPlacement::AtEnd);
+  explicit SortOptions(const Ordering& ordering);
+  static constexpr char const kTypeName[] = "SortOptions";
+  static SortOptions Defaults() { return SortOptions(); }
+  /// Convenience constructor to create an ordering from SortOptions
+  ///
+  /// Note: Both classes contain the exact same information. However,
+  /// sort_options should only be used in a "function options" context while Ordering
+  /// is used more generally.
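+  ///
+  /// A hedged sketch of the conversion (the column name "a" is illustrative):
+  ///
+  ///   SortOptions opts({SortKey("a", SortOrder::Descending)});
+  ///   Ordering ordering = opts.AsOrdering();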
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); } + Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; +}; + +/// \brief SelectK options +class ARROW_EXPORT SelectKOptions : public FunctionOptions { + public: + explicit SelectKOptions(int64_t k = -1, std::vector sort_keys = {}); + static constexpr char const kTypeName[] = "SelectKOptions"; + static SelectKOptions Defaults() { return SelectKOptions(); } + + static SelectKOptions TopKDefault(int64_t k, std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Descending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Descending)); + } + return SelectKOptions{k, keys}; + } + static SelectKOptions BottomKDefault(int64_t k, + std::vector key_names = {}) { + std::vector keys; + for (const auto& name : key_names) { + keys.emplace_back(SortKey(name, SortOrder::Ascending)); + } + if (key_names.empty()) { + keys.emplace_back(SortKey("not-used", SortOrder::Ascending)); + } + return SelectKOptions{k, keys}; + } + + /// The number of `k` elements to keep. + int64_t k; + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; +}; + +/// \brief Rank options +class ARROW_EXPORT RankOptions : public FunctionOptions { + public: + /// Configure how ties between equal values are handled + enum Tiebreaker { + /// Ties get the smallest possible rank in sorted order. + Min, + /// Ties get the largest possible rank in sorted order. + Max, + /// Ranks are assigned in order of when ties appear in the input. + /// This ensures the ranks are a stable permutation of the input. + First, + /// The ranks span a dense [1, M] interval where M is the number + /// of distinct values in the input. + Dense + }; + + explicit RankOptions(std::vector sort_keys = {}, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First); + /// Convenience constructor for array inputs + explicit RankOptions(SortOrder order, + NullPlacement null_placement = NullPlacement::AtEnd, + Tiebreaker tiebreaker = RankOptions::First) + : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {} + + static constexpr char const kTypeName[] = "RankOptions"; + static RankOptions Defaults() { return RankOptions(); } + + /// Column key(s) to order by and how to order by these sort keys. + std::vector sort_keys; + /// Whether nulls and NaNs are placed at the start or at the end + NullPlacement null_placement; + /// Tiebreaker for dealing with equal values in ranks + Tiebreaker tiebreaker; +}; + +/// \brief Partitioning options for NthToIndices +class ARROW_EXPORT PartitionNthOptions : public FunctionOptions { + public: + explicit PartitionNthOptions(int64_t pivot, + NullPlacement null_placement = NullPlacement::AtEnd); + PartitionNthOptions() : PartitionNthOptions(0) {} + static constexpr char const kTypeName[] = "PartitionNthOptions"; + + /// The index into the equivalent sorted array of the partition pivot element. 
+  int64_t pivot;
+  /// Whether nulls and NaNs are partitioned at the start or at the end
+  NullPlacement null_placement;
+};
+
+/// \brief Options for cumulative functions
+/// \note Also aliased as CumulativeSumOptions for backward compatibility
+class ARROW_EXPORT CumulativeOptions : public FunctionOptions {
+ public:
+  explicit CumulativeOptions(bool skip_nulls = false);
+  explicit CumulativeOptions(double start, bool skip_nulls = false);
+  explicit CumulativeOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false);
+  static constexpr char const kTypeName[] = "CumulativeOptions";
+  static CumulativeOptions Defaults() { return CumulativeOptions(); }
+
+  /// Optional starting value for cumulative operation computation, default depends on the
+  /// operation and input type.
+  /// - sum: 0
+  /// - prod: 1
+  /// - min: maximum of the input type
+  /// - max: minimum of the input type
+  /// - mean: start is ignored because it has no meaning for mean
+  std::optional<std::shared_ptr<Scalar>> start;
+
+  /// If true, nulls in the input are ignored and produce a corresponding null output.
+  /// When false, the first null encountered is propagated through the remaining output.
+  bool skip_nulls = false;
+};
+using CumulativeSumOptions = CumulativeOptions;  // For backward compatibility
+
+/// \brief Options for pairwise functions
+class ARROW_EXPORT PairwiseOptions : public FunctionOptions {
+ public:
+  explicit PairwiseOptions(int64_t periods = 1);
+  static constexpr char const kTypeName[] = "PairwiseOptions";
+  static PairwiseOptions Defaults() { return PairwiseOptions(); }
+
+  /// Periods to shift for applying the binary operation, accepts negative values.
+  int64_t periods = 1;
+};
+
+/// @}
+
+/// \brief Filter with a boolean selection filter
+///
+/// The output will be populated with values from the input at positions
+/// where the selection filter is not 0. Nulls in the filter will be handled
+/// based on options.null_selection_behavior.
+/// +/// For example given values = ["a", "b", "c", null, "e", "f"] and +/// filter = [0, 1, 1, 0, null, 1], the output will be +/// (null_selection_behavior == DROP) = ["b", "c", "f"] +/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"] +/// +/// \param[in] values array to filter +/// \param[in] filter indicates which values should be filtered out +/// \param[in] options configures null_selection_behavior +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result Filter(const Datum& values, const Datum& filter, + const FilterOptions& options = FilterOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +namespace internal { + +// These internal functions are implemented in kernels/vector_selection.cc + +/// \brief Return the number of selected indices in the boolean filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +int64_t GetFilterOutputSize(const ArraySpan& filter, + FilterOptions::NullSelectionBehavior null_selection); + +/// \brief Compute uint64 selection indices for use with Take given a boolean +/// filter +/// +/// \param filter a plain or run-end encoded boolean array with or without nulls +/// \param null_selection how to handle nulls in the filter +ARROW_EXPORT +Result> GetTakeIndices( + const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection, + MemoryPool* memory_pool = default_memory_pool()); + +} // namespace internal + +/// \brief ReplaceWithMask replaces each value in the array corresponding +/// to a true value in the mask with the next element from `replacements`. +/// +/// \param[in] values Array input to replace +/// \param[in] mask Array or Scalar of Boolean mask values +/// \param[in] replacements The replacement values to draw from. There must +/// be as many replacement values as true values in the mask. +/// \param[in] ctx the function execution context, optional +/// +/// \return the resulting datum +/// +/// \since 5.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result ReplaceWithMask(const Datum& values, const Datum& mask, + const Datum& replacements, ExecContext* ctx = NULLPTR); + +/// \brief FillNullForward fill null values in forward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in forward direction. +/// +/// For example given values = ["a", "b", "c", null, null, "f"], +/// the output will be = ["a", "b", "c", "c", "c", "f"] +/// +/// \param[in] values datum from which to take +/// \param[in] ctx the function execution context, optional +/// \return the resulting datum +ARROW_EXPORT +Result FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR); + +/// \brief FillNullBackward fill null values in backward direction +/// +/// The output array will be of the same type as the input values +/// array, with replaced null values in backward direction. 
+///
+/// For example given values = ["a", "b", "c", null, null, "f"],
+/// the output will be = ["a", "b", "c", "f", "f", "f"]
+///
+/// \param[in] values datum from which to take
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Take from an array of values at indices in another array
+///
+/// The output array will be of the same type as the input values
+/// array, with elements taken from the values array at the given
+/// indices. If an index is null then the taken element will be null.
+///
+/// For example given values = ["a", "b", "c", null, "e", "f"] and
+/// indices = [2, 1, null, 3], the output will be
+/// = [values[2], values[1], null, values[3]]
+/// = ["c", "b", null, null]
+///
+/// \param[in] values datum from which to take
+/// \param[in] indices which values to take
+/// \param[in] options options
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> Take(const Datum& values, const Datum& indices,
+                   const TakeOptions& options = TakeOptions::Defaults(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Take with Array inputs and output
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
+                                    const TakeOptions& options = TakeOptions::Defaults(),
+                                    ExecContext* ctx = NULLPTR);
+
+/// \brief Drop Null from an array of values
+///
+/// The output array will be of the same type as the input values
+/// array, with elements taken from the values array without nulls.
+///
+/// For example given values = ["a", "b", "c", null, "e", "f"],
+/// the output will be = ["a", "b", "c", "e", "f"]
+///
+/// \param[in] values datum from which to take
+/// \param[in] ctx the function execution context, optional
+/// \return the resulting datum
+ARROW_EXPORT
+Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);
+
+/// \brief DropNull with Array inputs and output
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that partition an array around n-th sorted element.
+///
+/// Find index of n-th (0-based) smallest value and perform indirect
+/// partition of an array around that element. Output indices[0 ~ n-1]
+/// hold values no greater than n-th element, and indices[n+1 ~ end]
+/// hold values no less than n-th element. Elements in each partition
+/// are not sorted. Nulls will be partitioned to the end of the output.
+/// Output is not guaranteed to be stable.
+///
+/// \param[in] values array to be partitioned
+/// \param[in] n pivot array around sorted n-th element
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would partition an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
+                                            ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that partition an array around n-th sorted element.
+///
+/// This overload takes a PartitionNthOptions specifying the pivot index
+/// and the null handling.
+///
+/// \param[in] values array to be partitioned
+/// \param[in] options options including pivot index and null handling
+/// \param[in] ctx the function execution context, optional
+/// \return offsets indices that would partition an array
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
+                                            const PartitionNthOptions& options,
+                                            ExecContext* ctx = NULLPTR);
+
+/// \brief Return indices that would select the first `k` elements.
+/// +/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output +/// array will contain indices such that the item indicated by the k-th index will be in +/// the position it would be if the datum were sorted by `options.sort_keys`. However, +/// indices of null values will not be part of the output. The sort is not guaranteed to +/// be stable. +/// +/// \param[in] datum datum to be partitioned +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return a datum with the same schema as the input +ARROW_EXPORT +Result> SelectKUnstable(const Datum& datum, + const SelectKOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// Perform an indirect sort of array. The output array will contain +/// indices that would sort an array, which would be the same length +/// as input. Nulls will be stably partitioned to the end of the output +/// regardless of order. +/// +/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order +/// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0, +/// 3]. +/// +/// \param[in] array array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. +/// +/// \param[in] array array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const Array& array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// Perform an indirect sort of chunked array. The output array will +/// contain indices that would sort a chunked array, which would be +/// the same length as input. Nulls will be stably partitioned to the +/// end of the output regardless of order. +/// +/// For example given chunked_array = [[null, 1], [3.3], [null, 2, +/// 5.3]] and order = SortOrder::DESCENDING, the output will be [5, 2, +/// 4, 1, 0, 3]. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] order ascending or descending +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + SortOrder order = SortOrder::Ascending, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort a chunked array. +/// +/// This overload takes a ArraySortOptions specifying the sort order +/// and the null handling. +/// +/// \param[in] chunked_array chunked array to sort +/// \param[in] options options including sort order and null handling +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort an array +ARROW_EXPORT +Result> SortIndices(const ChunkedArray& chunked_array, + const ArraySortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Return the indices that would sort an input in the +/// specified order. Input is one of array, chunked array record batch +/// or table. 
+/// +/// Perform an indirect sort of input. The output array will contain +/// indices that would sort an input, which would be the same length +/// as input. Nulls will be stably partitioned to the start or to the end +/// of the output depending on SortOrder::null_placement. +/// +/// For example given input (table) = { +/// "column1": [[null, 1], [ 3, null, 2, 1]], +/// "column2": [[ 5], [3, null, null, 5, 5]], +/// } and options = { +/// {"column1", SortOrder::Ascending}, +/// {"column2", SortOrder::Descending}, +/// }, the output will be [5, 1, 4, 2, 0, 3]. +/// +/// \param[in] datum array, chunked array, record batch or table to sort +/// \param[in] options options +/// \param[in] ctx the function execution context, optional +/// \return offsets indices that would sort a table +ARROW_EXPORT +Result> SortIndices(const Datum& datum, const SortOptions& options, + ExecContext* ctx = NULLPTR); + +/// \brief Compute unique elements from an array-like object +/// +/// Note if a null occurs in the input it will NOT be included in the output. +/// +/// \param[in] datum array-like input +/// \param[in] ctx the function execution context, optional +/// \return result as Array +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> Unique(const Datum& datum, ExecContext* ctx = NULLPTR); + +// Constants for accessing the output of ValueCounts +ARROW_EXPORT extern const char kValuesFieldName[]; +ARROW_EXPORT extern const char kCountsFieldName[]; +ARROW_EXPORT extern const int32_t kValuesFieldIndex; +ARROW_EXPORT extern const int32_t kCountsFieldIndex; + +/// \brief Return counts of unique elements from an array-like object. +/// +/// Note that the counts do not include counts for nulls in the array. These can be +/// obtained separately from metadata. +/// +/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values +/// which can lead to unexpected results if the input Array has these values. +/// +/// \param[in] value array-like input +/// \param[in] ctx the function execution context, optional +/// \return counts An array of structs. +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result> ValueCounts(const Datum& value, + ExecContext* ctx = NULLPTR); + +/// \brief Dictionary-encode values in an array-like object +/// +/// Any nulls encountered in the dictionary will be handled according to the +/// specified null encoding behavior. +/// +/// For example, given values ["a", "b", null, "a", null] the output will be +/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null] +/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"] +/// +/// If the input is already dictionary encoded this function is a no-op unless +/// it needs to modify the null_encoding (TODO) +/// +/// \param[in] data array-like input +/// \param[in] ctx the function execution context, optional +/// \param[in] options configures null encoding behavior +/// \return result with same shape and type as input +/// +/// \since 1.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result DictionaryEncode( + const Datum& data, + const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Run-end-encode values in an array-like object +/// +/// The returned run-end encoded type uses the same value type of the input and +/// run-end type defined in the options. 
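+///
+/// A short sketch (hedged; `arr` is an assumed array-like Datum, and int16
+/// run ends are chosen purely for illustration):
+///
+///   arrow::compute::RunEndEncodeOptions opts(arrow::int16());
+///   ARROW_ASSIGN_OR_RAISE(arrow::Datum ree,
+///                         arrow::compute::RunEndEncode(arr, opts));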
+/// +/// \param[in] value array-like input +/// \param[in] options configures encoding behavior +/// \param[in] ctx the function execution context, optional +/// \return result with same shape but run-end encoded +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndEncode( + const Datum& value, + const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Decode a Run-End Encoded array to a plain array +/// +/// The output data type is the same as the values array type of run-end encoded +/// input. +/// +/// \param[in] value run-end-encoded input +/// \param[in] ctx the function execution context, optional +/// \return plain array resulting from decoding the run-end encoded input +/// +/// \since 12.0.0 +/// \note API not yet finalized +ARROW_EXPORT +Result RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative sum of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative sum behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeSum( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative product of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative prod behavior +/// \param[in] check_overflow whether to check for overflow, if true, return Invalid +/// status on overflow, otherwise wrap around on overflow +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeProd( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + bool check_overflow = false, ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative max of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative max behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMax( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative min of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative min behavior +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMin( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Compute the cumulative mean of an array-like object +/// +/// \param[in] values array-like input +/// \param[in] options configures cumulative mean behavior, `start` is ignored +/// \param[in] ctx the function execution context, optional +ARROW_EXPORT +Result CumulativeMean( + const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(), + ExecContext* ctx = NULLPTR); + +/// \brief Return the first order difference of an array. +/// +/// Computes the first order difference of an array, i.e. +/// output[i] = input[i] - input[i - p] if i >= p +/// output[i] = null otherwise +/// where p is the period. For example, with p = 1, +/// Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5]. 
+/// With p = 2, +/// Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6] +/// p can also be negative, in which case the diff is computed in +/// the opposite direction. +/// \param[in] array array input +/// \param[in] options options, specifying overflow behavior and period +/// \param[in] check_overflow whether to return error on overflow +/// \param[in] ctx the function execution context, optional +/// \return result as array +ARROW_EXPORT +Result> PairwiseDiff(const Array& array, + const PairwiseOptions& options, + bool check_overflow = false, + ExecContext* ctx = NULLPTR); + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbefe4a1ab7b7e432e07607f674b5de1c947cd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h @@ -0,0 +1,489 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array/data.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace compute { + +// It seems like 64K might be a good default chunksize to use for execution +// based on the experience of other query processing systems. The current +// default is not to chunk contiguous arrays, though, but this may change in +// the future once parallel execution is implemented +static constexpr int64_t kDefaultExecChunksize = UINT16_MAX; + +/// \brief Context for expression-global variables and options used by +/// function evaluation +class ARROW_EXPORT ExecContext { + public: + // If no function registry passed, the default is used. + explicit ExecContext(MemoryPool* pool = default_memory_pool(), + ::arrow::internal::Executor* executor = NULLPTR, + FunctionRegistry* func_registry = NULLPTR); + + /// \brief The MemoryPool used for allocations, default is + /// default_memory_pool(). + MemoryPool* memory_pool() const { return pool_; } + + const ::arrow::internal::CpuInfo* cpu_info() const; + + /// \brief An Executor which may be used to parallelize execution. + ::arrow::internal::Executor* executor() const { return executor_; } + + /// \brief The FunctionRegistry for looking up functions by name and + /// selecting kernels for execution. 
Defaults to the library-global function
+  /// registry provided by GetFunctionRegistry.
+  FunctionRegistry* func_registry() const { return func_registry_; }
+
+  // \brief Set maximum length unit of work for kernel execution. Larger
+  // contiguous array inputs will be split into smaller chunks, and, if
+  // possible and enabled, processed in parallel. The default chunksize is
+  // INT64_MAX, so contiguous arrays are not split.
+  void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
+
+  // \brief Maximum length for ExecBatch data chunks processed by
+  // kernels. Contiguous array inputs with longer length will be split into
+  // smaller chunks.
+  int64_t exec_chunksize() const { return exec_chunksize_; }
+
+  /// \brief Set whether to use multiple threads for function execution. This
+  /// is not yet used.
+  void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
+
+  /// \brief If true, then utilize multiple threads where relevant for function
+  /// execution. This is not yet used.
+  bool use_threads() const { return use_threads_; }
+
+  // Set the preallocation strategy for kernel execution as it relates to
+  // chunked execution. For chunked execution, whether via ChunkedArray inputs
+  // or splitting larger Array arguments into smaller pieces, contiguous
+  // allocation (if permitted by the kernel) will allocate one large array to
+  // write output into yielding it to the caller at the end. If this option is
+  // set to off, then preallocations will be performed independently for each
+  // chunk of execution
+  //
+  // TODO: At some point we might want to limit the size of contiguous
+  // preallocations. For example, even if the exec_chunksize is 64K or less, we
+  // might limit contiguous allocations to 1M records, say.
+  void set_preallocate_contiguous(bool preallocate) {
+    preallocate_contiguous_ = preallocate;
+  }
+
+  /// \brief If contiguous preallocations should be used when doing chunked
+  /// execution as specified by exec_chunksize(). See
+  /// set_preallocate_contiguous() for more information.
+  bool preallocate_contiguous() const { return preallocate_contiguous_; }
+
+ private:
+  MemoryPool* pool_;
+  ::arrow::internal::Executor* executor_;
+  FunctionRegistry* func_registry_;
+  int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
+  bool preallocate_contiguous_ = true;
+  bool use_threads_ = true;
+};
+
+// TODO: Consider standardizing on uint16 selection vectors and only use them
+// when we can ensure that each value is 64K length or smaller
+
+/// \brief Container for an array of value selection indices that were
+/// materialized from a filter.
+///
+/// Columnar query engines (see e.g. [1]) have found that rather than
+/// materializing filtered data, the filter can instead be converted to an
+/// array of the "on" indices and then "fusing" these indices in operator
+/// implementations. This is especially relevant for aggregations but also
+/// applies to scalar operations.
+///
+/// We are not yet using this so this is mostly a placeholder for now.
+/// +/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf +class ARROW_EXPORT SelectionVector { + public: + explicit SelectionVector(std::shared_ptr data); + + explicit SelectionVector(const Array& arr); + + /// \brief Create SelectionVector from boolean mask + static Result> FromMask(const BooleanArray& arr); + + const int32_t* indices() const { return indices_; } + int32_t length() const; + + private: + std::shared_ptr data_; + const int32_t* indices_; +}; + +/// An index to represent that a batch does not belong to an ordered stream +constexpr int64_t kUnsequencedIndex = -1; + +/// \brief A unit of work for kernel execution. It contains a collection of +/// Array and Scalar values and an optional SelectionVector indicating that +/// there is an unmaterialized filter that either must be materialized, or (if +/// the kernel supports it) pushed down into the kernel implementation. +/// +/// ExecBatch is semantically similar to RecordBatch in that in a SQL context +/// it represents a collection of records, but constant "columns" are +/// represented by Scalar values rather than having to be converted into arrays +/// with repeated values. +/// +/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight +/// than is desirable for this class. Microbenchmarks would help determine for +/// sure. See ARROW-8928. + +/// \addtogroup acero-internals +/// @{ + +struct ARROW_EXPORT ExecBatch { + ExecBatch() = default; + ExecBatch(std::vector values, int64_t length) + : values(std::move(values)), length(length) {} + + explicit ExecBatch(const RecordBatch& batch); + + /// \brief Infer the ExecBatch length from values. + static Result InferLength(const std::vector& values); + + /// Creates an ExecBatch with length-validation. + /// + /// If any value is given, then all values must have a common length. If the given + /// length is negative, then the length of the ExecBatch is set to this common length, + /// or to 1 if no values are given. Otherwise, the given length must equal the common + /// length, if any value is given. + static Result Make(std::vector values, int64_t length = -1); + + Result> ToRecordBatch( + std::shared_ptr schema, MemoryPool* pool = default_memory_pool()) const; + + /// The values representing positional arguments to be passed to a kernel's + /// exec function for processing. + std::vector values; + + /// A deferred filter represented as an array of indices into the values. + /// + /// For example, the filter [true, true, false, true] would be represented as + /// the selection vector [0, 1, 3]. When the selection vector is set, + /// ExecBatch::length is equal to the length of this array. + std::shared_ptr selection_vector; + + /// A predicate Expression guaranteed to evaluate to true for all rows in this batch. + Expression guarantee = literal(true); + + /// The semantic length of the ExecBatch. When the values are all scalars, + /// the length should be set to 1 for non-aggregate kernels, otherwise the + /// length is taken from the array values, except when there is a selection + /// vector. When there is a selection vector set, the length of the batch is + /// the length of the selection. Aggregate kernels can have an ExecBatch + /// formed by projecting just the partition columns from a batch in which + /// case, it would have scalar rows with length greater than 1. + /// + /// If the array values are of length 0 then the length is 0 regardless of + /// whether any values are Scalar. 
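+  ///
+  /// A minimal sketch of the common cases (hypothetical `arr3`, an array of
+  /// length 3, and `scalar`, a scalar Datum):
+  ///
+  /// \code
+  /// ARROW_ASSIGN_OR_RAISE(ExecBatch mixed, ExecBatch::Make({arr3, scalar}));
+  /// // mixed.length == 3, taken from the array value
+  /// ARROW_ASSIGN_OR_RAISE(ExecBatch scalars, ExecBatch::Make({scalar}));
+  /// // scalars.length == 1, the non-aggregate convention for all-scalar batches
+  /// \endcode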
+ int64_t length = 0; + + /// \brief index of this batch in a sorted stream of batches + /// + /// This index must be strictly monotonic starting at 0 without gaps or + /// it can be set to kUnsequencedIndex if there is no meaningful order + int64_t index = kUnsequencedIndex; + + /// \brief The sum of bytes in each buffer referenced by the batch + /// + /// Note: Scalars are not counted + /// Note: Some values may referenced only part of a buffer, for + /// example, an array with an offset. The actual data + /// visible to this batch will be smaller than the total + /// buffer size in this case. + int64_t TotalBufferSize() const; + + /// \brief Return the value at the i-th index + template + inline const Datum& operator[](index_type i) const { + return values[i]; + } + + bool Equals(const ExecBatch& other) const; + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + ExecBatch Slice(int64_t offset, int64_t length) const; + + Result SelectValues(const std::vector& ids) const; + + /// \brief A convenience for returning the types from the batch. + std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + std::string ToString() const; +}; + +inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); } +inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*); + +/// @} + +/// \defgroup compute-internals Utilities for calling functions, useful for those +/// extending the function registry +/// +/// @{ + +struct ExecValue { + ArraySpan array = {}; + const Scalar* scalar = NULLPTR; + + ExecValue(Scalar* scalar) // NOLINT implicit conversion + : scalar(scalar) {} + + ExecValue(ArraySpan array) // NOLINT implicit conversion + : array(std::move(array)) {} + + ExecValue(const ArrayData& array) { // NOLINT implicit conversion + this->array.SetMembers(array); + } + + ExecValue() = default; + ExecValue(const ExecValue& other) = default; + ExecValue& operator=(const ExecValue& other) = default; + ExecValue(ExecValue&& other) = default; + ExecValue& operator=(ExecValue&& other) = default; + + int64_t length() const { return this->is_array() ? this->array.length : 1; } + + bool is_array() const { return this->scalar == NULLPTR; } + bool is_scalar() const { return !this->is_array(); } + + void SetArray(const ArrayData& array) { + this->array.SetMembers(array); + this->scalar = NULLPTR; + } + + void SetScalar(const Scalar* scalar) { this->scalar = scalar; } + + template + const ExactType& scalar_as() const { + return ::arrow::internal::checked_cast(*this->scalar); + } + + /// XXX: here temporarily for compatibility with datum, see + /// e.g. MakeStructExec in scalar_nested.cc + int64_t null_count() const { + if (this->is_array()) { + return this->array.GetNullCount(); + } else { + return this->scalar->is_valid ? 
0 : 1; + } + } + + const DataType* type() const { + if (this->is_array()) { + return array.type; + } else { + return scalar->type.get(); + } + } +}; + +struct ARROW_EXPORT ExecResult { + // The default value of the variant is ArraySpan + std::variant> value; + + int64_t length() const { + if (this->is_array_span()) { + return this->array_span()->length; + } else { + return this->array_data()->length; + } + } + + const DataType* type() const { + if (this->is_array_span()) { + return this->array_span()->type; + } else { + return this->array_data()->type.get(); + } + } + + const ArraySpan* array_span() const { return &std::get(this->value); } + ArraySpan* array_span_mutable() { return &std::get(this->value); } + + bool is_array_span() const { return this->value.index() == 0; } + + const std::shared_ptr& array_data() const { + return std::get>(this->value); + } + ArrayData* array_data_mutable() { + return std::get>(this->value).get(); + } + + bool is_array_data() const { return this->value.index() == 1; } +}; + +/// \brief A "lightweight" column batch object which contains no +/// std::shared_ptr objects and does not have any memory ownership +/// semantics. Can represent a view onto an "owning" ExecBatch. +struct ARROW_EXPORT ExecSpan { + ExecSpan() = default; + ExecSpan(const ExecSpan& other) = default; + ExecSpan& operator=(const ExecSpan& other) = default; + ExecSpan(ExecSpan&& other) = default; + ExecSpan& operator=(ExecSpan&& other) = default; + + explicit ExecSpan(std::vector values, int64_t length) + : length(length), values(std::move(values)) {} + + explicit ExecSpan(const ExecBatch& batch) { + this->length = batch.length; + this->values.resize(batch.values.size()); + for (size_t i = 0; i < batch.values.size(); ++i) { + const Datum& in_value = batch[i]; + ExecValue* out_value = &this->values[i]; + if (in_value.is_array()) { + out_value->SetArray(*in_value.array()); + } else { + out_value->SetScalar(in_value.scalar().get()); + } + } + } + + /// \brief Return the value at the i-th index + template + inline const ExecValue& operator[](index_type i) const { + return values[i]; + } + + /// \brief A convenience for the number of values / arguments. + int num_values() const { return static_cast(values.size()); } + + std::vector GetTypes() const { + std::vector result; + for (const auto& value : this->values) { + result.emplace_back(value.type()); + } + return result; + } + + ExecBatch ToExecBatch() const { + ExecBatch result; + result.length = this->length; + for (const ExecValue& value : this->values) { + if (value.is_array()) { + result.values.push_back(value.array.ToArrayData()); + } else { + result.values.push_back(value.scalar->GetSharedPtr()); + } + } + return result; + } + + int64_t length = 0; + std::vector values; +}; + +/// \defgroup compute-call-function One-shot calls to compute functions +/// +/// @{ + +/// \brief One-shot invoker for all types of functions. +/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const std::vector& args, + ExecContext* ctx = NULLPTR); + +/// \brief One-shot invoker for all types of functions. 
+/// +/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs, +/// and wrapping of outputs. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + const FunctionOptions* options, ExecContext* ctx = NULLPTR); + +/// \brief Variant of CallFunction which uses a function's default options. +/// +/// NB: Some functions require FunctionOptions be provided. +ARROW_EXPORT +Result CallFunction(const std::string& func_name, const ExecBatch& batch, + ExecContext* ctx = NULLPTR); + +/// @} + +/// \defgroup compute-function-executor One-shot calls to obtain function executors +/// +/// @{ + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, std::vector in_types, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// \brief One-shot executor provider for all types of functions. +/// +/// This function creates and initializes a `FunctionExecutor` appropriate +/// for the given function name, input types (taken from the Datum arguments) +/// and function options. +ARROW_EXPORT +Result> GetFunctionExecutor( + const std::string& func_name, const std::vector& args, + const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h new file mode 100644 index 0000000000000000000000000000000000000000..9a36a6d3368fb9ee0486c9dba9ab86ba10764dc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h @@ -0,0 +1,295 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/compute/type_fwd.h" +#include "arrow/datum.h" +#include "arrow/type_fwd.h" +#include "arrow/util/small_vector.h" + +namespace arrow { +namespace compute { + +/// \defgroup expression-core Expressions to describe data transformations +/// +/// @{ + +/// An unbound expression which maps a single Datum to another Datum. +/// An expression is one of +/// - A literal Datum. +/// - A reference to a single (potentially nested) field of the input Datum. +/// - A call to a compute function, with arguments specified by other Expressions. 
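+///
+/// As a sketch, using the factory functions declared later in this header,
+/// the predicate `x > 1` could be constructed as:
+///
+/// \code
+/// Expression expr = greater(field_ref(FieldRef("x")), literal(1));
+/// \endcode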
+class ARROW_EXPORT Expression { + public: + struct Call { + std::string function_name; + std::vector arguments; + std::shared_ptr options; + // Cached hash value + size_t hash; + + // post-Bind properties: + std::shared_ptr function; + const Kernel* kernel = NULLPTR; + std::shared_ptr kernel_state; + TypeHolder type; + + void ComputeHash(); + }; + + std::string ToString() const; + bool Equals(const Expression& other) const; + size_t hash() const; + struct Hash { + size_t operator()(const Expression& expr) const { return expr.hash(); } + }; + + /// Bind this expression to the given input type, looking up Kernels and field types. + /// Some expression simplification may be performed and implicit casts will be inserted. + /// Any state necessary for execution will be initialized and returned. + Result Bind(const TypeHolder& in, ExecContext* = NULLPTR) const; + Result Bind(const Schema& in_schema, ExecContext* = NULLPTR) const; + + // XXX someday + // Clone all KernelState in this bound expression. If any function referenced by this + // expression has mutable KernelState, it is not safe to execute or apply simplification + // passes to it (or copies of it!) from multiple threads. Cloning state produces new + // KernelStates where necessary to ensure that Expressions may be manipulated safely + // on multiple threads. + // Result CloneState() const; + // Status SetState(ExpressionState); + + /// Return true if all an expression's field references have explicit types + /// and all of its functions' kernels are looked up. + bool IsBound() const; + + /// Return true if this expression is composed only of Scalar literals, field + /// references, and calls to ScalarFunctions. + bool IsScalarExpression() const; + + /// Return true if this expression is literal and entirely null. + bool IsNullLiteral() const; + + /// Return true if this expression could evaluate to true. Will return true for any + /// unbound or non-boolean Expressions. IsSatisfiable does not (currently) do any + /// canonicalization or simplification of the expression, so even Expressions + /// which are unsatisfiable may spuriously return `true` here. This function is + /// intended for use in predicate pushdown where a filter expression is simplified + /// by a guarantee, so it assumes that trying to simplify again would be redundant. 
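+  ///
+  /// A sketch of the intended pushdown pattern (assuming a bound `filter`
+  /// expression and a `guarantee` supplied by a data source):
+  ///
+  /// \code
+  /// ARROW_ASSIGN_OR_RAISE(auto simplified,
+  ///                       SimplifyWithGuarantee(filter, guarantee));
+  /// if (!simplified.IsSatisfiable()) {
+  ///   // No row in this partition can satisfy the filter; skip reading it.
+  /// }
+  /// \endcode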
+ bool IsSatisfiable() const; + + // XXX someday + // Result GetPipelines(); + + bool is_valid() const { return impl_ != NULLPTR; } + + /// Access a Call or return nullptr if this expression is not a call + const Call* call() const; + /// Access a Datum or return nullptr if this expression is not a literal + const Datum* literal() const; + /// Access a FieldRef or return nullptr if this expression is not a field_ref + const FieldRef* field_ref() const; + + /// The type to which this expression will evaluate + const DataType* type() const; + // XXX someday + // NullGeneralization::type nullable() const; + + struct Parameter { + FieldRef ref; + + // post-bind properties + TypeHolder type; + ::arrow::internal::SmallVector indices; + }; + const Parameter* parameter() const; + + Expression() = default; + explicit Expression(Call call); + explicit Expression(Datum literal); + explicit Expression(Parameter parameter); + + private: + using Impl = std::variant; + std::shared_ptr impl_; + + ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r); +}; + +inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); } +inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); } + +ARROW_EXPORT void PrintTo(const Expression&, std::ostream*); + +// Factories + +ARROW_EXPORT +Expression literal(Datum lit); + +template +Expression literal(Arg&& arg) { + return literal(Datum(std::forward(arg))); +} + +ARROW_EXPORT +Expression field_ref(FieldRef ref); + +ARROW_EXPORT +Expression call(std::string function, std::vector arguments, + std::shared_ptr options = NULLPTR); + +template ::value>::type> +Expression call(std::string function, std::vector arguments, + Options options) { + return call(std::move(function), std::move(arguments), + std::make_shared(std::move(options))); +} + +/// Assemble a list of all fields referenced by an Expression at any depth. +ARROW_EXPORT +std::vector FieldsInExpression(const Expression&); + +/// Check if the expression references any fields. +ARROW_EXPORT +bool ExpressionHasFieldRefs(const Expression&); + +struct ARROW_EXPORT KnownFieldValues; + +/// Assemble a mapping from field references to known values. This derives known values +/// from "equal" and "is_null" Expressions referencing a field and a literal. +ARROW_EXPORT +Result ExtractKnownFieldValues( + const Expression& guaranteed_true_predicate); + +/// @} + +/// \defgroup expression-passes Functions for modification of Expressions +/// +/// @{ +/// +/// These transform bound expressions. Some transforms utilize a guarantee, which is +/// provided as an Expression which is guaranteed to evaluate to true. The +/// guaranteed_true_predicate need not be bound, but canonicalization is currently +/// deferred to producers of guarantees. For example in order to be recognized as a +/// guarantee on a field value, an Expression must be a call to "equal" with field_ref LHS +/// and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, ... or +/// other semantically identical Expressions will not be recognized. + +/// Weak canonicalization which establishes guarantees for subsequent passes. Even +/// equivalent Expressions may result in different canonicalized expressions. 
+/// TODO this could be a strong canonicalization +ARROW_EXPORT +Result Canonicalize(Expression, ExecContext* = NULLPTR); + +/// Simplify Expressions based on literal arguments (for example, add(null, x) will always +/// be null so replace the call with a null literal). Includes early evaluation of all +/// calls whose arguments are entirely literal. +ARROW_EXPORT +Result FoldConstants(Expression); + +/// Simplify Expressions by replacing with known values of the fields which it references. +ARROW_EXPORT +Result ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values, + Expression); + +/// Simplify an expression by replacing subexpressions based on a guarantee: +/// a boolean expression which is guaranteed to evaluate to `true`. For example, this is +/// used to remove redundant function calls from a filter expression or to replace a +/// reference to a constant-value field with a literal. +ARROW_EXPORT +Result SimplifyWithGuarantee(Expression, + const Expression& guaranteed_true_predicate); + +/// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3]) +/// +/// This isn't usually needed and does not offer any simplification by itself. However, +/// it can be useful to normalize an expression to paths to make it simpler to work with. +ARROW_EXPORT Result RemoveNamedRefs(Expression expression); + +/// @} + +// Execution + +/// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a +/// RecordBatch which may have missing or incorrectly ordered columns. +/// Missing fields will be replaced with null scalars. +ARROW_EXPORT Result MakeExecBatch(const Schema& full_schema, + const Datum& partial, + Expression guarantee = literal(true)); + +/// Execute a scalar expression against the provided state and input ExecBatch. This +/// expression must be bound. 
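+///
+/// A minimal end-to-end sketch (assuming `schema` describes an int32 field
+/// "x" and `batch` is a matching std::shared_ptr<RecordBatch>):
+///
+/// \code
+/// Expression expr = call("add", {field_ref(FieldRef("x")), literal(1)});
+/// ARROW_ASSIGN_OR_RAISE(expr, expr.Bind(*schema));
+/// ARROW_ASSIGN_OR_RAISE(ExecBatch input, MakeExecBatch(*schema, batch));
+/// ARROW_ASSIGN_OR_RAISE(Datum out, ExecuteScalarExpression(expr, input));
+/// \endcode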
+ARROW_EXPORT +Result ExecuteScalarExpression(const Expression&, const ExecBatch& input, + ExecContext* = NULLPTR); + +/// Convenience function for invoking against a RecordBatch +ARROW_EXPORT +Result ExecuteScalarExpression(const Expression&, const Schema& full_schema, + const Datum& partial_input, ExecContext* = NULLPTR); + +// Serialization + +ARROW_EXPORT +Result> Serialize(const Expression&); + +ARROW_EXPORT +Result Deserialize(std::shared_ptr); + +/// \defgroup expression-convenience Helpers for convenient expression creation +/// +/// @{ + +ARROW_EXPORT Expression project(std::vector values, + std::vector names); + +ARROW_EXPORT Expression equal(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression less(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression greater(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs); + +ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false); + +ARROW_EXPORT Expression is_valid(Expression lhs); + +ARROW_EXPORT Expression and_(Expression lhs, Expression rhs); +ARROW_EXPORT Expression and_(const std::vector&); +ARROW_EXPORT Expression or_(Expression lhs, Expression rhs); +ARROW_EXPORT Expression or_(const std::vector&); +ARROW_EXPORT Expression not_(Expression operand); + +/// @} + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..1adb3e96c97c8cbaa20f99c29eea41c535aae64d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h @@ -0,0 +1,752 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE: API is EXPERIMENTAL and will change without going through a +// deprecation cycle + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/compute/exec.h" +#include "arrow/datum.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h. +// No other BSD seems to do so. The name is used as an identifier in MemAllocation enum. +#if defined(__APPLE__) && defined(PREALLOCATE) +#undef PREALLOCATE +#endif + +namespace arrow { +namespace compute { + +class FunctionOptions; + +/// \brief Base class for opaque kernel-specific state. 
For example, if there +/// is some kind of initialization required. +struct ARROW_EXPORT KernelState { + virtual ~KernelState() = default; +}; + +/// \brief Context/state for the execution of a particular kernel. +class ARROW_EXPORT KernelContext { + public: + // Can pass optional backreference; not used consistently for the + // moment but will be made so in the future + explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR) + : exec_ctx_(exec_ctx), kernel_(kernel) {} + + /// \brief Allocate buffer from the context's memory pool. The contents are + /// not initialized. + Result> Allocate(int64_t nbytes); + + /// \brief Allocate buffer for bitmap from the context's memory pool. Like + /// Allocate, the contents of the buffer are not initialized but the last + /// byte is preemptively zeroed to help avoid ASAN or valgrind issues. + Result> AllocateBitmap(int64_t num_bits); + + /// \brief Assign the active KernelState to be utilized for each stage of + /// kernel execution. Ownership and memory lifetime of the KernelState must + /// be minded separately. + void SetState(KernelState* state) { state_ = state; } + + // Set kernel that is being invoked since some kernel + // implementations will examine the kernel state. + void SetKernel(const Kernel* kernel) { kernel_ = kernel; } + + KernelState* state() { return state_; } + + /// \brief Configuration related to function execution that is to be shared + /// across multiple kernels. + ExecContext* exec_context() { return exec_ctx_; } + + /// \brief The memory pool to use for allocations. For now, it uses the + /// MemoryPool contained in the ExecContext used to create the KernelContext. + MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); } + + const Kernel* kernel() const { return kernel_; } + + private: + ExecContext* exec_ctx_; + KernelState* state_ = NULLPTR; + const Kernel* kernel_ = NULLPTR; +}; + +/// \brief An type-checking interface to permit customizable validation rules +/// for use with InputType and KernelSignature. This is for scenarios where the +/// acceptance is not an exact type instance, such as a TIMESTAMP type for a +/// specific TimeUnit, but permitting any time zone. +struct ARROW_EXPORT TypeMatcher { + virtual ~TypeMatcher() = default; + + /// \brief Return true if this matcher accepts the data type. + virtual bool Matches(const DataType& type) const = 0; + + /// \brief A human-interpretable string representation of what the type + /// matcher checks for, usable when printing KernelSignature or formatting + /// error messages. + virtual std::string ToString() const = 0; + + /// \brief Return true if this TypeMatcher contains the same matching rule as + /// the other. Currently depends on RTTI. + virtual bool Equals(const TypeMatcher& other) const = 0; +}; + +namespace match { + +/// \brief Match any DataType instance having the same DataType::id. +ARROW_EXPORT std::shared_ptr SameTypeId(Type::type type_id); + +/// \brief Match any TimestampType instance having the same unit, but the time +/// zones can be different. 
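+///
+/// For example, TimestampTypeUnit(TimeUnit::MILLI) matches both
+/// timestamp(TimeUnit::MILLI) and timestamp(TimeUnit::MILLI, "UTC").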
+ARROW_EXPORT std::shared_ptr TimestampTypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time32TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr Time64TypeUnit(TimeUnit::type unit); +ARROW_EXPORT std::shared_ptr DurationTypeUnit(TimeUnit::type unit); + +// \brief Match any integer type +ARROW_EXPORT std::shared_ptr Integer(); + +// Match types using 32-bit varbinary representation +ARROW_EXPORT std::shared_ptr BinaryLike(); + +// Match types using 64-bit varbinary representation +ARROW_EXPORT std::shared_ptr LargeBinaryLike(); + +// Match any fixed binary type +ARROW_EXPORT std::shared_ptr FixedSizeBinaryLike(); + +// \brief Match any primitive type (boolean or any type representable as a C +// Type) +ARROW_EXPORT std::shared_ptr Primitive(); + +// \brief Match any integer type that can be used as run-end in run-end encoded +// arrays +ARROW_EXPORT std::shared_ptr RunEndInteger(); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr value_type_matcher); + +/// \brief Match run-end encoded types that use any valid run-end type and +/// encode specific value types +/// +/// @param[in] value_type_id a type id that the type of the values field should match +ARROW_EXPORT std::shared_ptr RunEndEncoded(Type::type value_type_id); + +/// \brief Match run-end encoded types that encode specific run-end and value types +/// +/// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field +/// @param[in] value_type_matcher a matcher that is applied to the values field +ARROW_EXPORT std::shared_ptr RunEndEncoded( + std::shared_ptr run_end_type_matcher, + std::shared_ptr value_type_matcher); + +} // namespace match + +/// \brief An object used for type-checking arguments to be passed to a kernel +/// and stored in a KernelSignature. The type-checking rule can be supplied +/// either with an exact DataType instance or a custom TypeMatcher. +class ARROW_EXPORT InputType { + public: + /// \brief The kind of type-checking rule that the InputType contains. + enum Kind { + /// \brief Accept any value type. + ANY_TYPE, + + /// \brief A fixed arrow::DataType and will only exact match having this + /// exact type (e.g. same TimestampType unit, same decimal scale and + /// precision, or same nested child types). + EXACT_TYPE, + + /// \brief Uses a TypeMatcher implementation to check the type. + USE_TYPE_MATCHER + }; + + /// \brief Accept any value type + InputType() : kind_(ANY_TYPE) {} + + /// \brief Accept an exact value type. + InputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(EXACT_TYPE), type_(std::move(type)) {} + + /// \brief Use the passed TypeMatcher to type check. + InputType(std::shared_ptr type_matcher) // NOLINT implicit construction + : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {} + + /// \brief Match any type with the given Type::type. Uses a TypeMatcher for + /// its implementation. 
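+  ///
+  /// For example, InputType(Type::DECIMAL128) accepts any decimal128 type,
+  /// regardless of its precision and scale.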
+  InputType(Type::type type_id)  // NOLINT implicit construction
+      : InputType(match::SameTypeId(type_id)) {}
+
+  InputType(const InputType& other) { CopyInto(other); }
+
+  void operator=(const InputType& other) { CopyInto(other); }
+
+  InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
+
+  void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
+
+  // \brief Match any input (array, scalar of any type)
+  static InputType Any() { return InputType(); }
+
+  /// \brief Return true if this input type matches the same type cases as the
+  /// other.
+  bool Equals(const InputType& other) const;
+
+  bool operator==(const InputType& other) const { return this->Equals(other); }
+
+  bool operator!=(const InputType& other) const { return !(*this == other); }
+
+  /// \brief Return hash code.
+  size_t Hash() const;
+
+  /// \brief Render a human-readable string representation.
+  std::string ToString() const;
+
+  /// \brief Return true if the Datum matches this argument kind in
+  /// type (and only allows scalar or array-like Datums).
+  bool Matches(const Datum& value) const;
+
+  /// \brief Return true if the type matches this InputType
+  bool Matches(const DataType& type) const;
+
+  /// \brief The type matching rule that this InputType uses.
+  Kind kind() const { return kind_; }
+
+  /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
+  /// must match. Otherwise this function should not be used and will assert in
+  /// debug builds.
+  const std::shared_ptr<DataType>& type() const;
+
+  /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
+  /// checking the type of a value. Otherwise this function should not be used
+  /// and will assert in debug builds.
+  const TypeMatcher& type_matcher() const;
+
+ private:
+  void CopyInto(const InputType& other) {
+    this->kind_ = other.kind_;
+    this->type_ = other.type_;
+    this->type_matcher_ = other.type_matcher_;
+  }
+
+  void MoveInto(InputType&& other) {
+    this->kind_ = other.kind_;
+    this->type_ = std::move(other.type_);
+    this->type_matcher_ = std::move(other.type_matcher_);
+  }
+
+  Kind kind_;
+
+  // For EXACT_TYPE Kind
+  std::shared_ptr<DataType> type_;
+
+  // For USE_TYPE_MATCHER Kind
+  std::shared_ptr<TypeMatcher> type_matcher_;
+};
+
+/// \brief Container to capture both exact and input-dependent output types.
+class ARROW_EXPORT OutputType {
+ public:
+  /// \brief An enum indicating whether the value type is an invariant fixed
+  /// value or one that's computed by a kernel-defined resolver function.
+  enum ResolveKind { FIXED, COMPUTED };
+
+  /// Type resolution function. Given input types, return output type. This
+  /// function MAY use the kernel state to decide the output type based on
+  /// the FunctionOptions.
+  ///
+  /// This function SHOULD _not_ be used to check for arity; that is to be
+  /// performed one or more layers above.
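+  ///
+  /// A sketch of a common resolver, for the "output type equals the first
+  /// input type" case:
+  ///
+  /// \code
+  /// OutputType out_type([](KernelContext*, const std::vector<TypeHolder>& types)
+  ///                         -> Result<TypeHolder> { return types.front(); });
+  /// \endcode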
+ using Resolver = + std::function(KernelContext*, const std::vector&)>; + + /// \brief Output an exact type + OutputType(std::shared_ptr type) // NOLINT implicit construction + : kind_(FIXED), type_(std::move(type)) {} + + /// \brief Output a computed type depending on actual input types + template + OutputType(Fn resolver) // NOLINT implicit construction + : kind_(COMPUTED), resolver_(std::move(resolver)) {} + + OutputType(const OutputType& other) { + this->kind_ = other.kind_; + this->type_ = other.type_; + this->resolver_ = other.resolver_; + } + + OutputType(OutputType&& other) { + this->kind_ = other.kind_; + this->type_ = std::move(other.type_); + this->resolver_ = other.resolver_; + } + + OutputType& operator=(const OutputType&) = default; + OutputType& operator=(OutputType&&) = default; + + /// \brief Return the type of the expected output value of the kernel given + /// the input argument types. The resolver may make use of state information + /// kept in the KernelContext. + Result Resolve(KernelContext* ctx, + const std::vector& args) const; + + /// \brief The exact output value type for the FIXED kind. + const std::shared_ptr& type() const; + + /// \brief For use with COMPUTED resolution strategy. It may be more + /// convenient to invoke this with OutputType::Resolve returned from this + /// method. + const Resolver& resolver() const; + + /// \brief Render a human-readable string representation. + std::string ToString() const; + + /// \brief Return the kind of type resolution of this output type, whether + /// fixed/invariant or computed by a resolver. + ResolveKind kind() const { return kind_; } + + private: + ResolveKind kind_; + + // For FIXED resolution + std::shared_ptr type_; + + // For COMPUTED resolution + Resolver resolver_ = NULLPTR; +}; + +/// \brief Holds the input types and output type of the kernel. +/// +/// VarArgs functions with minimum N arguments should pass up to N input types to be +/// used to validate the input types of a function invocation. The first N-1 types +/// will be matched against the first N-1 arguments, and the last type will be +/// matched against the remaining arguments. +class ARROW_EXPORT KernelSignature { + public: + KernelSignature(std::vector in_types, OutputType out_type, + bool is_varargs = false); + + /// \brief Convenience ctor since make_shared can be awkward + static std::shared_ptr Make(std::vector in_types, + OutputType out_type, + bool is_varargs = false); + + /// \brief Return true if the signature if compatible with the list of input + /// value descriptors. + bool MatchesInputs(const std::vector& types) const; + + /// \brief Returns true if the input types of each signature are + /// equal. Well-formed functions should have a deterministic output type + /// given input types, but currently it is the responsibility of the + /// developer to ensure this. + bool Equals(const KernelSignature& other) const; + + bool operator==(const KernelSignature& other) const { return this->Equals(other); } + + bool operator!=(const KernelSignature& other) const { return !(*this == other); } + + /// \brief Compute a hash code for the signature + size_t Hash() const; + + /// \brief The input types for the kernel. For VarArgs functions, this should + /// generally contain a single validator to use for validating all of the + /// function arguments. + const std::vector& in_types() const { return in_types_; } + + /// \brief The output type for the kernel. 
Use Resolve to return the + /// exact output given input argument types, since many kernels' + /// output types depend on their input types (or their type + /// metadata). + const OutputType& out_type() const { return out_type_; } + + /// \brief Render a human-readable string representation + std::string ToString() const; + + bool is_varargs() const { return is_varargs_; } + + private: + std::vector in_types_; + OutputType out_type_; + bool is_varargs_; + + // For caching the hash code after it's computed the first time + mutable uint64_t hash_code_; +}; + +/// \brief A function may contain multiple variants of a kernel for a given +/// type combination for different SIMD levels. Based on the active system's +/// CPU info or the user's preferences, we can elect to use one over the other. +struct SimdLevel { + enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX }; +}; + +/// \brief The strategy to use for propagating or otherwise populating the +/// validity bitmap of a kernel output. +struct NullHandling { + enum type { + /// Compute the output validity bitmap by intersecting the validity bitmaps + /// of the arguments using bitwise-and operations. This means that values + /// in the output are valid/non-null only if the corresponding values in + /// all input arguments were valid/non-null. Kernel generally need not + /// touch the bitmap thereafter, but a kernel's exec function is permitted + /// to alter the bitmap after the null intersection is computed if it needs + /// to. + INTERSECTION, + + /// Kernel expects a pre-allocated buffer to write the result bitmap + /// into. The preallocated memory is not zeroed (except for the last byte), + /// so the kernel should ensure to completely populate the bitmap. + COMPUTED_PREALLOCATE, + + /// Kernel allocates and sets the validity bitmap of the output. + COMPUTED_NO_PREALLOCATE, + + /// Kernel output is never null and a validity bitmap does not need to be + /// allocated. + OUTPUT_NOT_NULL + }; +}; + +/// \brief The preference for memory preallocation of fixed-width type outputs +/// in kernel execution. +struct MemAllocation { + enum type { + // For data types that support pre-allocation (i.e. fixed-width), the + // kernel expects to be provided a pre-allocated data buffer to write + // into. Non-fixed-width types must always allocate their own data + // buffers. The allocation made for the same length as the execution batch, + // so vector kernels yielding differently sized output should not use this. + // + // It is valid for the data to not be preallocated but the validity bitmap + // is (or is computed using the intersection/bitwise-and method). + // + // For variable-size output types like BinaryType or StringType, or for + // nested types, this option has no effect. + PREALLOCATE, + + // The kernel is responsible for allocating its own data buffer for + // fixed-width type outputs. + NO_PREALLOCATE + }; +}; + +struct Kernel; + +/// \brief Arguments to pass to an KernelInit function. A struct is used to help +/// avoid API breakage should the arguments passed need to be expanded. +struct KernelInitArgs { + /// \brief A pointer to the kernel being initialized. The init function may + /// depend on the kernel's KernelSignature or other data contained there. + const Kernel* kernel; + + /// \brief The types of the input arguments that the kernel is + /// about to be executed against. + const std::vector& inputs; + + /// \brief Opaque options specific to this kernel. May be nullptr for functions + /// that do not require options. 
+ const FunctionOptions* options; +}; + +/// \brief Common initializer function for all kernel types. +using KernelInit = std::function>( + KernelContext*, const KernelInitArgs&)>; + +/// \brief Base type for kernels. Contains the function signature and +/// optionally the state initialization function, along with some common +/// attributes +struct ARROW_EXPORT Kernel { + Kernel() = default; + + Kernel(std::shared_ptr sig, KernelInit init) + : signature(std::move(sig)), init(std::move(init)) {} + + Kernel(std::vector in_types, OutputType out_type, KernelInit init) + : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init)) {} + + /// \brief The "signature" of the kernel containing the InputType input + /// argument validators and OutputType output type resolver. + std::shared_ptr signature; + + /// \brief Create a new KernelState for invocations of this kernel, e.g. to + /// set up any options or state relevant for execution. + KernelInit init; + + /// \brief Create a vector of new KernelState for invocations of this kernel. + static Status InitAll(KernelContext*, const KernelInitArgs&, + std::vector>*); + + /// \brief Indicates whether execution can benefit from parallelization + /// (splitting large chunks into smaller chunks and using multiple + /// threads). Some kernels may not support parallel execution at + /// all. Synchronization and concurrency-related issues are currently the + /// responsibility of the Kernel's implementation. + bool parallelizable = true; + + /// \brief Indicates the level of SIMD instruction support in the host CPU is + /// required to use the function. The intention is for functions to be able to + /// contain multiple kernels with the same signature but different levels of SIMD, + /// so that the most optimized kernel supported on a host's processor can be chosen. + SimdLevel::type simd_level = SimdLevel::NONE; + + // Additional kernel-specific data + std::shared_ptr data; +}; + +/// \brief The scalar kernel execution API that must be implemented for SCALAR +/// kernel types. This includes both stateless and stateful kernels. Kernels +/// depending on some execution state access that state via subclasses of +/// KernelState set on the KernelContext object. Implementations should +/// endeavor to write into pre-allocated memory if they are able, though for +/// some kernels (e.g. in cases when a builder like StringBuilder) must be +/// employed this may not be possible. +using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*); + +/// \brief Kernel data structure for implementations of ScalarFunction. In +/// addition to the members found in Kernel, contains the null handling +/// and memory pre-allocation preferences. +struct ARROW_EXPORT ScalarKernel : public Kernel { + ScalarKernel() = default; + + ScalarKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(sig), init), exec(exec) {} + + ScalarKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {} + + /// \brief Perform a single invocation of this kernel. Depending on the + /// implementation, it may only write into preallocated memory, while in some + /// cases it will allocate its own memory. Any required state is managed + /// through the KernelContext. 
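+  ///
+  /// A skeleton of a conforming exec function (hypothetical, not a kernel
+  /// that ships with Arrow):
+  ///
+  /// \code
+  /// Status MyExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
+  ///   // Read the inputs from batch.values and write the result into *out,
+  ///   // which by default points at preallocated memory.
+  ///   return Status::OK();
+  /// }
+  /// \endcode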
+ ArrayKernelExec exec; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. + bool can_write_into_slices = true; + + // For scalar functions preallocated data and intersecting arg validity + // bitmaps is a reasonable default + NullHandling::type null_handling = NullHandling::INTERSECTION; + MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE; +}; + +// ---------------------------------------------------------------------- +// VectorKernel (for VectorFunction) + +/// \brief Kernel data structure for implementations of VectorFunction. In +/// contains an optional finalizer function, the null handling and memory +/// pre-allocation preferences (which have different defaults from +/// ScalarKernel), and some other execution-related options. +struct ARROW_EXPORT VectorKernel : public Kernel { + /// \brief See VectorKernel::finalize member for usage + using FinalizeFunc = std::function*)>; + + /// \brief Function for executing a stateful VectorKernel against a + /// ChunkedArray input. Does not need to be defined for all VectorKernels + using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out); + + VectorKernel() = default; + + VectorKernel(std::vector in_types, OutputType out_type, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(in_types), std::move(out_type), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + VectorKernel(std::shared_ptr sig, ArrayKernelExec exec, + KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR) + : Kernel(std::move(sig), std::move(init)), + exec(exec), + finalize(std::move(finalize)) {} + + /// \brief Perform a single invocation of this kernel. Any required state is + /// managed through the KernelContext. + ArrayKernelExec exec; + + /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined + ChunkedExec exec_chunked = NULLPTR; + + /// \brief For VectorKernel, convert intermediate results into finalized + /// results. Mutates input argument. Some kernels may accumulate state + /// (example: hashing-related functions) through processing chunked inputs, and + /// then need to attach some accumulated state to each of the outputs of + /// processing each chunk of data. + FinalizeFunc finalize; + + /// Since vector kernels generally are implemented rather differently from + /// scalar/elementwise kernels (and they may not even yield arrays of the same + /// size), so we make the developer opt-in to any memory preallocation rather + /// than having to turn it off. + NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE; + MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE; + + /// \brief Writing execution results into larger contiguous allocations + /// requires that the kernel be able to write into sliced output ArrayData*, + /// including sliced output validity bitmaps. Some kernel implementations may + /// not be able to do this, so setting this to false disables this + /// functionality. + bool can_write_into_slices = true; + + /// Some vector kernels can do chunkwise execution using ExecSpanIterator, + /// in some cases accumulating some state. 
Other kernels (like Take) need to + /// be passed whole arrays and don't work on ChunkedArray inputs + bool can_execute_chunkwise = true; + + /// Some kernels (like unique and value_counts) yield non-chunked output from + /// chunked-array inputs. This option controls how the results are boxed when + /// returned from ExecVectorFunction + /// + /// true -> ChunkedArray + /// false -> Array + bool output_chunked = true; +}; + +// ---------------------------------------------------------------------- +// ScalarAggregateKernel (for ScalarAggregateFunction) + +using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*); +// Finalize returns Datum to permit multiple return values +using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// ScalarAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * consume: processes an ExecSpan and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. +/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT ScalarAggregateKernel : public Kernel { + ScalarAggregateKernel(std::shared_ptr sig, KernelInit init, + ScalarAggregateConsume consume, ScalarAggregateMerge merge, + ScalarAggregateFinalize finalize, const bool ordered) + : Kernel(std::move(sig), std::move(init)), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + ScalarAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, ScalarAggregateConsume consume, + ScalarAggregateMerge merge, ScalarAggregateFinalize finalize, + const bool ordered) + : ScalarAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), consume, merge, finalize, ordered) {} + + /// \brief Merge a vector of KernelStates into a single KernelState. + /// The merged state will be returned and will be set on the KernelContext. + static Result> MergeAll( + const ScalarAggregateKernel* kernel, KernelContext* ctx, + std::vector> states); + + ScalarAggregateConsume consume; + ScalarAggregateMerge merge; + ScalarAggregateFinalize finalize; + /// \brief Whether this kernel requires ordering + /// Some aggregations, such as, "first", requires some kind of input order. The + /// order can be implicit, e.g., the order of the input data, or explicit, e.g. + /// the ordering specified with a window aggregation. + /// The caller of the aggregate kernel is responsible for passing data in some + /// defined order to the kernel. The flag here is a way for the kernel to tell + /// the caller that data passed to the kernel must be defined in some order. 
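+  ///
+  /// For example, a kernel implementing "first" would set ordered = true,
+  /// since consuming the same batches in a different order can change its
+  /// result.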
+ bool ordered = false; +}; + +// ---------------------------------------------------------------------- +// HashAggregateKernel (for HashAggregateFunction) + +using HashAggregateResize = Status (*)(KernelContext*, int64_t); +using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&); +using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&); + +// Finalize returns Datum to permit multiple return values +using HashAggregateFinalize = Status (*)(KernelContext*, Datum*); + +/// \brief Kernel data structure for implementations of +/// HashAggregateFunction. The four necessary components of an aggregation +/// kernel are the init, consume, merge, and finalize functions. +/// +/// * init: creates a new KernelState for a kernel. +/// * resize: ensure that the KernelState can accommodate the specified number of groups. +/// * consume: processes an ExecSpan (which includes the argument as well +/// as an array of group identifiers) and updates the KernelState found in the +/// KernelContext. +/// * merge: combines one KernelState with another. +/// * finalize: produces the end result of the aggregation using the +/// KernelState in the KernelContext. +struct ARROW_EXPORT HashAggregateKernel : public Kernel { + HashAggregateKernel() = default; + + HashAggregateKernel(std::shared_ptr sig, KernelInit init, + HashAggregateResize resize, HashAggregateConsume consume, + HashAggregateMerge merge, HashAggregateFinalize finalize, + const bool ordered) + : Kernel(std::move(sig), std::move(init)), + resize(resize), + consume(consume), + merge(merge), + finalize(finalize), + ordered(ordered) {} + + HashAggregateKernel(std::vector in_types, OutputType out_type, + KernelInit init, HashAggregateConsume consume, + HashAggregateResize resize, HashAggregateMerge merge, + HashAggregateFinalize finalize, const bool ordered) + : HashAggregateKernel( + KernelSignature::Make(std::move(in_types), std::move(out_type)), + std::move(init), resize, consume, merge, finalize, ordered) {} + + HashAggregateResize resize; + HashAggregateConsume consume; + HashAggregateMerge merge; + HashAggregateFinalize finalize; + /// @brief whether the summarizer requires ordering + /// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel + /// for detailed doc of this variable. + bool ordered = false; +}; + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..89f32ceb0f906e0d50bf063da22f33c3a856fe5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/util/visibility.h" + +namespace arrow { + +struct Datum; +struct TypeHolder; + +namespace compute { + +class Function; +class ScalarAggregateFunction; +class FunctionExecutor; +class FunctionOptions; +class FunctionRegistry; + +/// \brief Return the process-global function registry. +// Defined in registry.cc +ARROW_EXPORT FunctionRegistry* GetFunctionRegistry(); + +class CastOptions; + +struct ExecBatch; +class ExecContext; +class KernelContext; + +struct Kernel; +struct ScalarKernel; +struct ScalarAggregateKernel; +struct VectorKernel; + +struct KernelState; + +class Expression; + +ARROW_EXPORT ExecContext* default_exec_context(); +ARROW_EXPORT ExecContext* threaded_exec_context(); + +} // namespace compute +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..2a0e6ba709d974daebf81cf9e6cdb7aa8b947cc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/result.h" + +namespace arrow { + +template +Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out, + UnaryOperation unary_op) { + for (; first != last; ++first, (void)++out) { + ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first)); + } + return Status::OK(); +} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h new file mode 100644 index 0000000000000000000000000000000000000000..71920e49f4aa2b1d92312b4aabaffafe35d323c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h @@ -0,0 +1,221 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/memory_pool.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" + +namespace arrow { +namespace internal { + +struct BitmapWordAlignParams { + int64_t leading_bits; + int64_t trailing_bits; + int64_t trailing_bit_offset; + const uint8_t* aligned_start; + int64_t aligned_bits; + int64_t aligned_words; +}; + +// Compute parameters for accessing a bitmap using aligned word instructions. +// The returned parameters describe: +// - a leading area of size `leading_bits` before the aligned words +// - a word-aligned area of size `aligned_bits` +// - a trailing area of size `trailing_bits` after the aligned words +template +inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset, + int64_t length) { + static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES), + "ALIGN_IN_BYTES should be a positive power of two"); + constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8; + + BitmapWordAlignParams p; + + // Compute a "bit address" that we can align up to ALIGN_IN_BITS. + // We don't care about losing the upper bits since we are only interested in the + // difference between both addresses. + const uint64_t bit_addr = + reinterpret_cast(data) * 8 + static_cast(bit_offset); + const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS); + + p.leading_bits = std::min(length, aligned_bit_addr - bit_addr); + p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS; + p.aligned_bits = p.aligned_words * ALIGN_IN_BITS; + p.trailing_bits = length - p.leading_bits - p.aligned_bits; + p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits; + + p.aligned_start = data + (bit_offset + p.leading_bits) / 8; + return p; +} +} // namespace internal + +namespace util { + +// Functions to check if the provided Arrow object is aligned by the specified alignment + +/// \brief Special alignment value to use data type-specific alignment +/// +/// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment +/// functions, then the function will ensure each buffer is suitably aligned +/// for the data type of the array. For example, given an int32 buffer the values +/// buffer's address must be a multiple of 4. Given a large_string buffer the offsets +/// buffer's address must be a multiple of 8. 
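+///
+/// A minimal sketch of type-specific checking (illustrative; `values` stands
+/// for an int32 array obtained elsewhere, e.g. from an IPC stream):
+///
+///   // With kValueAlignment, the int32 values buffer is checked against a
+///   // 4-byte boundary rather than a caller-supplied constant.
+///   bool ok = arrow::util::CheckAlignment(*values, arrow::util::kValueAlignment);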
+constexpr int64_t kValueAlignment = -3; + +/// \brief Calculate if the buffer's address is a multiple of `alignment` +/// +/// If `alignment` is less than or equal to 0 then this method will always return true +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment); +/// \brief Calculate if all buffers in the array data are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array data to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment); +/// \brief Calculate if all buffers in the array are aligned +/// +/// This will also check the buffers in the dictionary and any children +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment); + +// Following functions require an additional boolean vector which stores the +// alignment check bits of the constituent objects. +// For example, needs_alignment vector for a ChunkedArray will contain the +// check bits of the constituent Arrays. +// The boolean vector check was introduced to minimize the repetitive checks +// of the constituent objects during the EnsureAlignment function where certain +// objects can be ignored for further checking if we already know that they are +// completely aligned. + +/// \brief Calculate which (if any) chunks in a chunked array are unaligned +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the check +/// it must be set to a valid vector. Extra elements will be added to the end +/// of the vector for each chunk that is checked. `true` will be stored if +/// the chunk is unaligned. +/// \param offset the index of the chunk to start checking +/// \return true if all chunks (starting at `offset`) are aligned, false otherwise +ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment, + std::vector* needs_alignment, int offset = 0); + +/// \brief calculate which (if any) columns in a record batch are unaligned +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. +ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment, + std::vector* needs_alignment); + +/// \brief calculate which (if any) columns in a table are unaligned +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param needs_alignment an output vector that will store the results of the +/// check. It must be set to a valid vector. Extra elements will be added +/// to the end of the vector for each column that is checked. `true` will be +/// stored if the column is unaligned. 
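+///
+/// A sketch of the vector-out pattern described above (illustrative; `table`
+/// stands for a pre-built arrow::Table):
+///
+///   std::vector<bool> needs_alignment;
+///   if (!arrow::util::CheckAlignment(*table, 64, &needs_alignment)) {
+///     // needs_alignment[i] is now true for each unaligned column i
+///   }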
+ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment, + std::vector* needs_alignment); + +/// \brief return a buffer that has the given alignment and the same data as the input +/// buffer +/// +/// If the input buffer is already aligned then this method will return the input buffer +/// If the input buffer is not already aligned then this method will allocate a new +/// buffer. The alignment of the new buffer will have at least +/// max(kDefaultBufferAlignment, alignment) bytes of alignment. +/// +/// \param buffer the buffer to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate a new buffer if the +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr buffer, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array data where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array_data the array data to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array_data, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return an array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array the array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr array, + int64_t alignment, + MemoryPool* memory_pool); + +/// \brief return a chunked array where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param array the chunked array to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr array, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a record batch where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. +/// +/// \param batch the batch to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment( + std::shared_ptr batch, int64_t alignment, MemoryPool* memory_pool); + +/// \brief return a table where all buffers are aligned by the given alignment +/// +/// If any input buffer is already aligned then this method will reuse that same input +/// buffer. 
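+///
+/// A usage sketch (illustrative; assumes the surrounding function can
+/// propagate errors with ARROW_ASSIGN_OR_RAISE):
+///
+///   ARROW_ASSIGN_OR_RAISE(
+///       std::shared_ptr<arrow::Table> aligned,
+///       arrow::util::EnsureAlignment(std::move(table), 64,
+///                                    arrow::default_memory_pool()));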
+/// +/// \param table the table to check +/// \param alignment the alignment (in bytes) to check for +/// \param memory_pool a memory pool that will be used to allocate new buffers if any +/// input buffer is not sufficiently aligned +ARROW_EXPORT Result> EnsureAlignment(std::shared_ptr table, + int64_t alignment, + MemoryPool* memory_pool); + +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h new file mode 100644 index 0000000000000000000000000000000000000000..01e3ced2d1f61b8eb3719208c13a5dc4e111e771 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/launder.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +class AlignedStorage { + public: + static constexpr bool can_memcpy = std::is_trivial::value; + + constexpr T* get() noexcept { + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + constexpr const T* get() const noexcept { + // Use fully qualified name to avoid ambiguities with MSVC (ARROW-14800) + return arrow::internal::launder(reinterpret_cast(&data_)); + } + + void destroy() noexcept { + if (!std::is_trivially_destructible::value) { + get()->~T(); + } + } + + template + void construct(A&&... 
args) noexcept { + new (&data_) T(std::forward(args)...); + } + + template + void assign(V&& v) noexcept { + *get() = std::forward(v); + } + + void move_construct(AlignedStorage* other) noexcept { + new (&data_) T(std::move(*other->get())); + } + + void move_assign(AlignedStorage* other) noexcept { *get() = std::move(*other->get()); } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + memcpy(dest->get(), src->get(), memcpy_length * sizeof(T)); + } + + template + static typename std::enable_if::type move_construct_several( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + } + } + + template + static typename std::enable_if::type + move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, size_t n, + size_t memcpy_length) noexcept { + for (size_t i = 0; i < n; ++i) { + new (dest[i].get()) T(std::move(*src[i].get())); + src[i].destroy(); + } + } + + static void move_construct_several(AlignedStorage* ARROW_RESTRICT src, + AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several(src, dest, n, n); + } + + static void move_construct_several_and_destroy_source( + AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, + size_t n) noexcept { + move_construct_several_and_destroy_source(src, dest, n, n); + } + + static void destroy_several(AlignedStorage* p, size_t n) noexcept { + if (!std::is_trivially_destructible::value) { + for (size_t i = 0; i < n; ++i) { + p[i].destroy(); + } + } + } + + private: +#if !defined(__clang__) && defined(__GNUC__) && defined(__i386__) + // Workaround for GCC bug on i386: + // alignof(int64 | float64) can give different results depending on the + // compilation context, leading to internal ABI mismatch manifesting + // in incorrect propagation of Result between + // compilation units. + // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88115) + static constexpr size_t alignment() { + if (std::is_integral_v && sizeof(T) == 8) { + return 4; + } else if (std::is_floating_point_v && sizeof(T) == 8) { + return 4; + } + return alignof(T); + } + + typename std::aligned_storage::type data_; +#else + typename std::aligned_storage::type data_; +#endif +}; + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..f9bcd534567c6c231192cc174a717997583dfb3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h @@ -0,0 +1,2058 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/async_generator_fwd.h" +#include "arrow/util/async_util.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/io_util.h" +#include "arrow/util/iterator.h" +#include "arrow/util/mutex.h" +#include "arrow/util/queue.h" +#include "arrow/util/thread_pool.h" + +namespace arrow { + +// The methods in this file create, modify, and utilize AsyncGenerator which is an +// iterator of futures. This allows an asynchronous source (like file input) to be run +// through a pipeline in the same way that iterators can be used to create pipelined +// workflows. +// +// In order to support pipeline parallelism we introduce the concept of asynchronous +// reentrancy. This is different than synchronous reentrancy. With synchronous code a +// function is reentrant if the function can be called again while a previous call to that +// function is still running. Unless otherwise specified none of these generators are +// synchronously reentrant. Care should be taken to avoid calling them in such a way (and +// the utilities Visit/Collect/Await take care to do this). +// +// Asynchronous reentrancy on the other hand means the function is called again before the +// future returned by the function is marked finished (but after the call to get the +// future returns). Some of these generators are async-reentrant while others (e.g. +// those that depend on ordered processing like decompression) are not. Read the MakeXYZ +// function comments to determine which generators support async reentrancy. +// +// Note: Generators that are not asynchronously reentrant can still support readahead +// (\see MakeSerialReadaheadGenerator). +// +// Readahead operators, and some other operators, may introduce queueing. Any operators +// that introduce buffering should detail the amount of buffering they introduce in their +// MakeXYZ function comments. +// +// A generator should always be fully consumed before it is destroyed. +// A generator should not mark a future complete with an error status or a terminal value +// until all outstanding futures have completed. Generators that spawn multiple +// concurrent futures may need to hold onto an error while other concurrent futures wrap +// up. +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of AsyncGenerator, + /// an empty function indicates the end of iteration. 
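+///
+/// For example (sketch, using MakeVectorGenerator defined later in this
+/// header): a generator-of-generators terminates by yielding an empty
+/// std::function:
+///
+///   std::vector<AsyncGenerator<int>> inner = {MakeVectorGenerator<int>({1, 2})};
+///   auto outer = MakeVectorGenerator(std::move(inner));
+///   // After the single inner generator has been yielded, outer() yields
+///   // IterationTraits<AsyncGenerator<int>>::End(), for which IsEnd is true.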
+ static AsyncGenerator End() { return AsyncGenerator(); } + + static bool IsEnd(const AsyncGenerator& val) { return !val; } +}; + +template +Future AsyncGeneratorEnd() { + return Future::MakeFinished(IterationTraits::End()); +} + +/// returning a future that completes when all have been visited +template +Future<> VisitAsyncGenerator(AsyncGenerator generator, Visitor visitor) { + struct LoopBody { + struct Callback { + Result> operator()(const T& next) { + if (IsIterationEnd(next)) { + return Break(); + } else { + auto visited = visitor(next); + if (visited.ok()) { + return Continue(); + } else { + return visited; + } + } + } + + Visitor visitor; + }; + + Future> operator()() { + Callback callback{visitor}; + auto next = generator(); + return next.Then(std::move(callback)); + } + + AsyncGenerator generator; + Visitor visitor; + }; + + return Loop(LoopBody{std::move(generator), std::move(visitor)}); +} + +/// \brief Wait for an async generator to complete, discarding results. +template +Future<> DiscardAllFromAsyncGenerator(AsyncGenerator generator) { + std::function visitor = [](const T&) { return Status::OK(); }; + return VisitAsyncGenerator(generator, visitor); +} + +/// \brief Collect the results of an async generator into a vector +template +Future> CollectAsyncGenerator(AsyncGenerator generator) { + auto vec = std::make_shared>(); + auto loop_body = [generator = std::move(generator), + vec = std::move(vec)]() -> Future>> { + auto next = generator(); + return next.Then([vec](const T& result) -> Result>> { + if (IsIterationEnd(result)) { + return Break(*vec); + } else { + vec->push_back(result); + return Continue(); + } + }); + }; + return Loop(std::move(loop_body)); +} + +/// \see MakeMappedGenerator +template +class MappingGenerator { + public: + MappingGenerator(AsyncGenerator source, std::function(const T&)> map) + : state_(std::make_shared(std::move(source), std::move(map))) {} + + Future operator()() { + auto future = Future::Make(); + bool should_trigger; + { + auto guard = state_->mutex.Lock(); + if (state_->finished) { + return AsyncGeneratorEnd(); + } + should_trigger = state_->waiting_jobs.empty(); + state_->waiting_jobs.push_back(future); + } + if (should_trigger) { + state_->source().AddCallback(Callback{state_}); + } + return future; + } + + private: + struct State { + State(AsyncGenerator source, std::function(const T&)> map) + : source(std::move(source)), + map(std::move(map)), + waiting_jobs(), + mutex(), + finished(false) {} + + void Purge() { + // This might be called by an original callback (if the source iterator fails or + // ends) or by a mapped callback (if the map function fails or ends prematurely). + // Either way it should only be called once and after finished is set so there is no + // need to guard access to `waiting_jobs`. 
+ while (!waiting_jobs.empty()) { + waiting_jobs.front().MarkFinished(IterationTraits::End()); + waiting_jobs.pop_front(); + } + } + + AsyncGenerator source; + std::function(const T&)> map; + std::deque> waiting_jobs; + util::Mutex mutex; + bool finished; + }; + + struct Callback; + + struct MappedCallback { + void operator()(const Result& maybe_next) { + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + if (end) { + { + auto guard = state->mutex.Lock(); + should_purge = !state->finished; + state->finished = true; + } + } + sink.MarkFinished(maybe_next); + if (should_purge) { + state->Purge(); + } + } + std::shared_ptr state; + Future sink; + }; + + struct Callback { + void operator()(const Result& maybe_next) { + Future sink; + bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next); + bool should_purge = false; + bool should_trigger; + { + auto guard = state->mutex.Lock(); + // A MappedCallback may have purged or be purging the queue; + // we shouldn't do anything here. + if (state->finished) return; + if (end) { + should_purge = !state->finished; + state->finished = true; + } + sink = state->waiting_jobs.front(); + state->waiting_jobs.pop_front(); + should_trigger = !end && !state->waiting_jobs.empty(); + } + if (should_purge) { + state->Purge(); + } + if (should_trigger) { + state->source().AddCallback(Callback{state}); + } + if (maybe_next.ok()) { + const T& val = maybe_next.ValueUnsafe(); + if (IsIterationEnd(val)) { + sink.MarkFinished(IterationTraits::End()); + } else { + Future mapped_fut = state->map(val); + mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)}); + } + } else { + sink.MarkFinished(maybe_next.status()); + } + } + + std::shared_ptr state; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that will apply the map function to each element of +/// source. The map function is not called on the end token. +/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeMappedGenerator(AsyncGenerator source_generator, MapFn map) { + auto map_callback = [map = std::move(map)](const T& val) mutable -> Future { + return ToFuture(map(val)); + }; + return MappingGenerator(std::move(source_generator), std::move(map_callback)); +} + +/// \brief Create a generator that will apply the map function to +/// each element of source. The map function is not called on the end +/// token. The result of the map function should be another +/// generator; all these generators will then be flattened to produce +/// a single stream of items. 
+/// +/// Note: This function makes a copy of `map` for each item +/// Note: Errors returned from the `map` function will be propagated +/// +/// If the source generator is async-reentrant then this generator will be also +template , + typename V = typename EnsureFuture::type::ValueType> +AsyncGenerator MakeFlatMappedGenerator(AsyncGenerator source_generator, MapFn map) { + return MakeConcatenatedGenerator( + MakeMappedGenerator(std::move(source_generator), std::move(map))); +} + +/// \see MakeSequencingGenerator +template +class SequencingGenerator { + public: + SequencingGenerator(AsyncGenerator source, ComesAfter compare, IsNext is_next, + T initial_value) + : state_(std::make_shared(std::move(source), std::move(compare), + std::move(is_next), std::move(initial_value))) {} + + Future operator()() { + { + auto guard = state_->mutex.Lock(); + // We can send a result immediately if the top of the queue is either an + // error or the next item + if (!state_->queue.empty() && + (!state_->queue.top().ok() || + state_->is_next(state_->previous_value, *state_->queue.top()))) { + auto result = std::move(state_->queue.top()); + if (result.ok()) { + state_->previous_value = *result; + } + state_->queue.pop(); + return Future::MakeFinished(result); + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + // The next item is not in the queue so we will need to wait + auto new_waiting_fut = Future::Make(); + state_->waiting_future = new_waiting_fut; + guard.Unlock(); + state_->source().AddCallback(Callback{state_}); + return new_waiting_fut; + } + } + + private: + struct WrappedComesAfter { + bool operator()(const Result& left, const Result& right) { + if (!left.ok() || !right.ok()) { + // Should never happen + return false; + } + return compare(*left, *right); + } + ComesAfter compare; + }; + + struct State { + State(AsyncGenerator source, ComesAfter compare, IsNext is_next, T initial_value) + : source(std::move(source)), + is_next(std::move(is_next)), + previous_value(std::move(initial_value)), + waiting_future(), + queue(WrappedComesAfter{compare}), + finished(false), + mutex() {} + + AsyncGenerator source; + IsNext is_next; + T previous_value; + Future waiting_future; + std::priority_queue, std::vector>, WrappedComesAfter> queue; + bool finished; + util::Mutex mutex; + }; + + class Callback { + public: + explicit Callback(std::shared_ptr state) : state_(std::move(state)) {} + + void operator()(const Result result) { + Future to_deliver; + bool finished; + { + auto guard = state_->mutex.Lock(); + bool ready_to_deliver = false; + if (!result.ok()) { + // Clear any cached results + while (!state_->queue.empty()) { + state_->queue.pop(); + } + ready_to_deliver = true; + state_->finished = true; + } else if (IsIterationEnd(result.ValueUnsafe())) { + ready_to_deliver = state_->queue.empty(); + state_->finished = true; + } else { + ready_to_deliver = state_->is_next(state_->previous_value, *result); + } + + if (ready_to_deliver && state_->waiting_future.is_valid()) { + to_deliver = state_->waiting_future; + if (result.ok()) { + state_->previous_value = *result; + } + } else { + state_->queue.push(result); + } + // Capture state_->finished so we can access it outside the mutex + finished = state_->finished; + } + // Must deliver result outside of the mutex + if (to_deliver.is_valid()) { + to_deliver.MarkFinished(result); + } else { + // Otherwise, if we didn't get the next item (or a terminal item), we + // need to keep looking + if (!finished) { + state_->source().AddCallback(Callback{state_}); 
+ } + } + } + + private: + const std::shared_ptr state_; + }; + + const std::shared_ptr state_; +}; + +/// \brief Buffer an AsyncGenerator to return values in sequence order ComesAfter +/// and IsNext determine the sequence order. +/// +/// ComesAfter should be a BinaryPredicate that only returns true if a comes after b +/// +/// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if +/// `b` follows immediately after `a`. It should return true given `initial_value` and +/// `b` if `b` is the first item in the sequence. +/// +/// This operator will queue unboundedly while waiting for the next item. It is intended +/// for jittery sources that might scatter an ordered sequence. It is NOT intended to +/// sort. Using it to try and sort could result in excessive RAM usage. This generator +/// will queue up to N blocks where N is the max "out of order"ness of the source. +/// +/// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3 +/// blocks beyond where it belongs. +/// +/// This generator is not async-reentrant but it consists only of a simple log(n) +/// insertion into a priority queue. +template +AsyncGenerator MakeSequencingGenerator(AsyncGenerator source_generator, + ComesAfter compare, IsNext is_next, + T initial_value) { + return SequencingGenerator( + std::move(source_generator), std::move(compare), std::move(is_next), + std::move(initial_value)); +} + +/// \see MakeTransformedGenerator +template +class TransformingGenerator { + // The transforming generator state will be referenced as an async generator but will + // also be referenced via callback to various futures. If the async generator owner + // moves it around we need the state to be consistent for future callbacks. + struct TransformingGeneratorState + : std::enable_shared_from_this { + TransformingGeneratorState(AsyncGenerator generator, Transformer transformer) + : generator_(std::move(generator)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Future operator()() { + while (true) { + auto maybe_next_result = Pump(); + if (!maybe_next_result.ok()) { + return Future::MakeFinished(maybe_next_result.status()); + } + auto maybe_next = std::move(maybe_next_result).ValueUnsafe(); + if (maybe_next.has_value()) { + return Future::MakeFinished(*std::move(maybe_next)); + } + + auto next_fut = generator_(); + // If finished already, process results immediately inside the loop to avoid + // stack overflow + if (next_fut.is_finished()) { + auto next_result = next_fut.result(); + if (next_result.ok()) { + last_value_ = *next_result; + } else { + return Future::MakeFinished(next_result.status()); + } + // Otherwise, if not finished immediately, add callback to process results + } else { + auto self = this->shared_from_this(); + return next_fut.Then([self](const T& next_result) { + self->last_value_ = next_result; + return (*self)(); + }); + } + } + } + + // See comment on TransformingIterator::Pump + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + ARROW_ASSIGN_OR_RAISE(TransformFlow next, transformer_(*last_value_)); + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + AsyncGenerator generator_; + Transformer transformer_; + std::optional last_value_; + bool finished_; + }; + + 
public: + explicit TransformingGenerator(AsyncGenerator generator, + Transformer transformer) + : state_(std::make_shared(std::move(generator), + std::move(transformer))) {} + + Future operator()() { return (*state_)(); } + + protected: + std::shared_ptr state_; +}; + +/// \brief Transform an async generator using a transformer function returning a new +/// AsyncGenerator +/// +/// The transform function here behaves exactly the same as the transform function in +/// MakeTransformedIterator and you can safely use the same transform function to +/// transform both synchronous and asynchronous streams. +/// +/// This generator is not async-reentrant +/// +/// This generator may queue up to 1 instance of T but will not delay +template +AsyncGenerator MakeTransformedGenerator(AsyncGenerator generator, + Transformer transformer) { + return TransformingGenerator(generator, transformer); +} + +/// \see MakeSerialReadaheadGenerator +template +class SerialReadaheadGenerator { + public: + SerialReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future operator()() { + if (state_->first_) { + // Lazy generator, need to wait for the first ask to prime the pump + state_->first_ = false; + auto next = state_->source_(); + return next.Then(Callback{state_}, ErrCallback{state_}); + } + + // This generator is not async-reentrant. We won't be called until the last + // future finished so we know there is something in the queue + auto finished = state_->finished_.load(); + if (finished && state_->readahead_queue_.IsEmpty()) { + return AsyncGeneratorEnd(); + } + + std::shared_ptr> next; + if (!state_->readahead_queue_.Read(next)) { + return Status::UnknownError("Could not read from readahead_queue"); + } + + auto last_available = state_->spaces_available_.fetch_add(1); + if (last_available == 0 && !finished) { + // Reader idled out, we need to restart it + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return *next; + } + + private: + struct State { + State(AsyncGenerator source, int max_readahead) + : first_(true), + source_(std::move(source)), + finished_(false), + // There is one extra "space" for the in-flight request + spaces_available_(max_readahead + 1), + // The SPSC queue has size-1 "usable" slots so we need to overallocate 1 + readahead_queue_(max_readahead + 1) {} + + Status Pump(const std::shared_ptr& self) { + // Can't do readahead_queue.write(source().Then(...)) because then the + // callback might run immediately and add itself to the queue before this gets added + // to the queue messing up the order. + auto next_slot = std::make_shared>(); + auto written = readahead_queue_.Write(next_slot); + if (!written) { + return Status::UnknownError("Could not write to readahead_queue"); + } + // If this Pump is being called from a callback it is possible for the source to + // poll and read from the queue between the Write and this spot where we fill the + // value in. However, it is not possible for the future to read this value we are + // writing. That is because this callback (the callback for future X) must be + // finished before future X is marked complete and this source is not pulled + // reentrantly so it will not poll for future X+1 until this callback has completed. 
+ *next_slot = source_().Then(Callback{self}, ErrCallback{self}); + return Status::OK(); + } + + // Only accessed by the consumer end + bool first_; + // Accessed by both threads + AsyncGenerator source_; + std::atomic finished_; + // The queue has a size but it is not atomic. We keep track of how many spaces are + // left in the queue here so we know if we've just written the last value and we need + // to stop reading ahead or if we've just read from a full queue and we need to + // restart reading ahead + std::atomic spaces_available_; + // Needs to be a queue of shared_ptr and not Future because we set the value of the + // future after we add it to the queue + util::SpscQueue>> readahead_queue_; + }; + + struct Callback { + Result operator()(const T& next) { + if (IsIterationEnd(next)) { + state_->finished_.store(true); + return next; + } + auto last_available = state_->spaces_available_.fetch_sub(1); + if (last_available > 1) { + ARROW_RETURN_NOT_OK(state_->Pump(state_)); + } + return next; + } + + std::shared_ptr state_; + }; + + struct ErrCallback { + Result operator()(const Status& st) { + state_->finished_.store(true); + return st; + } + + std::shared_ptr state_; + }; + + std::shared_ptr state_; +}; + +/// \see MakeFromFuture +template +class FutureFirstGenerator { + public: + explicit FutureFirstGenerator(Future> future) + : state_(std::make_shared(std::move(future))) {} + + Future operator()() { + if (state_->source_) { + return state_->source_(); + } else { + auto state = state_; + return state_->future_.Then([state](const AsyncGenerator& source) { + state->source_ = source; + return state->source_(); + }); + } + } + + private: + struct State { + explicit State(Future> future) : future_(future), source_() {} + + Future> future_; + AsyncGenerator source_; + }; + + std::shared_ptr state_; +}; + +/// \brief Transform a Future> into an AsyncGenerator +/// that waits for the future to complete as part of the first item. +/// +/// This generator is not async-reentrant (even if the generator yielded by future is) +/// +/// This generator does not queue +template +AsyncGenerator MakeFromFuture(Future> future) { + return FutureFirstGenerator(std::move(future)); +} + +/// \brief Create a generator that will pull from the source into a queue. Unlike +/// MakeReadaheadGenerator this will not pull reentrantly from the source. +/// +/// The source generator does not need to be async-reentrant +/// +/// This generator is not async-reentrant (even if the source is) +/// +/// This generator may queue up to max_readahead additional instances of T +template +AsyncGenerator MakeSerialReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return SerialReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Create a generator that immediately pulls from the source +/// +/// Typical generators do not pull from their source until they themselves +/// are pulled. This generator does not follow that convention and will call +/// generator() once before it returns. The returned generator will otherwise +/// mirror the source. +/// +/// This generator forwards async-reentrant pressure to the source +/// This generator buffers one item (the first result) until it is delivered. 
+template +AsyncGenerator MakeAutoStartingGenerator(AsyncGenerator generator) { + struct AutostartGenerator { + Future operator()() { + if (first_future->is_valid()) { + Future result = *first_future; + *first_future = Future(); + return result; + } + return source(); + } + + std::shared_ptr> first_future; + AsyncGenerator source; + }; + + std::shared_ptr> first_future = std::make_shared>(generator()); + return AutostartGenerator{std::move(first_future), std::move(generator)}; +} + +/// \see MakeReadaheadGenerator +template +class ReadaheadGenerator { + public: + ReadaheadGenerator(AsyncGenerator source_generator, int max_readahead) + : state_(std::make_shared(std::move(source_generator), max_readahead)) {} + + Future AddMarkFinishedContinuation(Future fut) { + auto state = state_; + return fut.Then( + [state](const T& result) -> Future { + state->MarkFinishedIfDone(result); + if (state->finished.load()) { + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + } else { + state->num_running.fetch_sub(1); + } + return result; + }, + [state](const Status& err) -> Future { + // If there is an error we need to make sure all running + // tasks finish before we return the error. + state->finished.store(true); + if (state->num_running.fetch_sub(1) == 1) { + state->final_future.MarkFinished(); + } + return state->final_future.Then([err]() -> Result { return err; }); + }); + } + + Future operator()() { + if (state_->readahead_queue.empty()) { + // This is the first request, let's pump the underlying queue + state_->num_running.store(state_->max_readahead); + for (int i = 0; i < state_->max_readahead; i++) { + auto next = state_->source_generator(); + auto next_after_check = AddMarkFinishedContinuation(std::move(next)); + state_->readahead_queue.push(std::move(next_after_check)); + } + } + // Pop one and add one + auto result = state_->readahead_queue.front(); + state_->readahead_queue.pop(); + if (state_->finished.load()) { + state_->readahead_queue.push(AsyncGeneratorEnd()); + } else { + state_->num_running.fetch_add(1); + auto back_of_queue = state_->source_generator(); + auto back_of_queue_after_check = + AddMarkFinishedContinuation(std::move(back_of_queue)); + state_->readahead_queue.push(std::move(back_of_queue_after_check)); + } + return result; + } + + private: + struct State { + State(AsyncGenerator source_generator, int max_readahead) + : source_generator(std::move(source_generator)), max_readahead(max_readahead) {} + + void MarkFinishedIfDone(const T& next_result) { + if (IsIterationEnd(next_result)) { + finished.store(true); + } + } + + AsyncGenerator source_generator; + int max_readahead; + Future<> final_future = Future<>::Make(); + std::atomic num_running{0}; + std::atomic finished{false}; + std::queue> readahead_queue; + }; + + std::shared_ptr state_; +}; + +/// \brief A generator where the producer pushes items on a queue. +/// +/// No back-pressure is applied, so this generator is mostly useful when +/// producing the values is neither CPU- nor memory-expensive (e.g. fetching +/// filesystem metadata). +/// +/// This generator is not async-reentrant. 
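+///
+/// A producer/consumer sketch (illustrative):
+///
+///   PushGenerator<int> gen;
+///   auto producer = gen.producer();
+///   producer.Push(1);
+///   producer.Push(2);
+///   producer.Close();
+///   Future<std::vector<int>> all = CollectAsyncGenerator<int>(gen);
+///   // all eventually completes with {1, 2}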
+template +class PushGenerator { + struct State { + State() {} + + util::Mutex mutex; + std::deque> result_q; + std::optional> consumer_fut; + bool finished = false; + }; + + public: + /// Producer API for PushGenerator + class Producer { + public: + explicit Producer(const std::shared_ptr& state) : weak_state_(state) {} + + /// \brief Push a value on the queue + /// + /// True is returned if the value was pushed, false if the generator is + /// already closed or destroyed. If the latter, it is recommended to stop + /// producing any further values. + bool Push(Result result) { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Closed early + return false; + } + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(std::move(result)); + } else { + state->result_q.push_back(std::move(result)); + } + return true; + } + + /// \brief Tell the consumer we have finished producing + /// + /// It is allowed to call this and later call Push() again ("early close"). + /// In this case, calls to Push() after the queue is closed are silently + /// ignored. This can help implementing non-trivial cancellation cases. + /// + /// True is returned on success, false if the generator is already closed + /// or destroyed. + bool Close() { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return false; + } + auto lock = state->mutex.Lock(); + if (state->finished) { + // Already closed + return false; + } + state->finished = true; + if (state->consumer_fut.has_value()) { + auto fut = std::move(state->consumer_fut.value()); + state->consumer_fut.reset(); + lock.Unlock(); // unlock before potentially invoking a callback + fut.MarkFinished(IterationTraits::End()); + } + return true; + } + + /// Return whether the generator was closed or destroyed. + bool is_closed() const { + auto state = weak_state_.lock(); + if (!state) { + // Generator was destroyed + return true; + } + auto lock = state->mutex.Lock(); + return state->finished; + } + + private: + const std::weak_ptr weak_state_; + }; + + PushGenerator() : state_(std::make_shared()) {} + + /// Read an item from the queue + Future operator()() const { + auto lock = state_->mutex.Lock(); + assert(!state_->consumer_fut.has_value()); // Non-reentrant + if (!state_->result_q.empty()) { + auto fut = Future::MakeFinished(std::move(state_->result_q.front())); + state_->result_q.pop_front(); + return fut; + } + if (state_->finished) { + return AsyncGeneratorEnd(); + } + auto fut = Future::Make(); + state_->consumer_fut = fut; + return fut; + } + + /// \brief Return producer-side interface + /// + /// The returned object must be used by the producer to push values on the queue. + /// Only a single Producer object should be instantiated. + Producer producer() { return Producer{state_}; } + + private: + const std::shared_ptr state_; +}; + +/// \brief Create a generator that pulls reentrantly from a source +/// This generator will pull reentrantly from a source, ensuring that max_readahead +/// requests are active at any given time. +/// +/// The source generator must be async-reentrant +/// +/// This generator itself is async-reentrant. 
+/// +/// This generator may queue up to max_readahead instances of T +template +AsyncGenerator MakeReadaheadGenerator(AsyncGenerator source_generator, + int max_readahead) { + return ReadaheadGenerator(std::move(source_generator), max_readahead); +} + +/// \brief Creates a generator that will yield finished futures from a vector +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeVectorGenerator(std::vector vec) { + struct State { + explicit State(std::vector vec_) : vec(std::move(vec_)), vec_idx(0) {} + + std::vector vec; + std::atomic vec_idx; + }; + + auto state = std::make_shared(std::move(vec)); + return [state]() { + auto idx = state->vec_idx.fetch_add(1); + if (idx >= state->vec.size()) { + // Eagerly return memory + state->vec.clear(); + return AsyncGeneratorEnd(); + } + return Future::MakeFinished(state->vec[idx]); + }; +} + +/// \see MakeMergedGenerator +template +class MergedGenerator { + // Note, the implementation of this class is quite complex at the moment (PRs to + // simplify are always welcome) + // + // Terminology is borrowed from rxjs. This is a pull based implementation of the + // mergeAll operator. The "outer subscription" refers to the async + // generator that the caller provided when creating this. The outer subscription + // yields generators. + // + // Each of these generators is then subscribed to (up to max_subscriptions) and these + // are referred to as "inner subscriptions". + // + // As soon as we start we try and establish `max_subscriptions` inner subscriptions. For + // each inner subscription we will cache up to 1 value. This means we may have more + // values than we have been asked for. In our example, if a caller asks for one record + // batch we will start scanning `max_subscriptions` different files. For each file we + // will only queue up to 1 batch (so a separate readahead is needed on the file if batch + // readahead is desired). + // + // If the caller is slow we may accumulate ready-to-deliver items. These are stored + // in `delivered_jobs`. + // + // If the caller is very quick we may accumulate requests. These are stored in + // `waiting_jobs`. + // + // It may be helpful to consider an example, in the scanner the outer subscription + // is some kind of asynchronous directory listing. The inner subscription is + // then a scan on a file yielded by the directory listing. + // + // An "outstanding" request is when we have polled either the inner or outer + // subscription but that future hasn't completed yet. + // + // There are three possible "events" that can happen. + // * A caller could request the next future + // * An outer callback occurs when the next subscription is ready (e.g. the directory + // listing has produced a new file) + // * An inner callback occurs when one of the inner subscriptions emits a value (e.g. + // a file scan emits a record batch) + // + // Any time an event happens the logic is broken into two phases. First, we grab the + // lock and modify the shared state. While doing this we figure out what callbacks we + // will need to execute. Then, we give up the lock and execute these callbacks. It is + // important to execute these callbacks without the lock to avoid deadlock. 
+ public: + explicit MergedGenerator(AsyncGenerator> source, + int max_subscriptions) + : state_(std::make_shared(std::move(source), max_subscriptions)) {} + + Future operator()() { + // A caller has requested a future + Future waiting_future; + std::shared_ptr delivered_job; + bool mark_generator_complete = false; + { + auto guard = state_->mutex.Lock(); + if (!state_->delivered_jobs.empty()) { + // If we have a job sitting around we can deliver it + delivered_job = std::move(state_->delivered_jobs.front()); + state_->delivered_jobs.pop_front(); + if (state_->IsCompleteUnlocked(guard)) { + // It's possible this waiting job was the only thing left to handle and + // we have now completed the generator. + mark_generator_complete = true; + } else { + // Since we had a job sitting around we also had an inner subscription + // that had paused. We are going to restart this inner subscription and + // so there will be a new outstanding request. + state_->outstanding_requests++; + } + } else if (state_->broken || + (!state_->first && state_->num_running_subscriptions == 0)) { + // If we are broken or exhausted then prepare a terminal item but + // we won't complete it until we've finished. + Result end_res = IterationEnd(); + if (!state_->final_error.ok()) { + end_res = state_->final_error; + state_->final_error = Status::OK(); + } + return state_->all_finished.Then([end_res]() -> Result { return end_res; }); + } else { + // Otherwise we just queue the request and it will be completed when one of the + // ongoing inner subscriptions delivers a result + waiting_future = Future::Make(); + state_->waiting_jobs.push_back(std::make_shared>(waiting_future)); + } + if (state_->first) { + // On the first request we are going to try and immediately fill our queue + // of subscriptions. We assume we are going to be able to start them all. + state_->outstanding_requests += + static_cast(state_->active_subscriptions.size()); + state_->num_running_subscriptions += + static_cast(state_->active_subscriptions.size()); + } + } + // If we grabbed a finished item from the delivered_jobs queue then we may need + // to mark the generator finished or issue a request for a new item to fill in + // the spot we just vacated. Notice that we issue that request to the same + // subscription that delivered it (deliverer). + if (delivered_job) { + if (mark_generator_complete) { + state_->all_finished.MarkFinished(); + } else { + delivered_job->deliverer().AddCallback( + InnerCallback(state_, delivered_job->index)); + } + return std::move(delivered_job->value); + } + // On the first call we try and fill up our subscriptions. It's possible the outer + // generator only has a few items and we can't fill up to what we were hoping. In + // that case we have to bail early. + if (state_->first) { + state_->first = false; + mark_generator_complete = false; + for (int i = 0; i < static_cast(state_->active_subscriptions.size()); i++) { + state_->PullSource().AddCallback( + OuterCallback{state_, static_cast(i)}); + // If we have to bail early then we need to update the shared state again so + // we need to reacquire the lock. + auto guard = state_->mutex.Lock(); + if (state_->source_exhausted) { + int excess_requests = + static_cast(state_->active_subscriptions.size()) - i - 1; + state_->outstanding_requests -= excess_requests; + state_->num_running_subscriptions -= excess_requests; + if (excess_requests > 0) { + // It's possible that we are completing the generator by reducing the number + // of outstanding requests (e.g. 
this happens when the outer subscription and + // all inner subscriptions are synchronous) + mark_generator_complete = state_->IsCompleteUnlocked(guard); + } + break; + } + } + if (mark_generator_complete) { + state_->MarkFinishedAndPurge(); + } + } + return waiting_future; + } + + private: + struct DeliveredJob { + explicit DeliveredJob(AsyncGenerator deliverer_, Result value_, + std::size_t index_) + : deliverer(deliverer_), value(std::move(value_)), index(index_) {} + + // The generator that delivered this result, we will request another item + // from this generator once the result is delivered + AsyncGenerator deliverer; + // The result we received from the generator + Result value; + // The index of the generator (in active_subscriptions) that delivered this + // result. This is used if we need to replace a finished generator. + std::size_t index; + }; + + struct State { + State(AsyncGenerator> source, int max_subscriptions) + : source(std::move(source)), + active_subscriptions(max_subscriptions), + delivered_jobs(), + waiting_jobs(), + mutex(), + first(true), + broken(false), + source_exhausted(false), + outstanding_requests(0), + num_running_subscriptions(0), + final_error(Status::OK()) {} + + Future> PullSource() { + // Need to guard access to source() so we don't pull sync-reentrantly which + // is never valid. + auto lock = mutex.Lock(); + return source(); + } + + void SignalErrorUnlocked(const util::Mutex::Guard& guard) { + broken = true; + // Empty any results that have arrived but not asked for. + while (!delivered_jobs.empty()) { + delivered_jobs.pop_front(); + } + } + + // This function is called outside the mutex but it will only ever be + // called once + void MarkFinishedAndPurge() { + all_finished.MarkFinished(); + while (!waiting_jobs.empty()) { + waiting_jobs.front()->MarkFinished(IterationEnd()); + waiting_jobs.pop_front(); + } + } + + // This is called outside the mutex but it is only ever called + // once and Future<>::AddCallback is thread-safe + void MarkFinalError(const Status& err, Future maybe_sink) { + if (maybe_sink.is_valid()) { + // Someone is waiting for this error so lets mark it complete when + // all the work is done + all_finished.AddCallback([maybe_sink, err](const Status& status) mutable { + maybe_sink.MarkFinished(err); + }); + } else { + // No one is waiting for this error right now so it will be delivered + // next. + final_error = err; + } + } + + bool IsCompleteUnlocked(const util::Mutex::Guard& guard) { + return outstanding_requests == 0 && + (broken || (source_exhausted && num_running_subscriptions == 0 && + delivered_jobs.empty())); + } + + bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) { + --outstanding_requests; + return IsCompleteUnlocked(guard); + } + + // The outer generator. Each item we pull from this will be its own generator + // and become an inner subscription + AsyncGenerator> source; + // active_subscriptions and delivered_jobs will be bounded by max_subscriptions + std::vector> active_subscriptions; + // Results delivered by the inner subscriptions that weren't yet asked for by the + // caller + std::deque> delivered_jobs; + // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the + // backpressure + std::deque>> waiting_jobs; + // A future that will be marked complete when the terminal item has arrived and all + // outstanding futures have completed. It is used to hold off emission of an error + // until all outstanding work is done. 
+    Future<> all_finished = Future<>::Make();
+    util::Mutex mutex;
+    // A flag cleared when the caller first asks for a future. Used to start polling.
+    bool first;
+    // A flag set when an error arrives; prevents us from issuing new requests.
+    bool broken;
+    // A flag set when the outer subscription has been exhausted. Prevents us from
+    // pulling it further (even though it would be generally harmless) and lets us know
+    // we are finishing up.
+    bool source_exhausted;
+    // The number of futures that we have requested from either the outer or inner
+    // subscriptions that have not yet completed. We cannot mark all_finished until this
+    // reaches 0. This will never be greater than max_subscriptions.
+    int outstanding_requests;
+    // The number of running subscriptions. We ramp this up to `max_subscriptions` as
+    // soon as the first item is requested and then it stays at that level (each
+    // exhausted inner subscription is replaced by a new inner subscription) until the
+    // outer subscription is exhausted, at which point this descends to 0 (and
+    // source_exhausted is then set to true).
+    int num_running_subscriptions;
+    // If an error arrives, and the caller hasn't asked for that item, we store the
+    // error here. It is analogous to delivered_jobs but for errors instead of finished
+    // results.
+    Status final_error;
+  };
+
+  struct InnerCallback {
+    InnerCallback(std::shared_ptr<State> state, std::size_t index,
+                  bool recursive = false)
+        : state(std::move(state)), index(index), recursive(recursive) {}
+
+    void operator()(const Result<T>& maybe_next_ref) {
+      // An item has been delivered by one of the inner subscriptions
+      Future<T> next_fut;
+      const Result<T>* maybe_next = &maybe_next_ref;
+
+      // When an item is delivered (and the caller has asked for it) we grab the
+      // next item from the inner subscription. To avoid this behavior leading to an
+      // infinite loop (this can happen if the caller's callback asks for the next item)
+      // we use a while loop.
+      while (true) {
+        Future<T> sink;
+        bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next);
+        bool pull_next_sub = false;
+        bool was_broken = false;
+        bool should_mark_gen_complete = false;
+        bool should_mark_final_error = false;
+        {
+          auto guard = state->mutex.Lock();
+          if (state->broken) {
+            // We've errored out previously so ignore the result. If anyone was waiting
+            // for this they will get IterationEnd when we purge
+            was_broken = true;
+          } else {
+            if (!sub_finished) {
+              // There is a result to deliver. Either we can deliver it now or we will
+              // queue it up
+              if (state->waiting_jobs.empty()) {
+                state->delivered_jobs.push_back(std::make_shared<DeliveredJob>(
+                    state->active_subscriptions[index], *maybe_next, index));
+              } else {
+                sink = std::move(*state->waiting_jobs.front());
+                state->waiting_jobs.pop_front();
+              }
+            }
+
+            // If this is the first error then we transition the state to a broken state
+            if (!maybe_next->ok()) {
+              should_mark_final_error = true;
+              state->SignalErrorUnlocked(guard);
+            }
+          }
+
+          // If we finished this inner subscription then we need to grab a new inner
+          // subscription to take its spot. If we can't (because we're broken or
+          // exhausted) then we aren't going to be starting any new futures and so
+          // the number of running subscriptions drops.
+          pull_next_sub = sub_finished && !state->source_exhausted && !was_broken;
+          if (sub_finished && !pull_next_sub) {
+            state->num_running_subscriptions--;
+          }
+          // There are three situations in which we won't pull again: an error
+          // occurred, we are already finished, or no one was waiting for our result
+          // and so we queued it up. In each case we decrement outstanding_requests
+          // and possibly mark the generator completed.
+          if (state->broken || (!sink.is_valid() && !sub_finished) ||
+              (sub_finished && state->source_exhausted)) {
+            if (state->MarkTaskFinishedUnlocked(guard)) {
+              should_mark_gen_complete = true;
+            }
+          }
+        }
+
+        // Now we have given up the lock and we can take all the actions we decided we
+        // need to take.
+        if (should_mark_final_error) {
+          state->MarkFinalError(maybe_next->status(), std::move(sink));
+        }
+
+        if (should_mark_gen_complete) {
+          state->MarkFinishedAndPurge();
+        }
+
+        // An error occurred elsewhere so there is no need to mark any future
+        // finished (that will happen during the purge) or pull from anything
+        if (was_broken) {
+          return;
+        }
+
+        if (pull_next_sub) {
+          if (recursive) {
+            was_empty = true;
+            return;
+          }
+          // We pulled an end token so we need to start a new subscription
+          // in our spot
+          state->PullSource().AddCallback(OuterCallback{state, index});
+        } else if (sink.is_valid()) {
+          // We pulled a valid result and there was someone waiting for it
+          // so let's fetch the next result from our subscription
+          sink.MarkFinished(*maybe_next);
+          next_fut = state->active_subscriptions[index]();
+          if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) {
+            return;
+          }
+          // Already completed. Avoid very deep recursion by looping
+          // here instead of relying on the callback.
+          maybe_next = &next_fut.result();
+          continue;
+        }
+        // else: We pulled a valid result but no one was waiting for it so
+        // we can just stop.
+        return;
+      }
+    }
+    std::shared_ptr<State> state;
+    std::size_t index;
+    bool recursive;
+    bool was_empty = false;
+  };
+
+  struct OuterCallback {
+    void operator()(const Result<AsyncGenerator<T>>& initial_maybe_next) {
+      Result<AsyncGenerator<T>> maybe_next = initial_maybe_next;
+      while (true) {
+        // We have been given a new inner subscription
+        bool should_continue = false;
+        bool should_mark_gen_complete = false;
+        bool should_deliver_error = false;
+        bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next);
+        Future<T> error_sink;
+        {
+          auto guard = state->mutex.Lock();
+          if (!maybe_next.ok() || source_exhausted || state->broken) {
+            // If here then we will not pull any more from the outer source
+            if (!state->broken && !maybe_next.ok()) {
+              state->SignalErrorUnlocked(guard);
+              // If here then we are the first error so we need to deliver it
+              should_deliver_error = true;
+              if (!state->waiting_jobs.empty()) {
+                error_sink = std::move(*state->waiting_jobs.front());
+                state->waiting_jobs.pop_front();
+              }
+            }
+            if (source_exhausted) {
+              state->source_exhausted = true;
+              state->num_running_subscriptions--;
+            }
+            if (state->MarkTaskFinishedUnlocked(guard)) {
+              should_mark_gen_complete = true;
+            }
+          } else {
+            state->active_subscriptions[index] = *maybe_next;
+            should_continue = true;
+          }
+        }
+        if (should_deliver_error) {
+          state->MarkFinalError(maybe_next.status(), std::move(error_sink));
+        }
+        if (should_mark_gen_complete) {
+          state->MarkFinishedAndPurge();
+        }
+        if (should_continue) {
+          // There is a possibility that a large sequence of immediately available
+          // inner callbacks could lead to a stack overflow. To avoid this we need to
+          // synchronously loop through inner/outer callbacks until we either find an
+          // unfinished future or we find an actual item to deliver.
+ Future next_item = (*maybe_next)(); + if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) { + // By setting recursive to true we signal to the inner callback that, if it is + // empty, instead of adding a new outer callback, it should just immediately + // return, flagging was_empty so that we know we need to check the next + // subscription. + InnerCallback immediate_inner(state, index, /*recursive=*/true); + immediate_inner(next_item.result()); + if (immediate_inner.was_empty) { + Future> next_source = state->PullSource(); + if (next_source.TryAddCallback([this] { + return OuterCallback{state, index}; + })) { + // We hit an unfinished future so we can stop looping + return; + } + // The current subscription was immediately and synchronously empty + // and we were able to synchronously pull the next subscription so we + // can keep looping. + maybe_next = next_source.result(); + continue; + } + } + } + return; + } + } + std::shared_ptr state; + std::size_t index; + }; + + std::shared_ptr state_; +}; + +/// \brief Create a generator that takes in a stream of generators and pulls from up to +/// max_subscriptions at a time +/// +/// Note: This may deliver items out of sequence. For example, items from the third +/// AsyncGenerator generated by the source may be emitted before some items from the first +/// AsyncGenerator generated by the source. +/// +/// This generator will pull from source async-reentrantly unless max_subscriptions is 1 +/// This generator will not pull from the individual subscriptions reentrantly. Add +/// readahead to the individual subscriptions if that is desired. +/// This generator is async-reentrant +/// +/// This generator may queue up to max_subscriptions instances of T +template +AsyncGenerator MakeMergedGenerator(AsyncGenerator> source, + int max_subscriptions) { + return MergedGenerator(std::move(source), max_subscriptions); +} + +template +Result> MakeSequencedMergedGenerator( + AsyncGenerator> source, int max_subscriptions) { + if (max_subscriptions < 0) { + return Status::Invalid("max_subscriptions must be a positive integer"); + } + if (max_subscriptions == 1) { + return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1"); + } + AsyncGenerator> autostarting_source = MakeMappedGenerator( + std::move(source), + [](const AsyncGenerator& sub) { return MakeAutoStartingGenerator(sub); }); + AsyncGenerator> sub_readahead = + MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1); + return MakeConcatenatedGenerator(std::move(sub_readahead)); +} + +/// \brief Create a generator that takes in a stream of generators and pulls from each +/// one in sequence. +/// +/// This generator is async-reentrant but will never pull from source reentrantly and +/// will never pull from any subscription reentrantly. 
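+///
+/// For contrast with MakeMergedGenerator, a sketch (names are illustrative;
+/// `sources` is assumed to be an AsyncGenerator<AsyncGenerator<Block>>; use
+/// one of the two alternatives, not both):
+///
+///   // Items may arrive out of order, up to 4 inner generators polled at once:
+///   auto merged = MakeMergedGenerator(std::move(sources), /*max_subscriptions=*/4);
+///   // Or: items arrive strictly in sequence, one inner generator at a time:
+///   auto in_order = MakeConcatenatedGenerator(std::move(sources));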
+/// +/// This generator may queue 1 instance of T +/// +/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that +/// forwards async-reentrant requests instead of buffering them (which is what +/// MergedGenerator does) +template +AsyncGenerator MakeConcatenatedGenerator(AsyncGenerator> source) { + return MergedGenerator(std::move(source), 1); +} + +template +struct Enumerated { + T value; + int index; + bool last; +}; + +template +struct IterationTraits> { + static Enumerated End() { return Enumerated{IterationEnd(), -1, false}; } + static bool IsEnd(const Enumerated& val) { return val.index < 0; } +}; + +/// \see MakeEnumeratedGenerator +template +class EnumeratingGenerator { + public: + EnumeratingGenerator(AsyncGenerator source, T initial_value) + : state_(std::make_shared(std::move(source), std::move(initial_value))) {} + + Future> operator()() { + if (state_->finished) { + return AsyncGeneratorEnd>(); + } else { + auto state = state_; + return state->source().Then([state](const T& next) { + auto finished = IsIterationEnd(next); + auto prev = Enumerated{state->prev_value, state->prev_index, finished}; + state->prev_value = next; + state->prev_index++; + state->finished = finished; + return prev; + }); + } + } + + private: + struct State { + State(AsyncGenerator source, T initial_value) + : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) { + finished = IsIterationEnd(prev_value); + } + + AsyncGenerator source; + T prev_value; + int prev_index; + bool finished; + }; + + std::shared_ptr state_; +}; + +/// Wrap items from a source generator with positional information +/// +/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be +/// processed in a "first-available" fashion and later resequenced which can reduce the +/// impact of sources with erratic performance (e.g. a filesystem where some items may +/// take longer to read than others). +/// +/// TODO(ARROW-12371) Would require this generator be async-reentrant +/// +/// \see MakeSequencingGenerator for an example of putting items back in order +/// +/// This generator is not async-reentrant +/// +/// This generator buffers one item (so it knows which item is the last item) +template +AsyncGenerator> MakeEnumeratedGenerator(AsyncGenerator source) { + return FutureFirstGenerator>( + source().Then([source](const T& initial_value) -> AsyncGenerator> { + return EnumeratingGenerator(std::move(source), initial_value); + })); +} + +/// \see MakeTransferredGenerator +template +class TransferringGenerator { + public: + explicit TransferringGenerator(AsyncGenerator source, internal::Executor* executor) + : source_(std::move(source)), executor_(executor) {} + + Future operator()() { return executor_->Transfer(source_()); } + + private: + AsyncGenerator source_; + internal::Executor* executor_; +}; + +/// \brief Transfer a future to an underlying executor. +/// +/// Continuations run on the returned future will be run on the given executor +/// if they cannot be run synchronously. +/// +/// This is often needed to move computation off I/O threads or other external +/// completion sources and back on to the CPU executor so the I/O thread can +/// stay busy and focused on I/O +/// +/// Keep in mind that continuations called on an already completed future will +/// always be run synchronously and so no transfer will happen in that case. 
+/// +/// This generator is async reentrant if the source is +/// +/// This generator will not queue +template +AsyncGenerator MakeTransferredGenerator(AsyncGenerator source, + internal::Executor* executor) { + return TransferringGenerator(std::move(source), executor); +} + +/// \see MakeBackgroundGenerator +template +class BackgroundGenerator { + public: + explicit BackgroundGenerator(Iterator it, internal::Executor* io_executor, int max_q, + int q_restart) + : state_(std::make_shared(io_executor, std::move(it), max_q, q_restart)), + cleanup_(std::make_shared(state_.get())) {} + + Future operator()() { + auto guard = state_->mutex.Lock(); + Future waiting_future; + if (state_->queue.empty()) { + if (state_->finished) { + return AsyncGeneratorEnd(); + } else { + waiting_future = Future::Make(); + state_->waiting_future = waiting_future; + } + } else { + auto next = Future::MakeFinished(std::move(state_->queue.front())); + state_->queue.pop(); + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(next)); + } + return next; + } + // This should only trigger the very first time this method is called + if (state_->NeedsRestart()) { + return state_->RestartTask(state_, std::move(guard), std::move(waiting_future)); + } + return waiting_future; + } + + protected: + static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits::max()}; + + struct State { + State(internal::Executor* io_executor, Iterator it, int max_q, int q_restart) + : io_executor(io_executor), + max_q(max_q), + q_restart(q_restart), + it(std::move(it)), + reading(false), + finished(false), + should_shutdown(false) {} + + void ClearQueue() { + while (!queue.empty()) { + queue.pop(); + } + } + + bool TaskIsRunning() const { return task_finished.is_valid(); } + + bool NeedsRestart() const { + return !finished && !reading && static_cast(queue.size()) <= q_restart; + } + + void DoRestartTask(std::shared_ptr state, util::Mutex::Guard guard) { + // If we get here we are actually going to start a new task so let's create a + // task_finished future for it + state->task_finished = Future<>::Make(); + state->reading = true; + auto spawn_status = io_executor->Spawn( + [state]() { BackgroundGenerator::WorkerTask(std::move(state)); }); + if (!spawn_status.ok()) { + // If we can't spawn a new task then send an error to the consumer (either via a + // waiting future or the queue) and mark ourselves finished + state->finished = true; + state->task_finished = Future<>(); + if (waiting_future.has_value()) { + auto to_deliver = std::move(waiting_future.value()); + waiting_future.reset(); + guard.Unlock(); + to_deliver.MarkFinished(spawn_status); + } else { + ClearQueue(); + queue.push(spawn_status); + } + } + } + + Future RestartTask(std::shared_ptr state, util::Mutex::Guard guard, + Future next) { + if (TaskIsRunning()) { + // If the task is still cleaning up we need to wait for it to finish before + // restarting. We also want to block the consumer until we've restarted the + // reader to avoid multiple restarts + return task_finished.Then([state, next]() { + // This may appear dangerous (recursive mutex) but we should be guaranteed the + // outer guard has been released by this point. We know... 
+ // * task_finished is not already finished (it would be invalid in that case) + // * task_finished will not be marked complete until we've given up the mutex + auto guard_ = state->mutex.Lock(); + state->DoRestartTask(state, std::move(guard_)); + return next; + }); + } + // Otherwise we can restart immediately + DoRestartTask(std::move(state), std::move(guard)); + return next; + } + + internal::Executor* io_executor; + const int max_q; + const int q_restart; + Iterator it; + std::atomic worker_thread_id{kUnlikelyThreadId}; + + // If true, the task is actively pumping items from the queue and does not need a + // restart + bool reading; + // Set to true when a terminal item arrives + bool finished; + // Signal to the background task to end early because consumers have given up on it + bool should_shutdown; + // If the queue is empty, the consumer will create a waiting future and wait for it + std::queue> queue; + std::optional> waiting_future; + // Every background task is given a future to complete when it is entirely finished + // processing and ready for the next task to start or for State to be destroyed + Future<> task_finished; + util::Mutex mutex; + }; + + // Cleanup task that will be run when all consumer references to the generator are lost + struct Cleanup { + explicit Cleanup(State* state) : state(state) {} + ~Cleanup() { + /// TODO: Once ARROW-13109 is available then we can be force consumers to spawn and + /// there is no need to perform this check. + /// + /// It's a deadlock if we enter cleanup from + /// the worker thread but it can happen if the consumer doesn't transfer away + assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId()); + Future<> finish_fut; + { + auto lock = state->mutex.Lock(); + if (!state->TaskIsRunning()) { + return; + } + // Signal the current task to stop and wait for it to finish + state->should_shutdown = true; + finish_fut = state->task_finished; + } + // Using future as a condition variable here + Status st = finish_fut.status(); + ARROW_UNUSED(st); + } + State* state; + }; + + static void WorkerTask(std::shared_ptr state) { + state->worker_thread_id.store(::arrow::internal::GetThreadId()); + // We need to capture the state to read while outside the mutex + bool reading = true; + while (reading) { + auto next = state->it.Next(); + // Need to capture state->waiting_future inside the mutex to mark finished outside + Future waiting_future; + { + auto guard = state->mutex.Lock(); + + if (state->should_shutdown) { + state->finished = true; + break; + } + + if (!next.ok() || IsIterationEnd(*next)) { + // Terminal item. Mark finished to true, send this last item, and quit + state->finished = true; + if (!next.ok()) { + state->ClearQueue(); + } + } + // At this point we are going to send an item. Either we will add it to the + // queue or deliver it to a waiting future. + if (state->waiting_future.has_value()) { + waiting_future = std::move(state->waiting_future.value()); + state->waiting_future.reset(); + } else { + state->queue.push(std::move(next)); + // We just filled up the queue so it is time to quit. We may need to notify + // a cleanup task so we transition to Quitting + if (static_cast(state->queue.size()) >= state->max_q) { + state->reading = false; + } + } + reading = state->reading && !state->finished; + } + // This should happen outside the mutex. Presumably there is a + // transferring generator on the other end that will quickly transfer any + // callbacks off of this thread so we can continue looping. 
Still, best not to + // rely on that + if (waiting_future.is_valid()) { + waiting_future.MarkFinished(next); + } + } + // Once we've sent our last item we can notify any waiters that we are done and so + // either state can be cleaned up or a new background task can be started + Future<> task_finished; + { + auto guard = state->mutex.Lock(); + // After we give up the mutex state can be safely deleted. We will no longer + // reference it. We can safely transition to idle now. + task_finished = state->task_finished; + state->task_finished = Future<>(); + state->worker_thread_id.store(kUnlikelyThreadId); + } + task_finished.MarkFinished(); + } + + std::shared_ptr state_; + // state_ is held by both the generator and the background thread so it won't be cleaned + // up when all consumer references are relinquished. cleanup_ is only held by the + // generator so it will be destructed when the last consumer reference is gone. We use + // this to cleanup / stop the background generator in case the consuming end stops + // listening (e.g. due to a downstream error) + std::shared_ptr cleanup_; +}; + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Create an AsyncGenerator by iterating over an Iterator on a background +/// thread +/// +/// The parameter max_q and q_restart control queue size and background thread task +/// management. If the background task is fast you typically don't want it creating a +/// thread task for every item. Instead the background thread will run until it fills +/// up a readahead queue. +/// +/// Once the queue has filled up the background thread task will terminate (allowing other +/// I/O tasks to use the thread). Once the queue has been drained enough (specified by +/// q_restart) then the background thread task will be restarted. If q_restart is too low +/// then you may exhaust the queue waiting for the background thread task to start running +/// again. If it is too high then it will be constantly stopping and restarting the +/// background queue task +/// +/// The "background thread" is a logical thread and will run as tasks on the io_executor. +/// This thread may stop and start when the queue fills up but there will only be one +/// active background thread task at any given time. You MUST transfer away from this +/// background generator. Otherwise there could be a race condition if a callback on the +/// background thread deletes the last consumer reference to the background generator. You +/// can transfer onto the same executor as the background thread, it is only necessary to +/// create a new thread task, not to switch executors. +/// +/// This generator is not async-reentrant +/// +/// This generator will queue up to max_q blocks +template +static Result> MakeBackgroundGenerator( + Iterator iterator, internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) { + if (max_q < q_restart) { + return Status::Invalid("max_q must be >= q_restart"); + } + return BackgroundGenerator(std::move(iterator), io_executor, max_q, q_restart); +} + +/// \brief Create an AsyncGenerator by iterating over an Iterator synchronously +/// +/// This should only be used if you know the source iterator does not involve any +/// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending +/// on the complexity of the iterator, it may lead to deadlock. 
+/// +/// If you are not certain if there will be I/O then it is better to use +/// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator +/// equivalent of Future::MakeFinished +/// +/// It is impossible to call this in an async-reentrant manner since the returned +/// future will be completed by the time it is polled. +/// +/// This generator does not queue +template +static Result> MakeBlockingGenerator( + std::shared_ptr> iterator) { + return [it = std::move(iterator)]() mutable -> Future { + return Future::MakeFinished(it->Next()); + }; +} + +template +static Result> MakeBlockingGenerator(Iterator iterator) { + return MakeBlockingGenerator(std::make_shared>(std::move(iterator))); +} + +/// \see MakeGeneratorIterator +template +class GeneratorIterator { + public: + explicit GeneratorIterator(AsyncGenerator source) : source_(std::move(source)) {} + + Result Next() { return source_().result(); } + + private: + AsyncGenerator source_; +}; + +/// \brief Convert an AsyncGenerator to an Iterator which blocks until each future +/// is finished +template +Iterator MakeGeneratorIterator(AsyncGenerator source) { + return Iterator(GeneratorIterator(std::move(source))); +} + +/// \brief Add readahead to an iterator using a background thread. +/// +/// Under the hood this is converting the iterator to a generator using +/// MakeBackgroundGenerator, adding readahead to the converted generator with +/// MakeReadaheadGenerator, and then converting back to an iterator using +/// MakeGeneratorIterator. +template +Result> MakeReadaheadIterator(Iterator it, int readahead_queue_size) { + ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1)); + auto max_q = readahead_queue_size; + auto q_restart = std::max(1, max_q / 2); + ARROW_ASSIGN_OR_RAISE( + auto background_generator, + MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart)); + // Capture io_executor to keep it alive as long as owned_bg_generator is still + // referenced + AsyncGenerator owned_bg_generator = [io_executor, background_generator]() { + return background_generator(); + }; + return MakeGeneratorIterator(std::move(owned_bg_generator)); +} + +/// \brief Make a generator that returns a single pre-generated future +/// +/// This generator is async-reentrant. +template +std::function()> MakeSingleFutureGenerator(Future future) { + assert(future.is_valid()); + auto state = std::make_shared>(std::move(future)); + return [state]() -> Future { + auto fut = std::move(*state); + if (fut.is_valid()) { + return fut; + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that immediately ends. +/// +/// This generator is async-reentrant. +template +std::function()> MakeEmptyGenerator() { + return []() -> Future { return AsyncGeneratorEnd(); }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This generator is async-reentrant. +template +AsyncGenerator MakeFailingGenerator(Status st) { + assert(!st.ok()); + auto state = std::make_shared(std::move(st)); + return [state]() -> Future { + auto st = std::move(*state); + if (!st.ok()) { + return std::move(st); + } else { + return AsyncGeneratorEnd(); + } + }; +} + +/// \brief Make a generator that always fails with a given error +/// +/// This overload allows inferring the return type from the argument. 
+template +AsyncGenerator MakeFailingGenerator(const Result& result) { + return MakeFailingGenerator(result.status()); +} + +/// \brief Prepend initial_values onto a generator +/// +/// This generator is async-reentrant but will buffer requests and will not +/// pull from following_values async-reentrantly. +template +AsyncGenerator MakeGeneratorStartsWith(std::vector initial_values, + AsyncGenerator following_values) { + auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values)); + auto gen_gen = MakeVectorGenerator>( + {std::move(initial_values_vec_gen), std::move(following_values)}); + return MakeConcatenatedGenerator(std::move(gen_gen)); +} + +template +struct CancellableGenerator { + Future operator()() { + if (stop_token.IsStopRequested()) { + return stop_token.Poll(); + } + return source(); + } + + AsyncGenerator source; + StopToken stop_token; +}; + +/// \brief Allow an async generator to be cancelled +/// +/// This generator is async-reentrant +template +AsyncGenerator MakeCancellable(AsyncGenerator source, StopToken stop_token) { + return CancellableGenerator{std::move(source), std::move(stop_token)}; +} + +template +class DefaultIfEmptyGenerator { + public: + DefaultIfEmptyGenerator(AsyncGenerator source, T or_value) + : state_(std::make_shared(std::move(source), std::move(or_value))) {} + + Future operator()() { + if (state_->first) { + state_->first = false; + struct { + T or_value; + + Result operator()(const T& value) { + if (IterationTraits::IsEnd(value)) { + return std::move(or_value); + } + return value; + } + } Continuation; + Continuation.or_value = std::move(state_->or_value); + return state_->source().Then(std::move(Continuation)); + } + return state_->source(); + } + + private: + struct State { + AsyncGenerator source; + T or_value; + bool first; + State(AsyncGenerator source_, T or_value_) + : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {} + }; + std::shared_ptr state_; +}; + +/// \brief If the generator is empty, return the given value, else +/// forward the values from the generator. +/// +/// This generator is async-reentrant. +template +AsyncGenerator MakeDefaultIfEmptyGenerator(AsyncGenerator source, T or_value) { + return DefaultIfEmptyGenerator(std::move(source), std::move(or_value)); +} +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h new file mode 100644 index 0000000000000000000000000000000000000000..d9ed63bdbce2260e6c717769a5f91dfe1cca9f89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h @@ -0,0 +1,460 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/cancel.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/iterator.h" +#include "arrow/util/mutex.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/tracing.h" + +namespace arrow { + +using internal::FnOnce; + +namespace util { + +/// A utility which keeps tracks of, and schedules, asynchronous tasks +/// +/// An asynchronous task has a synchronous component and an asynchronous component. +/// The synchronous component typically schedules some kind of work on an external +/// resource (e.g. the I/O thread pool or some kind of kernel-based asynchronous +/// resource like io_uring). The asynchronous part represents the work +/// done on that external resource. Executing the synchronous part will be referred +/// to as "submitting the task" since this usually includes submitting the asynchronous +/// portion to the external thread pool. +/// +/// By default the scheduler will submit the task (execute the synchronous part) as +/// soon as it is added, assuming the underlying thread pool hasn't terminated or the +/// scheduler hasn't aborted. In this mode, the scheduler is simply acting as +/// a simple task group. +/// +/// A task scheduler starts with an initial task. That task, and all subsequent tasks +/// are free to add subtasks. Once all submitted tasks finish the scheduler will +/// finish. Note, it is not an error to add additional tasks after a scheduler has +/// aborted. These tasks will be ignored and never submitted. The scheduler returns a +/// future which will complete when all submitted tasks have finished executing. Once all +/// tasks have been finished the scheduler is invalid and should no longer be used. +/// +/// Task failure (either the synchronous portion or the asynchronous portion) will cause +/// the scheduler to enter an aborted state. The first such failure will be reported in +/// the final task future. +class ARROW_EXPORT AsyncTaskScheduler { + public: + /// Destructor for AsyncTaskScheduler + /// + /// The lifetime of the task scheduled is managed automatically. The scheduler + /// will remain valid while any tasks are running (and can always be safely accessed) + /// within tasks) and will be destroyed as soon as all tasks have finished. + virtual ~AsyncTaskScheduler() = default; + /// An interface for a task + /// + /// Users may want to override this, for example, to add priority + /// information for use by a queue. + class Task { + public: + virtual ~Task() = default; + /// Submit the task + /// + /// This will be called by the scheduler at most once when there + /// is space to run the task. This is expected to be a fairly quick + /// function that simply submits the actual task work to an external + /// resource (e.g. I/O thread pool). + /// + /// If this call fails then the scheduler will enter an aborted state. + virtual Result> operator()() = 0; + /// The cost of the task + /// + /// A ThrottledAsyncTaskScheduler can be used to limit the number of concurrent tasks. + /// A custom cost may be used, for example, if you would like to limit the number of + /// tasks based on the total expected RAM usage of the tasks (this is done in the + /// scanner) + virtual int cost() const { return 1; } + /// The name of the task + /// + /// This is used for debugging and traceability. The returned view must remain + /// valid for the lifetime of the task. 
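+    ///
+    /// Returning a string literal is the simplest way to satisfy the lifetime
+    /// requirement, e.g. (an illustrative sketch):
+    ///
+    ///   std::string_view name() const override { return "my-scan-task"; }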
+ virtual std::string_view name() const = 0; + + /// a span tied to the lifetime of the task, for internal use only + tracing::Span span; + }; + + /// Add a task to the scheduler + /// + /// If the scheduler is in an aborted state this call will return false and the task + /// will never be run. This is harmless and does not need to be guarded against. + /// + /// The return value for this call can usually be ignored. There is little harm in + /// attempting to add tasks to an aborted scheduler. It is only included for callers + /// that want to avoid future task generation to save effort. + /// + /// \param task the task to submit + /// + /// A task's name must remain valid for the duration of the task. It is used for + /// debugging (e.g. when debugging a deadlock to see which tasks still remain) and for + /// traceability (the name will be used for spans assigned to the task) + /// + /// \return true if the task was submitted or queued, false if the task was ignored + virtual bool AddTask(std::unique_ptr task) = 0; + + /// Adds an async generator to the scheduler + /// + /// The async generator will be visited, one item at a time. Submitting a task + /// will consist of polling the generator for the next future. The generator's future + /// will then represent the task itself. + /// + /// This visits the task serially without readahead. If readahead or parallelism + /// is desired then it should be added in the generator itself. + /// + /// The generator itself will be kept alive until all tasks have been completed. + /// However, if the scheduler is aborted, the generator will be destroyed as soon as the + /// next item would be requested. + /// + /// \param generator the generator to submit to the scheduler + /// \param visitor a function which visits each generator future as it completes + /// \param name a name which will be used for each submitted task + template + bool AddAsyncGenerator(std::function()> generator, + std::function visitor, std::string_view name); + + template + struct SimpleTask : public Task { + SimpleTask(Callable callable, std::string_view name) + : callable(std::move(callable)), name_(name) {} + SimpleTask(Callable callable, std::string name) + : callable(std::move(callable)), owned_name_(std::move(name)) { + name_ = *owned_name_; + } + Result> operator()() override { return callable(); } + std::string_view name() const override { return name_; } + Callable callable; + std::string_view name_; + std::optional owned_name_; + }; + + /// Add a task with cost 1 to the scheduler + /// + /// \param callable a "submit" function that should return a future + /// \param name a name for the task + /// + /// `name` must remain valid until the task has been submitted AND the returned + /// future completes. It is used for debugging and tracing. + /// + /// \see AddTask for more details + template + bool AddSimpleTask(Callable callable, std::string_view name) { + return AddTask(std::make_unique>(std::move(callable), name)); + } + + /// Add a task with cost 1 to the scheduler + /// + /// This is an overload of \see AddSimpleTask that keeps `name` alive + /// in the task. + template + bool AddSimpleTask(Callable callable, std::string name) { + return AddTask( + std::make_unique>(std::move(callable), std::move(name))); + } + + /// Construct a scheduler + /// + /// \param initial_task The initial task which is responsible for adding + /// the first subtasks to the scheduler. 
+ /// \param abort_callback A callback that will be triggered immediately after a task + /// fails while other tasks may still be running. Nothing needs to be done here, + /// when a task fails the scheduler will stop accepting new tasks and eventually + /// return the error. However, this callback can be used to more quickly end + /// long running tasks that have already been submitted. Defaults to doing + /// nothing. + /// \param stop_token An optional stop token that will allow cancellation of the + /// scheduler. This will be checked before each task is submitted and, in the + /// event of a cancellation, the scheduler will enter an aborted state. This is + /// a graceful cancellation and submitted tasks will still complete. + /// \return A future that will be completed when the initial task and all subtasks have + /// finished. + static Future<> Make( + FnOnce initial_task, + FnOnce abort_callback = [](const Status&) {}, + StopToken stop_token = StopToken::Unstoppable()); + + /// A span tracking execution of the scheduler's tasks, for internal use only + virtual const tracing::Span& span() const = 0; +}; + +class ARROW_EXPORT ThrottledAsyncTaskScheduler : public AsyncTaskScheduler { + public: + /// An interface for a task queue + /// + /// A queue's methods will not be called concurrently + class Queue { + public: + virtual ~Queue() = default; + /// Push a task to the queue + /// + /// \param task the task to enqueue + virtual void Push(std::unique_ptr task) = 0; + /// Pop the next task from the queue + virtual std::unique_ptr Pop() = 0; + /// Peek the next task in the queue + virtual const Task& Peek() = 0; + /// Check if the queue is empty + virtual bool Empty() = 0; + /// Purge the queue of all items + virtual void Purge() = 0; + virtual std::size_t Size() const = 0; + }; + + class Throttle { + public: + virtual ~Throttle() = default; + /// Acquire amt permits + /// + /// If nullopt is returned then the permits were immediately + /// acquired and the caller can proceed. If a future is returned then the caller + /// should wait for the future to complete first. When the returned future completes + /// the permits have NOT been acquired and the caller must call Acquire again + /// + /// \param amt the number of permits to acquire + virtual std::optional> TryAcquire(int amt) = 0; + /// Release amt permits + /// + /// This will possibly complete waiting futures and should probably not be + /// called while holding locks. + /// + /// \param amt the number of permits to release + virtual void Release(int amt) = 0; + + /// The size of the largest task that can run + /// + /// Incoming tasks will have their cost latched to this value to ensure + /// they can still run (although they will be the only thing allowed to + /// run at that time). + virtual int Capacity() = 0; + + /// Pause the throttle + /// + /// Any tasks that have been submitted already will continue. However, no new tasks + /// will be run until the throttle is resumed. + virtual void Pause() = 0; + /// Resume the throttle + /// + /// Allows task to be submitted again. If there is a max_concurrent_cost limit then + /// it will still apply. + virtual void Resume() = 0; + }; + + /// Pause the throttle + /// + /// Any tasks that have been submitted already will continue. However, no new tasks + /// will be run until the throttle is resumed. + virtual void Pause() = 0; + /// Resume the throttle + /// + /// Allows task to be submitted again. If there is a max_concurrent_cost limit then + /// it will still apply. 
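+  ///
+  /// A sketch of a consumer applying backpressure (illustrative only;
+  /// DrainConsumerQueue is a hypothetical downstream step):
+  ///
+  ///   throttled->Pause();   // queued tasks stay queued, running tasks continue
+  ///   DrainConsumerQueue();
+  ///   throttled->Resume();  // submission resumes, subject to the cost limit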
+ virtual void Resume() = 0; + /// Return the number of tasks queued but not yet submitted + virtual std::size_t QueueSize() = 0; + + /// Create a throttled view of a scheduler + /// + /// Tasks added via this view will be subjected to the throttle and, if the tasks cannot + /// run immediately, will be placed into a queue. + /// + /// Although a shared_ptr is returned it should generally be assumed that the caller + /// is being given exclusive ownership. The shared_ptr is used to share the view with + /// queued and submitted tasks and the lifetime of those is unpredictable. It is + /// important the caller keep the returned pointer alive for as long as they plan to add + /// tasks to the view. + /// + /// \param scheduler a scheduler to submit tasks to after throttling + /// + /// This can be the root scheduler, another throttled scheduler, or a task group. These + /// are all composable. + /// + /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time + /// + /// If a task is added that has a cost greater than max_concurrent_cost then its cost + /// will be reduced to max_concurrent_cost so that it is still possible for the task to + /// run. + /// + /// \param queue the queue to use when tasks cannot be submitted + /// + /// By default a FIFO queue will be used. However, a custom queue can be provided if + /// some tasks have higher priority than other tasks. + static std::shared_ptr Make( + AsyncTaskScheduler* scheduler, int max_concurrent_cost, + std::unique_ptr queue = NULLPTR); + + /// @brief Create a ThrottledAsyncTaskScheduler using a custom throttle + /// + /// \see Make + static std::shared_ptr MakeWithCustomThrottle( + AsyncTaskScheduler* scheduler, std::unique_ptr throttle, + std::unique_ptr queue = NULLPTR); +}; + +/// A utility to keep track of a collection of tasks +/// +/// Often it is useful to keep track of some state that only needs to stay alive +/// for some small collection of tasks, or to perform some kind of final cleanup +/// when a collection of tasks is finished. +/// +/// For example, when scanning, we need to keep the file reader alive while all scan +/// tasks run for a given file, and then we can gracefully close it when we finish the +/// file. +class ARROW_EXPORT AsyncTaskGroup : public AsyncTaskScheduler { + public: + /// Destructor for the task group + /// + /// The destructor might trigger the finish callback. If the finish callback fails + /// then the error will be reported as a task on the scheduler. + /// + /// Failure to destroy the async task group will not prevent the scheduler from + /// finishing. If the scheduler finishes before the async task group is done then + /// the finish callback will be run immediately when the async task group finishes. + /// + /// If the scheduler has aborted then the finish callback will not run. + ~AsyncTaskGroup() = default; + /// Create an async task group + /// + /// The finish callback will not run until the task group is destroyed and all + /// tasks are finished so you will generally want to reset / destroy the returned + /// unique_ptr at some point. + /// + /// \param scheduler The underlying scheduler to submit tasks to + /// \param finish_callback A callback that will be run only after the task group has + /// been destroyed and all tasks added by the group have + /// finished. + /// + /// Note: in error scenarios the finish callback may not run. However, it will still, + /// of course, be destroyed. 
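+  ///
+  /// A sketch of typical usage (names are illustrative; `reader` is assumed to
+  /// offer a Status-returning Close and a Future-returning ReadBatchAsync):
+  ///
+  ///   auto group = AsyncTaskGroup::Make(scheduler,
+  ///                                     [reader] { return reader->Close(); });
+  ///   group->AddSimpleTask([reader] { return reader->ReadBatchAsync(); },
+  ///                        "read-batch");
+  ///   group.reset();  // finish callback runs once all added tasks complete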
+ static std::unique_ptr Make(AsyncTaskScheduler* scheduler, + FnOnce finish_callback); +}; + +/// Create a task group that is also throttled +/// +/// This is a utility factory that creates a throttled view of a scheduler and then +/// wraps that throttled view with a task group that destroys the throttle when finished. +/// +/// \see ThrottledAsyncTaskScheduler +/// \see AsyncTaskGroup +/// \param target the underlying scheduler to submit tasks to +/// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time +/// \param queue the queue to use when tasks cannot be submitted +/// \param finish_callback A callback that will be run only after the task group has +/// been destroyed and all tasks added by the group have finished +ARROW_EXPORT std::unique_ptr MakeThrottledAsyncTaskGroup( + AsyncTaskScheduler* target, int max_concurrent_cost, + std::unique_ptr queue, + FnOnce finish_callback); + +// Defined down here to avoid circular dependency between AsyncTaskScheduler and +// AsyncTaskGroup +template +bool AsyncTaskScheduler::AddAsyncGenerator(std::function()> generator, + std::function visitor, + std::string_view name) { + struct State { + State(std::function()> generator, std::function visitor, + std::unique_ptr task_group, std::string_view name) + : generator(std::move(generator)), + visitor(std::move(visitor)), + task_group(std::move(task_group)), + name(name) {} + std::function()> generator; + std::function visitor; + std::unique_ptr task_group; + std::string_view name; + }; + struct SubmitTask : public Task { + explicit SubmitTask(std::unique_ptr state_holder) + : state_holder(std::move(state_holder)) {} + + struct SubmitTaskCallback { + SubmitTaskCallback(std::unique_ptr state_holder, Future<> task_completion) + : state_holder(std::move(state_holder)), + task_completion(std::move(task_completion)) {} + void operator()(const Result& maybe_item) { + if (!maybe_item.ok()) { + task_completion.MarkFinished(maybe_item.status()); + return; + } + const auto& item = *maybe_item; + if (IsIterationEnd(item)) { + task_completion.MarkFinished(); + return; + } + Status visit_st = state_holder->visitor(item); + if (!visit_st.ok()) { + task_completion.MarkFinished(std::move(visit_st)); + return; + } + state_holder->task_group->AddTask( + std::make_unique(std::move(state_holder))); + task_completion.MarkFinished(); + } + std::unique_ptr state_holder; + Future<> task_completion; + }; + + Result> operator()() { + Future<> task = Future<>::Make(); + // Consume as many items as we can (those that are already finished) + // synchronously to avoid recursion / stack overflow. 
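+      // Note: TryAddCallback invokes the supplied factory and attaches the
+      // resulting callback only if the future is still pending (returning
+      // true).  If the future has already finished it returns false and we
+      // process the result inline below, looping instead of recursing.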
+ while (true) { + Future next = state_holder->generator(); + if (next.TryAddCallback( + [&] { return SubmitTaskCallback(std::move(state_holder), task); })) { + return task; + } + ARROW_ASSIGN_OR_RAISE(T item, next.result()); + if (IsIterationEnd(item)) { + task.MarkFinished(); + return task; + } + ARROW_RETURN_NOT_OK(state_holder->visitor(item)); + } + } + + std::string_view name() const { return state_holder->name; } + + std::unique_ptr state_holder; + }; + std::unique_ptr task_group = + AsyncTaskGroup::Make(this, [] { return Status::OK(); }); + AsyncTaskGroup* task_group_view = task_group.get(); + std::unique_ptr state_holder = std::make_unique( + std::move(generator), std::move(visitor), std::move(task_group), name); + task_group_view->AddTask(std::make_unique(std::move(state_holder))); + return true; +} + +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..d8a91ea76b3906ec8d8b55bdb282cdb8da874cfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h @@ -0,0 +1,492 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_traits.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +enum class DecimalStatus { + kSuccess, + kDivideByZero, + kOverflow, + kRescaleDataLoss, +}; + +template +class ARROW_EXPORT GenericBasicDecimal { + protected: + struct LittleEndianArrayTag {}; + +#if ARROW_LITTLE_ENDIAN + static constexpr int kHighWordIndex = NWORDS - 1; + static constexpr int kLowWordIndex = 0; +#else + static constexpr int kHighWordIndex = 0; + static constexpr int kLowWordIndex = NWORDS - 1; +#endif + + public: + static constexpr int kBitWidth = BIT_WIDTH; + static constexpr int kByteWidth = kBitWidth / 8; + static constexpr int kNumWords = NWORDS; + + // A constructor tag to introduce a little-endian encoded array + static constexpr LittleEndianArrayTag LittleEndianArray{}; + + using WordArray = std::array; + + /// \brief Empty constructor creates a decimal with a value of 0. + constexpr GenericBasicDecimal() noexcept : array_({0}) {} + + /// \brief Create a decimal from the two's complement representation. + /// + /// Input array is assumed to be in native endianness. + explicit constexpr GenericBasicDecimal(const WordArray& array) noexcept + : array_(array) {} + + /// \brief Create a decimal from the two's complement representation. 
+ /// + /// Input array is assumed to be in little endianness, with native endian elements. + GenericBasicDecimal(LittleEndianArrayTag, const WordArray& array) noexcept + : GenericBasicDecimal(bit_util::little_endian::ToNative(array)) {} + + /// \brief Create a decimal from any integer not wider than 64 bits. + template ::value && (sizeof(T) <= sizeof(uint64_t)), T>::type> + constexpr GenericBasicDecimal(T value) noexcept // NOLINT(runtime/explicit) + : array_(WordsFromLowBits(value)) {} + + /// \brief Create a decimal from an array of bytes. + /// + /// Bytes are assumed to be in native-endian byte order. + explicit GenericBasicDecimal(const uint8_t* bytes) { + memcpy(array_.data(), bytes, sizeof(array_)); + } + + /// \brief Get the bits of the two's complement representation of the number. + /// + /// The elements are in native endian order. The bits within each uint64_t element + /// are in native endian order. For example, on a little endian machine, + /// BasicDecimal128(123).native_endian_array() = {123, 0}; + /// but on a big endian machine, + /// BasicDecimal128(123).native_endian_array() = {0, 123}; + constexpr const WordArray& native_endian_array() const { return array_; } + + /// \brief Get the bits of the two's complement representation of the number. + /// + /// The elements are in little endian order. However, the bits within each + /// uint64_t element are in native endian order. + /// For example, BasicDecimal128(123).little_endian_array() = {123, 0}; + WordArray little_endian_array() const { + return bit_util::little_endian::FromNative(array_); + } + + const uint8_t* native_endian_bytes() const { + return reinterpret_cast(array_.data()); + } + + uint8_t* mutable_native_endian_bytes() { + return reinterpret_cast(array_.data()); + } + + /// \brief Return the raw bytes of the value in native-endian byte order. + std::array ToBytes() const { + std::array out{{0}}; + memcpy(out.data(), array_.data(), kByteWidth); + return out; + } + + /// \brief Copy the raw bytes of the value in native-endian byte order. + void ToBytes(uint8_t* out) const { memcpy(out, array_.data(), kByteWidth); } + + /// Return 1 if positive or zero, -1 if strictly negative. + int64_t Sign() const { + return 1 | (static_cast(array_[kHighWordIndex]) >> 63); + } + + bool IsNegative() const { return static_cast(array_[kHighWordIndex]) < 0; } + + explicit operator bool() const { return array_ != WordArray{}; } + + friend bool operator==(const GenericBasicDecimal& left, + const GenericBasicDecimal& right) { + return left.array_ == right.array_; + } + + friend bool operator!=(const GenericBasicDecimal& left, + const GenericBasicDecimal& right) { + return left.array_ != right.array_; + } + + protected: + WordArray array_; + + template + static constexpr uint64_t SignExtend(T low_bits) noexcept { + return low_bits >= T{} ? uint64_t{0} : ~uint64_t{0}; + } + + template + static constexpr WordArray WordsFromLowBits(T low_bits) { + WordArray words{}; + if (low_bits < T{}) { + for (auto& word : words) { + word = ~uint64_t{0}; + } + } + words[kLowWordIndex] = static_cast(low_bits); + return words; + } +}; + +/// Represents a signed 128-bit integer in two's complement. +/// +/// This class is also compiled into LLVM IR - so, it should not have cpp references like +/// streams and boost. 
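+///
+/// Example (illustrative): the two's complement value -1 has all bits set, so
+/// it can be written either via the (high, low) constructor or the implicit
+/// integer constructor:
+///
+///   BasicDecimal128 a(/*high=*/-1, /*low=*/std::numeric_limits<uint64_t>::max());
+///   BasicDecimal128 b(-1);
+///   assert(a == b && a.Sign() == -1);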
+class ARROW_EXPORT BasicDecimal128 : public GenericBasicDecimal { + public: + static constexpr int kMaxPrecision = 38; + static constexpr int kMaxScale = 38; + + using GenericBasicDecimal::GenericBasicDecimal; + + constexpr BasicDecimal128() noexcept : GenericBasicDecimal() {} + + /// \brief Create a BasicDecimal128 from the two's complement representation. +#if ARROW_LITTLE_ENDIAN + constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept + : BasicDecimal128(WordArray{low, static_cast(high)}) {} +#else + constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept + : BasicDecimal128(WordArray{static_cast(high), low}) {} +#endif + + /// \brief Negate the current value (in-place) + BasicDecimal128& Negate(); + + /// \brief Absolute value (in-place) + BasicDecimal128& Abs(); + + /// \brief Absolute value + static BasicDecimal128 Abs(const BasicDecimal128& left); + + /// \brief Add a number to this one. The result is truncated to 128 bits. + BasicDecimal128& operator+=(const BasicDecimal128& right); + + /// \brief Subtract a number from this one. The result is truncated to 128 bits. + BasicDecimal128& operator-=(const BasicDecimal128& right); + + /// \brief Multiply this number by another number. The result is truncated to 128 bits. + BasicDecimal128& operator*=(const BasicDecimal128& right); + + /// Divide this number by right and return the result. + /// + /// This operation is not destructive. + /// The answer rounds to zero. Signs work like: + /// 21 / 5 -> 4, 1 + /// -21 / 5 -> -4, -1 + /// 21 / -5 -> -4, 1 + /// -21 / -5 -> 4, -1 + /// \param[in] divisor the number to divide by + /// \param[out] result the quotient + /// \param[out] remainder the remainder after the division + DecimalStatus Divide(const BasicDecimal128& divisor, BasicDecimal128* result, + BasicDecimal128* remainder) const; + + /// \brief In-place division. + BasicDecimal128& operator/=(const BasicDecimal128& right); + + /// \brief Bitwise "or" between two BasicDecimal128. + BasicDecimal128& operator|=(const BasicDecimal128& right); + + /// \brief Bitwise "and" between two BasicDecimal128. + BasicDecimal128& operator&=(const BasicDecimal128& right); + + /// \brief Shift left by the given number of bits. + BasicDecimal128& operator<<=(uint32_t bits); + + BasicDecimal128 operator<<(uint32_t bits) const { + auto res = *this; + res <<= bits; + return res; + } + + /// \brief Shift right by the given number of bits. + /// + /// Negative values will sign-extend. + BasicDecimal128& operator>>=(uint32_t bits); + + BasicDecimal128 operator>>(uint32_t bits) const { + auto res = *this; + res >>= bits; + return res; + } + + /// \brief Get the high bits of the two's complement representation of the number. + constexpr int64_t high_bits() const { +#if ARROW_LITTLE_ENDIAN + return static_cast(array_[1]); +#else + return static_cast(array_[0]); +#endif + } + + /// \brief Get the low bits of the two's complement representation of the number. + constexpr uint64_t low_bits() const { +#if ARROW_LITTLE_ENDIAN + return array_[0]; +#else + return array_[1]; +#endif + } + + /// \brief separate the integer and fractional parts for the given scale. + void GetWholeAndFraction(int32_t scale, BasicDecimal128* whole, + BasicDecimal128* fraction) const; + + /// \brief Scale multiplier for given scale value. + static const BasicDecimal128& GetScaleMultiplier(int32_t scale); + /// \brief Half-scale multiplier for given scale value. 
+ static const BasicDecimal128& GetHalfScaleMultiplier(int32_t scale); + + /// \brief Convert BasicDecimal128 from one scale to another + DecimalStatus Rescale(int32_t original_scale, int32_t new_scale, + BasicDecimal128* out) const; + + /// \brief Scale up. + BasicDecimal128 IncreaseScaleBy(int32_t increase_by) const; + + /// \brief Scale down. + /// - If 'round' is true, the right-most digits are dropped and the result value is + /// rounded up (+1 for +ve, -1 for -ve) based on the value of the dropped digits + /// (>= 10^reduce_by / 2). + /// - If 'round' is false, the right-most digits are simply dropped. + BasicDecimal128 ReduceScaleBy(int32_t reduce_by, bool round = true) const; + + /// \brief Whether this number fits in the given precision + /// + /// Return true if the number of significant digits is less or equal to `precision`. + bool FitsInPrecision(int32_t precision) const; + + /// \brief count the number of leading binary zeroes. + int32_t CountLeadingBinaryZeros() const; + + /// \brief Get the maximum valid unscaled decimal value. + static const BasicDecimal128& GetMaxValue(); + + /// \brief Get the maximum valid unscaled decimal value for the given precision. + static BasicDecimal128 GetMaxValue(int32_t precision); + + /// \brief Get the maximum decimal value (is not a valid value). + static constexpr BasicDecimal128 GetMaxSentinel() { + return BasicDecimal128(/*high=*/std::numeric_limits::max(), + /*low=*/std::numeric_limits::max()); + } + /// \brief Get the minimum decimal value (is not a valid value). + static constexpr BasicDecimal128 GetMinSentinel() { + return BasicDecimal128(/*high=*/std::numeric_limits::min(), + /*low=*/std::numeric_limits::min()); + } +}; + +ARROW_EXPORT bool operator<(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator<=(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator>(const BasicDecimal128& left, const BasicDecimal128& right); +ARROW_EXPORT bool operator>=(const BasicDecimal128& left, const BasicDecimal128& right); + +ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& operand); +ARROW_EXPORT BasicDecimal128 operator~(const BasicDecimal128& operand); +ARROW_EXPORT BasicDecimal128 operator+(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator*(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator/(const BasicDecimal128& left, + const BasicDecimal128& right); +ARROW_EXPORT BasicDecimal128 operator%(const BasicDecimal128& left, + const BasicDecimal128& right); + +class ARROW_EXPORT BasicDecimal256 : public GenericBasicDecimal { + public: + using GenericBasicDecimal::GenericBasicDecimal; + + static constexpr int kMaxPrecision = 76; + static constexpr int kMaxScale = 76; + + constexpr BasicDecimal256() noexcept : GenericBasicDecimal() {} + + explicit BasicDecimal256(const BasicDecimal128& value) noexcept + : BasicDecimal256(bit_util::little_endian::ToNative( + {value.low_bits(), static_cast(value.high_bits()), + SignExtend(value.high_bits()), SignExtend(value.high_bits())})) {} + + /// \brief Negate the current value (in-place) + BasicDecimal256& Negate(); + + /// \brief Absolute value (in-place) + BasicDecimal256& Abs(); + + /// \brief Absolute value + static BasicDecimal256 Abs(const BasicDecimal256& left); + + /// \brief Add a number to this one. 
The result is truncated to 256 bits.
+  BasicDecimal256& operator+=(const BasicDecimal256& right);
+
+  /// \brief Subtract a number from this one. The result is truncated to 256 bits.
+  BasicDecimal256& operator-=(const BasicDecimal256& right);
+
+  /// \brief Get the lowest bits of the two's complement representation of the number.
+  uint64_t low_bits() const { return bit_util::little_endian::Make(array_)[0]; }
+
+  /// \brief Separate the integer and fractional parts for the given scale.
+  void GetWholeAndFraction(int32_t scale, BasicDecimal256* whole,
+                           BasicDecimal256* fraction) const;
+
+  /// \brief Scale multiplier for given scale value.
+  static const BasicDecimal256& GetScaleMultiplier(int32_t scale);
+  /// \brief Half-scale multiplier for given scale value.
+  static const BasicDecimal256& GetHalfScaleMultiplier(int32_t scale);
+
+  /// \brief Convert BasicDecimal256 from one scale to another
+  DecimalStatus Rescale(int32_t original_scale, int32_t new_scale,
+                        BasicDecimal256* out) const;
+
+  /// \brief Scale up.
+  BasicDecimal256 IncreaseScaleBy(int32_t increase_by) const;
+
+  /// \brief Scale down.
+  /// - If 'round' is true, the right-most digits are dropped and the result value is
+  ///   rounded up (+1 for positive, -1 for negative) based on the value of the
+  ///   dropped digits (>= 10^reduce_by / 2).
+  /// - If 'round' is false, the right-most digits are simply dropped.
+  BasicDecimal256 ReduceScaleBy(int32_t reduce_by, bool round = true) const;
+
+  /// \brief Whether this number fits in the given precision
+  ///
+  /// Return true if the number of significant digits is less than or equal to
+  /// `precision`.
+  bool FitsInPrecision(int32_t precision) const;
+
+  /// \brief Multiply this number by another number. The result is truncated to 256 bits.
+  BasicDecimal256& operator*=(const BasicDecimal256& right);
+
+  /// Divide this number by right and return the result.
+  ///
+  /// This operation is not destructive.
+  /// The answer rounds to zero. Signs work like:
+  ///   21 /  5 ->  4,  1
+  ///  -21 /  5 -> -4, -1
+  ///   21 / -5 -> -4,  1
+  ///  -21 / -5 ->  4, -1
+  /// \param[in] divisor the number to divide by
+  /// \param[out] result the quotient
+  /// \param[out] remainder the remainder after the division
+  DecimalStatus Divide(const BasicDecimal256& divisor, BasicDecimal256* result,
+                       BasicDecimal256* remainder) const;
+
+  /// \brief Shift left by the given number of bits.
+  BasicDecimal256& operator<<=(uint32_t bits);
+
+  BasicDecimal256 operator<<(uint32_t bits) const {
+    auto res = *this;
+    res <<= bits;
+    return res;
+  }
+
+  /// \brief Shift right by the given number of bits.
+  ///
+  /// Negative values will sign-extend.
+  BasicDecimal256& operator>>=(uint32_t bits);
+
+  BasicDecimal256 operator>>(uint32_t bits) const {
+    auto res = *this;
+    res >>= bits;
+    return res;
+  }
+
+  /// \brief In-place division.
+  BasicDecimal256& operator/=(const BasicDecimal256& right);
+
+  /// \brief Get the maximum valid unscaled decimal value for the given precision.
+  static BasicDecimal256 GetMaxValue(int32_t precision);
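+
+  // Usage sketch (illustrative only; not part of the upstream header):
+  // division truncates toward zero, as documented for Divide() above.
+  //
+  //   BasicDecimal256 num(-21), den(5);
+  //   BasicDecimal256 quot, rem;
+  //   if (num.Divide(den, &quot, &rem) == DecimalStatus::kSuccess) {
+  //     // quot == -4 and rem == -1
+  //   }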
+
+  /// \brief Get the maximum decimal value (is not a valid value).
+  static constexpr BasicDecimal256 GetMaxSentinel() {
+#if ARROW_LITTLE_ENDIAN
+    return BasicDecimal256({std::numeric_limits<uint64_t>::max(),
+                            std::numeric_limits<uint64_t>::max(),
+                            std::numeric_limits<uint64_t>::max(),
+                            static_cast<uint64_t>(std::numeric_limits<int64_t>::max())});
+#else
+    return BasicDecimal256({static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
+                            std::numeric_limits<uint64_t>::max(),
+                            std::numeric_limits<uint64_t>::max(),
+                            std::numeric_limits<uint64_t>::max()});
+#endif
+  }
+  /// \brief Get the minimum decimal value (is not a valid value).
+  static constexpr BasicDecimal256 GetMinSentinel() {
+#if ARROW_LITTLE_ENDIAN
+    return BasicDecimal256(
+        {0, 0, 0, static_cast<uint64_t>(std::numeric_limits<int64_t>::min())});
+#else
+    return BasicDecimal256(
+        {static_cast<uint64_t>(std::numeric_limits<int64_t>::min()), 0, 0, 0});
+#endif
+  }
+};
+
+ARROW_EXPORT bool operator<(const BasicDecimal256& left, const BasicDecimal256& right);
+
+ARROW_EXPORT inline bool operator<=(const BasicDecimal256& left,
+                                    const BasicDecimal256& right) {
+  return !operator<(right, left);
+}
+
+ARROW_EXPORT inline bool operator>(const BasicDecimal256& left,
+                                   const BasicDecimal256& right) {
+  return operator<(right, left);
+}
+
+ARROW_EXPORT inline bool operator>=(const BasicDecimal256& left,
+                                    const BasicDecimal256& right) {
+  return !operator<(left, right);
+}
+
+ARROW_EXPORT BasicDecimal256 operator-(const BasicDecimal256& operand);
+ARROW_EXPORT BasicDecimal256 operator~(const BasicDecimal256& operand);
+ARROW_EXPORT BasicDecimal256 operator+(const BasicDecimal256& left,
+                                       const BasicDecimal256& right);
+ARROW_EXPORT BasicDecimal256 operator*(const BasicDecimal256& left,
+                                       const BasicDecimal256& right);
+ARROW_EXPORT BasicDecimal256 operator/(const BasicDecimal256& left,
+                                       const BasicDecimal256& right);
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..75639ac11ae41acb5e23e3eaa91901f41472fdc6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h
@@ -0,0 +1,211 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+#include "arrow/memory_pool.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/cpu_info.h"
+#include "arrow/util/logging.h"  // IWYU pragma: keep
+
+namespace arrow {
+
+// Benchmark changed its parameter type between releases from
+// int to int64_t. As it doesn't have version macros, we need
+// to apply C++ template magic.
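+
+// Illustrative note (added, not from the upstream header): the pattern match
+// below inspects the pointer-to-member type of Benchmark::Args, which is roughly
+//
+//   Benchmark* (Benchmark::*)(const std::vector<int64_t>&)
+//
+// on recent Google Benchmark releases, so ArgsType resolves to int64_t there
+// and to int on very old releases.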
+
+template <typename Func>
+struct BenchmarkArgsType;
+
+// Pattern matching that extracts the vector element type of Benchmark::Args()
+template <typename Values>
+struct BenchmarkArgsType<benchmark::internal::Benchmark* (
+    benchmark::internal::Benchmark::*)(const std::vector<Values>&)> {
+  using type = Values;
+};
+
+using ArgsType =
+    typename BenchmarkArgsType<decltype(&benchmark::internal::Benchmark::Args)>::type;
+
+using internal::CpuInfo;
+
+static const CpuInfo* cpu_info = CpuInfo::GetInstance();
+
+static const int64_t kL1Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L1);
+static const int64_t kL2Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L2);
+static const int64_t kL3Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L3);
+static const int64_t kCantFitInL3Size = kL3Size * 4;
+static const std::vector<int64_t> kMemorySizes = {kL1Size, kL2Size, kL3Size,
+                                                  kCantFitInL3Size};
+// 0 is treated as "no nulls"
+static const std::vector<int64_t> kInverseNullProportions = {10000, 100, 10, 2, 1, 0};
+
+struct GenericItemsArgs {
+  // number of items processed per iteration
+  const int64_t size;
+
+  // proportion of nulls in generated arrays
+  double null_proportion;
+
+  explicit GenericItemsArgs(benchmark::State& state)
+      : size(state.range(0)), state_(state) {
+    if (state.range(1) == 0) {
+      this->null_proportion = 0.0;
+    } else {
+      this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+    }
+  }
+
+  ~GenericItemsArgs() {
+    state_.counters["size"] = static_cast<double>(size);
+    state_.counters["null_percent"] = null_proportion * 100;
+    state_.SetItemsProcessed(state_.iterations() * size);
+  }
+
+ private:
+  benchmark::State& state_;
+};
+
+void BenchmarkSetArgsWithSizes(benchmark::internal::Benchmark* bench,
+                               const std::vector<int64_t>& sizes = kMemorySizes) {
+  bench->Unit(benchmark::kMicrosecond);
+
+  for (const auto size : sizes) {
+    for (const auto inverse_null_proportion : kInverseNullProportions) {
+      bench->Args({static_cast<ArgsType>(size), inverse_null_proportion});
+    }
+  }
+}
+
+void BenchmarkSetArgs(benchmark::internal::Benchmark* bench) {
+  BenchmarkSetArgsWithSizes(bench, kMemorySizes);
+}
+
+void RegressionSetArgs(benchmark::internal::Benchmark* bench) {
+  // Regression benchmarks do not need to account for the cache hierarchy,
+  // so optimize for the best case.
+  BenchmarkSetArgsWithSizes(bench, {kL1Size});
+}
+
+// RAII struct to handle some of the boilerplate in regression benchmarks
+struct RegressionArgs {
+  // size of memory tested (per iteration) in bytes
+  int64_t size;
+
+  // proportion of nulls in generated arrays
+  double null_proportion;
+
+  // If size_is_bytes is true, then it's a number of bytes, otherwise it's the
+  // number of items processed (for reporting)
+  explicit RegressionArgs(benchmark::State& state, bool size_is_bytes = true)
+      : size(state.range(0)), state_(state), size_is_bytes_(size_is_bytes) {
+    if (state.range(1) == 0) {
+      this->null_proportion = 0.0;
+    } else {
+      this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+    }
+  }
+
+  ~RegressionArgs() {
+    state_.counters["size"] = static_cast<double>(size);
+    state_.counters["null_percent"] = null_proportion * 100;
+    if (size_is_bytes_) {
+      state_.SetBytesProcessed(state_.iterations() * size);
+    } else {
+      state_.SetItemsProcessed(state_.iterations() * size);
+    }
+  }
+
+ private:
+  benchmark::State& state_;
+  bool size_is_bytes_;
+};
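+
+// Usage sketch (illustrative only; not part of the upstream header). The
+// benchmark body below is hypothetical:
+//
+//   static void BenchDecodeInts(benchmark::State& state) {
+//     RegressionArgs args(state);  // reports size/null_percent on destruction
+//     for (auto _ : state) {
+//       // ... process args.size bytes with args.null_proportion nulls ...
+//     }
+//   }
+//   BENCHMARK(BenchDecodeInts)->Apply(RegressionSetArgs);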
+
+class MemoryPoolMemoryManager : public benchmark::MemoryManager {
+  void Start() override {
+    memory_pool = std::make_shared<ProxyMemoryPool>(default_memory_pool());
+
+    MemoryPool* default_pool = default_memory_pool();
+    global_allocations_start = default_pool->num_allocations();
+  }
+
+// BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark
+// 1.8.0. We can remove this Stop(Result*) when we require Google
+// Benchmark 1.8.0 or later.
+#ifndef BENCHMARK_DONT_OPTIMIZE
+  void Stop(Result* result) override { Stop(*result); }
+#endif
+
+  void Stop(benchmark::MemoryManager::Result& result) override {
+    // If num_allocations is still zero, we assume that the memory pool wasn't
+    // passed down, so there is nothing we can usefully record.
+    MemoryPool* default_pool = default_memory_pool();
+    int64_t new_default_allocations =
+        default_pool->num_allocations() - global_allocations_start;
+
+    // Only record metrics if (1) there were allocations and (2) we
+    // recorded at least one.
+    if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) {
+      if (new_default_allocations > memory_pool->num_allocations()) {
+        // If we missed some, let's report that.
+        int64_t missed_allocations =
+            new_default_allocations - memory_pool->num_allocations();
+        ARROW_LOG(WARNING) << "BenchmarkMemoryTracker recorded some allocations "
+                           << "for a benchmark, but missed " << missed_allocations
+                           << " allocations.\n";
+      }
+
+      result.max_bytes_used = memory_pool->max_memory();
+      result.total_allocated_bytes = memory_pool->total_bytes_allocated();
+      result.num_allocs = memory_pool->num_allocations();
+    }
+  }
+
+ public:
+  std::shared_ptr<::arrow::ProxyMemoryPool> memory_pool;
+
+ protected:
+  int64_t global_allocations_start;
+};
+
+/// \brief Track memory pool allocations in benchmarks.
+///
+/// Instantiate as a global variable to register the hooks into Google Benchmark
+/// to collect memory metrics. Before each benchmark, a new ProxyMemoryPool is
+/// created. It can then be accessed with memory_pool(). Once the benchmark is
+/// complete, the hook will record the maximum memory used, the total bytes
+/// allocated, and the total number of allocations. If no allocations were seen
+/// (for example, if you forgot to pass down the memory pool), then these metrics
+/// will not be saved.
+///
+/// Since this is used as one global variable, this will not work if multiple
+/// benchmarks are run concurrently or for multi-threaded benchmarks (ones
+/// that use `->ThreadRange(...)`).
+class BenchmarkMemoryTracker {
+ public:
+  BenchmarkMemoryTracker() : manager_() { ::benchmark::RegisterMemoryManager(&manager_); }
+  ::arrow::MemoryPool* memory_pool() const { return manager_.memory_pool.get(); }
+
+ protected:
+  ::arrow::MemoryPoolMemoryManager manager_;
+};
+
+}  // namespace arrow
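+
+// Usage sketch (illustrative only; not part of the upstream header). The
+// benchmark function and buffer size are hypothetical:
+//
+//   static arrow::BenchmarkMemoryTracker memory_tracker;
+//
+//   static void BenchAlloc(benchmark::State& state) {
+//     for (auto _ : state) {
+//       auto buffer = *arrow::AllocateBuffer(4096, memory_tracker.memory_pool());
+//       benchmark::DoNotOptimize(buffer);
+//     }
+//   }
+//   BENCHMARK(BenchAlloc);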
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..94f7a5bdfa667a97bd00a91404a1dd9f64dfd2dd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstring>
+#include <string_view>
+
+#include "arrow/type.h"
+#include "arrow/util/span.h"
+
+namespace arrow::util {
+
+inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
+  // Small string: inlined. Bytes beyond size are zeroed
+  BinaryViewType::c_type out;
+  out.inlined = {size, {}};
+  memcpy(&out.inlined.data, data, size);
+  return out;
+}
+
+inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
+  return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
+}
+
+inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
+                                           int32_t buffer_index, int32_t offset) {
+  if (size <= BinaryViewType::kInlineSize) {
+    return ToInlineBinaryView(data, size);
+  }
+
+  // Large string: store index/offset.
+  BinaryViewType::c_type out;
+  out.ref = {size, {}, buffer_index, offset};
+  memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
+  return out;
+}
+
+inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
+                                           int32_t offset) {
+  return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
+}
+
+template <typename BufferPtr>
+std::string_view FromBinaryView(const BinaryViewType::c_type& v,
+                                const BufferPtr* data_buffers) {
+  auto* data = v.is_inline() ? v.inlined.data.data()
+                             : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
+  return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
+}
+template <typename BufferPtr>
+std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
+
+template <typename BufferPtr>
+bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
+                     const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
+  int64_t l_size_and_prefix, r_size_and_prefix;
+  memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
+  memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));
+
+  if (l_size_and_prefix != r_size_and_prefix) return false;
+
+  if (l.is_inline()) {
+    // The columnar spec mandates that the inlined part be zero-padded, so we can compare
+    // a word at a time regardless of the exact size.
+    int64_t l_inlined, r_inlined;
+    memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
+    memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
+    return l_inlined == r_inlined;
+  }
+
+  // Sizes are equal and this is not inline, therefore both are out
+  // of line and have kPrefixSize first in common.
+  const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
+  const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
+  return memcmp(l_data + BinaryViewType::kPrefixSize,
+                r_data + BinaryViewType::kPrefixSize,
+                l.size() - BinaryViewType::kPrefixSize) == 0;
+}
+
+}  // namespace arrow::util
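+
+// Usage sketch (illustrative only; not part of the upstream header): a string
+// short enough for the inline representation needs no data buffer at all.
+//
+//   auto view = arrow::util::ToInlineBinaryView("abc");
+//   // view.is_inline() is true and view.size() == 3; longer strings go
+//   // through ToBinaryView() with a buffer index and offset instead.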
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h
new file mode 100644
index 0000000000000000000000000000000000000000..73a1ee8600fb4e0be10f26e921083c3be5740490
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h
@@ -0,0 +1,570 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <memory>
+
+#include "arrow/buffer.h"
+#include "arrow/status.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/ubsan.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+namespace detail {
+
+inline uint64_t LoadWord(const uint8_t* bytes) {
+  return bit_util::ToLittleEndian(util::SafeLoadAs<uint64_t>(bytes));
+}
+
+inline uint64_t ShiftWord(uint64_t current, uint64_t next, int64_t shift) {
+  if (shift == 0) {
+    return current;
+  }
+  return (current >> shift) | (next << (64 - shift));
+}
+
+// These templates are here to help with unit tests
+
+template <typename T>
+constexpr T BitNot(T x) {
+  return ~x;
+}
+
+template <>
+constexpr bool BitNot(bool x) {
+  return !x;
+}
+
+struct BitBlockAnd {
+  template <typename T>
+  static constexpr T Call(T left, T right) {
+    return left & right;
+  }
+};
+
+struct BitBlockAndNot {
+  template <typename T>
+  static constexpr T Call(T left, T right) {
+    return left & BitNot(right);
+  }
+};
+
+struct BitBlockOr {
+  template <typename T>
+  static constexpr T Call(T left, T right) {
+    return left | right;
+  }
+};
+
+struct BitBlockOrNot {
+  template <typename T>
+  static constexpr T Call(T left, T right) {
+    return left | BitNot(right);
+  }
+};
+
+}  // namespace detail
+
+/// \brief Return value from bit block counters: the total number of bits and
+/// the number of set bits.
+struct BitBlockCount {
+  int16_t length;
+  int16_t popcount;
+
+  bool NoneSet() const { return this->popcount == 0; }
+  bool AllSet() const { return this->length == this->popcount; }
+};
+
+/// \brief A class that scans through a true/false bitmap to compute popcounts
+/// 64 or 256 bits at a time. This is used to accelerate processing of
+/// mostly-not-null array data.
+class ARROW_EXPORT BitBlockCounter {
+ public:
+  BitBlockCounter(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
+        bits_remaining_(length),
+        offset_(start_offset % 8) {}
+
+  /// \brief The bit size of each word run
+  static constexpr int64_t kWordBits = 64;
+
+  /// \brief The bit size of four words run
+  static constexpr int64_t kFourWordsBits = kWordBits * 4;
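+
+  // Usage sketch (illustrative only; not part of the upstream header):
+  // counting the set bits of a bitmap one 64-bit word at a time.
+  //
+  //   BitBlockCounter counter(bitmap, /*start_offset=*/0, length);
+  //   int64_t set_bits = 0;
+  //   for (BitBlockCount block = counter.NextWord(); block.length != 0;
+  //        block = counter.NextWord()) {
+  //     set_bits += block.popcount;
+  //   }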
+
+  /// \brief Return the next run of available bits, usually 256. The returned
+  /// pair contains the size of run and the number of true values. The last
+  /// block will have a length less than 256 if the bitmap length is not a
+  /// multiple of 256, and will return 0-length blocks in subsequent
+  /// invocations.
+  BitBlockCount NextFourWords() {
+    using detail::LoadWord;
+    using detail::ShiftWord;
+
+    if (!bits_remaining_) {
+      return {0, 0};
+    }
+    int64_t total_popcount = 0;
+    if (offset_ == 0) {
+      if (bits_remaining_ < kFourWordsBits) {
+        return GetBlockSlow(kFourWordsBits);
+      }
+      total_popcount += bit_util::PopCount(LoadWord(bitmap_));
+      total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 8));
+      total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 16));
+      total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 24));
+    } else {
+      // When the offset is > 0, we need there to be a word beyond the last
+      // aligned word in the bitmap for the bit shifting logic.
+      if (bits_remaining_ < 5 * kWordBits - offset_) {
+        return GetBlockSlow(kFourWordsBits);
+      }
+      auto current = LoadWord(bitmap_);
+      auto next = LoadWord(bitmap_ + 8);
+      total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+      current = next;
+      next = LoadWord(bitmap_ + 16);
+      total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+      current = next;
+      next = LoadWord(bitmap_ + 24);
+      total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+      current = next;
+      next = LoadWord(bitmap_ + 32);
+      total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+    }
+    bitmap_ += bit_util::BytesForBits(kFourWordsBits);
+    bits_remaining_ -= kFourWordsBits;
+    return {256, static_cast<int16_t>(total_popcount)};
+  }
+
+  /// \brief Return the next run of available bits, usually 64. The returned
+  /// pair contains the size of run and the number of true values. The last
+  /// block will have a length less than 64 if the bitmap length is not a
+  /// multiple of 64, and will return 0-length blocks in subsequent
+  /// invocations.
+  BitBlockCount NextWord() {
+    using detail::LoadWord;
+    using detail::ShiftWord;
+
+    if (!bits_remaining_) {
+      return {0, 0};
+    }
+    int64_t popcount = 0;
+    if (offset_ == 0) {
+      if (bits_remaining_ < kWordBits) {
+        return GetBlockSlow(kWordBits);
+      }
+      popcount = bit_util::PopCount(LoadWord(bitmap_));
+    } else {
+      // When the offset is > 0, we need there to be a word beyond the last
+      // aligned word in the bitmap for the bit shifting logic.
+      if (bits_remaining_ < 2 * kWordBits - offset_) {
+        return GetBlockSlow(kWordBits);
+      }
+      popcount = bit_util::PopCount(
+          ShiftWord(LoadWord(bitmap_), LoadWord(bitmap_ + 8), offset_));
+    }
+    bitmap_ += kWordBits / 8;
+    bits_remaining_ -= kWordBits;
+    return {64, static_cast<int16_t>(popcount)};
+  }
+
+ private:
+  /// \brief Return block with the requested size when doing word-wise
+  /// computation is not possible due to inadequate bits remaining.
+  BitBlockCount GetBlockSlow(int64_t block_size) noexcept;
+
+  const uint8_t* bitmap_;
+  int64_t bits_remaining_;
+  int64_t offset_;
+};
+
+/// \brief A tool to iterate through a possibly nonexistent validity bitmap,
+/// to allow us to write one code path for both the with-nulls and no-nulls
+/// cases without giving up a lot of performance.
+class ARROW_EXPORT OptionalBitBlockCounter {
+ public:
+  // validity_bitmap may be NULLPTR
+  OptionalBitBlockCounter(const uint8_t* validity_bitmap, int64_t offset, int64_t length);
+
+  // validity_bitmap may be null
+  OptionalBitBlockCounter(const std::shared_ptr<Buffer>& validity_bitmap, int64_t offset,
+                          int64_t length);
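+
+  // Usage sketch (illustrative only; not part of the upstream header): the
+  // same loop works whether or not the array has a validity bitmap.
+  //
+  //   OptionalBitBlockCounter counter(validity_bitmap, offset, length);
+  //   int64_t position = 0;
+  //   while (position < length) {
+  //     BitBlockCount block = counter.NextBlock();
+  //     if (block.AllSet()) {
+  //       // fast path: no per-element null checks needed
+  //     } else if (!block.NoneSet()) {
+  //       // mixed block: check bits individually
+  //     }
+  //     position += block.length;
+  //   }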
+
+  /// Return block count for next word when the bitmap is available; otherwise
+  /// return a block with length up to INT16_MAX when there is no validity
+  /// bitmap (so all the referenced values are not null).
+  BitBlockCount NextBlock() {
+    static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
+    if (has_bitmap_) {
+      BitBlockCount block = counter_.NextWord();
+      position_ += block.length;
+      return block;
+    } else {
+      int16_t block_size =
+          static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
+      position_ += block_size;
+      // All values are non-null
+      return {block_size, block_size};
+    }
+  }
+
+  // Like NextBlock, but returns a word-sized block even when there is no
+  // validity bitmap
+  BitBlockCount NextWord() {
+    static constexpr int64_t kWordSize = 64;
+    if (has_bitmap_) {
+      BitBlockCount block = counter_.NextWord();
+      position_ += block.length;
+      return block;
+    } else {
+      int16_t block_size = static_cast<int16_t>(std::min(kWordSize, length_ - position_));
+      position_ += block_size;
+      // All values are non-null
+      return {block_size, block_size};
+    }
+  }
+
+ private:
+  const bool has_bitmap_;
+  int64_t position_;
+  int64_t length_;
+  BitBlockCounter counter_;
+};
+
+/// \brief A class that computes popcounts on the result of bitwise operations
+/// between two bitmaps, 64 bits at a time. A 64-bit word is loaded from each
+/// bitmap, then the popcount is computed on e.g. the bitwise-and of the two
+/// words.
+class ARROW_EXPORT BinaryBitBlockCounter {
+ public:
+  BinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset,
+                        const uint8_t* right_bitmap, int64_t right_offset, int64_t length)
+      : left_bitmap_(util::MakeNonNull(left_bitmap) + left_offset / 8),
+        left_offset_(left_offset % 8),
+        right_bitmap_(util::MakeNonNull(right_bitmap) + right_offset / 8),
+        right_offset_(right_offset % 8),
+        bits_remaining_(length) {}
+
+  /// \brief Return the popcount of the bitwise-and of the next run of
+  /// available bits, up to 64. The returned pair contains the size of run and
+  /// the number of true values. The last block will have a length less than 64
+  /// if the bitmap length is not a multiple of 64, and will return 0-length
+  /// blocks in subsequent invocations.
+  BitBlockCount NextAndWord() { return NextWord<detail::BitBlockAnd>(); }
+
+  /// \brief Computes "x & ~y" block for each available run of bits.
+  BitBlockCount NextAndNotWord() { return NextWord<detail::BitBlockAndNot>(); }
+
+  /// \brief Computes "x | y" block for each available run of bits.
+  BitBlockCount NextOrWord() { return NextWord<detail::BitBlockOr>(); }
+
+  /// \brief Computes "x | ~y" block for each available run of bits.
+  BitBlockCount NextOrNotWord() { return NextWord<detail::BitBlockOrNot>(); }
+
+ private:
+  template <typename Op>
+  BitBlockCount NextWord() {
+    using detail::LoadWord;
+    using detail::ShiftWord;
+
+    if (!bits_remaining_) {
+      return {0, 0};
+    }
+    // When the offset is > 0, we need there to be a word beyond the last aligned
+    // word in the bitmap for the bit shifting logic.
+    constexpr int64_t kWordBits = BitBlockCounter::kWordBits;
+    const int64_t bits_required_to_use_words =
+        std::max(left_offset_ == 0 ? 64 : 64 + (64 - left_offset_),
+                 right_offset_ == 0 ? 64 : 64 + (64 - right_offset_));
+    if (bits_remaining_ < bits_required_to_use_words) {
+      const int16_t run_length =
+          static_cast<int16_t>(std::min(bits_remaining_, kWordBits));
+      int16_t popcount = 0;
+      for (int64_t i = 0; i < run_length; ++i) {
+        if (Op::Call(bit_util::GetBit(left_bitmap_, left_offset_ + i),
+                     bit_util::GetBit(right_bitmap_, right_offset_ + i))) {
+          ++popcount;
+        }
+      }
+      // This code path should trigger _at most_ 2 times. In the "two times"
+      // case, the first time the run length will be a multiple of 8.
+ left_bitmap_ += run_length / 8; + right_bitmap_ += run_length / 8; + bits_remaining_ -= run_length; + return {run_length, popcount}; + } + + int64_t popcount = 0; + if (left_offset_ == 0 && right_offset_ == 0) { + popcount = + bit_util::PopCount(Op::Call(LoadWord(left_bitmap_), LoadWord(right_bitmap_))); + } else { + auto left_word = + ShiftWord(LoadWord(left_bitmap_), LoadWord(left_bitmap_ + 8), left_offset_); + auto right_word = + ShiftWord(LoadWord(right_bitmap_), LoadWord(right_bitmap_ + 8), right_offset_); + popcount = bit_util::PopCount(Op::Call(left_word, right_word)); + } + left_bitmap_ += kWordBits / 8; + right_bitmap_ += kWordBits / 8; + bits_remaining_ -= kWordBits; + return {64, static_cast(popcount)}; + } + + const uint8_t* left_bitmap_; + int64_t left_offset_; + const uint8_t* right_bitmap_; + int64_t right_offset_; + int64_t bits_remaining_; +}; + +class ARROW_EXPORT OptionalBinaryBitBlockCounter { + public: + // Any bitmap may be NULLPTR + OptionalBinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length); + + // Any bitmap may be null + OptionalBinaryBitBlockCounter(const std::shared_ptr& left_bitmap, + int64_t left_offset, + const std::shared_ptr& right_bitmap, + int64_t right_offset, int64_t length); + + BitBlockCount NextAndBlock() { + static constexpr int64_t kMaxBlockSize = std::numeric_limits::max(); + switch (has_bitmap_) { + case HasBitmap::BOTH: { + BitBlockCount block = binary_counter_.NextAndWord(); + position_ += block.length; + return block; + } + case HasBitmap::ONE: { + BitBlockCount block = unary_counter_.NextWord(); + position_ += block.length; + return block; + } + case HasBitmap::NONE: + default: { + const int16_t block_size = + static_cast(std::min(kMaxBlockSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + } + + BitBlockCount NextOrNotBlock() { + static constexpr int64_t kMaxBlockSize = std::numeric_limits::max(); + switch (has_bitmap_) { + case HasBitmap::BOTH: { + BitBlockCount block = binary_counter_.NextOrNotWord(); + position_ += block.length; + return block; + } + case HasBitmap::ONE: { + BitBlockCount block = unary_counter_.NextWord(); + position_ += block.length; + return block; + } + case HasBitmap::NONE: + default: { + const int16_t block_size = + static_cast(std::min(kMaxBlockSize, length_ - position_)); + position_ += block_size; + // All values are non-null + return {block_size, block_size}; + } + } + } + + private: + enum class HasBitmap : int { BOTH, ONE, NONE }; + + const HasBitmap has_bitmap_; + int64_t position_; + int64_t length_; + BitBlockCounter unary_counter_; + BinaryBitBlockCounter binary_counter_; + + static HasBitmap HasBitmapFromBitmaps(bool has_left, bool has_right) { + switch (static_cast(has_left) + static_cast(has_right)) { + case 0: + return HasBitmap::NONE; + case 1: + return HasBitmap::ONE; + default: // 2 + return HasBitmap::BOTH; + } + } +}; + +// Functional-style bit block visitors. 
+ +template +static Status VisitBitBlocks(const uint8_t* bitmap, int64_t offset, int64_t length, + VisitNotNull&& visit_not_null, VisitNull&& visit_null) { + internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length); + int64_t position = 0; + while (position < length) { + internal::BitBlockCount block = bit_counter.NextBlock(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_null()); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(bitmap, offset + position)) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } else { + ARROW_RETURN_NOT_OK(visit_null()); + } + } + } + } + return Status::OK(); +} + +template +static void VisitBitBlocksVoid(const uint8_t* bitmap, int64_t offset, int64_t length, + VisitNotNull&& visit_not_null, VisitNull&& visit_null) { + internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length); + int64_t position = 0; + while (position < length) { + internal::BitBlockCount block = bit_counter.NextBlock(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_not_null(position); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_null(); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(bitmap, offset + position)) { + visit_not_null(position); + } else { + visit_null(); + } + } + } + } +} + +template +static Status VisitTwoBitBlocks(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length, VisitNotNull&& visit_not_null, + VisitNull&& visit_null) { + if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) { + // At most one bitmap is present + if (left_bitmap == NULLPTR) { + return VisitBitBlocks(right_bitmap, right_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } else { + return VisitBitBlocks(left_bitmap, left_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } + } + BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset, + length); + int64_t position = 0; + while (position < length) { + BitBlockCount block = bit_counter.NextAndWord(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + ARROW_RETURN_NOT_OK(visit_null()); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(left_bitmap, left_offset + position) && + bit_util::GetBit(right_bitmap, right_offset + position)) { + ARROW_RETURN_NOT_OK(visit_not_null(position)); + } else { + ARROW_RETURN_NOT_OK(visit_null()); + } + } + } + } + return Status::OK(); +} + +template +static void VisitTwoBitBlocksVoid(const uint8_t* left_bitmap, int64_t left_offset, + const uint8_t* right_bitmap, int64_t right_offset, + int64_t length, VisitNotNull&& visit_not_null, + VisitNull&& visit_null) { + if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) { + // At most one bitmap is present + if (left_bitmap == NULLPTR) { + return VisitBitBlocksVoid(right_bitmap, right_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } else { + return 
VisitBitBlocksVoid(left_bitmap, left_offset, length, + std::forward(visit_not_null), + std::forward(visit_null)); + } + } + BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset, + length); + int64_t position = 0; + while (position < length) { + BitBlockCount block = bit_counter.NextAndWord(); + if (block.AllSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_not_null(position); + } + } else if (block.NoneSet()) { + for (int64_t i = 0; i < block.length; ++i, ++position) { + visit_null(); + } + } else { + for (int64_t i = 0; i < block.length; ++i, ++position) { + if (bit_util::GetBit(left_bitmap, left_offset + position) && + bit_util::GetBit(right_bitmap, right_offset + position)) { + visit_not_null(position); + } else { + visit_null(); + } + } + } + } +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..a436a50b86fe14f84699cba679f6cac882514c19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h @@ -0,0 +1,515 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_reader.h" +#include "arrow/util/endian.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace internal { + +struct BitRun { + int64_t length; + // Whether bits are set at this point. + bool set; + + std::string ToString() const { + return std::string("{Length: ") + std::to_string(length) + + ", set=" + std::to_string(set) + "}"; + } +}; + +inline bool operator==(const BitRun& lhs, const BitRun& rhs) { + return lhs.length == rhs.length && lhs.set == rhs.set; +} + +inline bool operator!=(const BitRun& lhs, const BitRun& rhs) { + return lhs.length != rhs.length || lhs.set != rhs.set; +} + +class BitRunReaderLinear { + public: + BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : reader_(bitmap, start_offset, length) {} + + BitRun NextRun() { + BitRun rl = {/*length=*/0, reader_.IsSet()}; + // Advance while the values are equal and not at the end of list. + while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) { + rl.length++; + reader_.Next(); + } + return rl; + } + + private: + BitmapReader reader_; +}; + +#if ARROW_LITTLE_ENDIAN +/// A convenience class for counting the number of contiguous set/unset bits +/// in a bitmap. +class ARROW_EXPORT BitRunReader { + public: + /// \brief Constructs new BitRunReader. 
+  ///
+  /// \param[in] bitmap source data
+  /// \param[in] start_offset bit offset into the source data
+  /// \param[in] length number of bits to copy
+  BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);
+
+  /// Returns a new BitRun containing the number of contiguous
+  /// bits with the same value. length == 0 indicates the
+  /// end of the bitmap.
+  BitRun NextRun() {
+    if (ARROW_PREDICT_FALSE(position_ >= length_)) {
+      return {/*length=*/0, false};
+    }
+    // This implementation relies on an efficient implementation of
+    // CountTrailingZeros and assumes that runs are, more often than
+    // not, long. The logic is to incrementally find the next bit change
+    // from the current position. This is done by zeroing all
+    // bits in word_ up to position_ and using CountTrailingZeros
+    // to find the index of the next set bit.
+
+    // The runs alternate on each call, so flip the bit.
+    current_run_bit_set_ = !current_run_bit_set_;
+
+    int64_t start_position = position_;
+    int64_t start_bit_offset = start_position & 63;
+    // Invert the word for proper use of CountTrailingZeros and
+    // clear bits so CountTrailingZeros can do its magic.
+    word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);
+
+    // Go forward until the next change from unset to set.
+    int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
+    position_ += new_bits;
+
+    if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+        ARROW_PREDICT_TRUE(position_ < length_)) {
+      // Continue extending position while we can advance an entire word
+      // (updates position_ accordingly).
+      AdvanceUntilChange();
+    }
+
+    return {/*length=*/position_ - start_position, current_run_bit_set_};
+  }
+
+ private:
+  void AdvanceUntilChange() {
+    int64_t new_bits = 0;
+    do {
+      // Advance the position of the bitmap for loading.
+      bitmap_ += sizeof(uint64_t);
+      LoadNextWord();
+      new_bits = bit_util::CountTrailingZeros(word_);
+      // Continue calculating run length.
+      position_ += new_bits;
+    } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+             ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
+  }
+
+  void LoadNextWord() { return LoadWord(length_ - position_); }
+
+  // Helper method for loading the next word.
+  void LoadWord(int64_t bits_remaining) {
+    word_ = 0;
+    // we need at least an extra byte in this case.
+    if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
+      std::memcpy(&word_, bitmap_, 8);
+    } else {
+      int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
+      auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
+      std::memcpy(word_ptr, bitmap_, bytes_to_load);
+      // Ensure stoppage at the last bit in the bitmap by flipping the
+      // next-higher-order bit.
+      bit_util::SetBitTo(word_ptr, bits_remaining,
+                         !bit_util::GetBit(word_ptr, bits_remaining - 1));
+    }
+
+    // Two cases:
+    //   1. For unset, CountTrailingZeros works naturally so we don't
+    //      invert the word.
+    //   2. Otherwise invert so we can use CountTrailingZeros.
+ if (current_run_bit_set_) { + word_ = ~word_; + } + } + const uint8_t* bitmap_; + int64_t position_; + int64_t length_; + uint64_t word_; + bool current_run_bit_set_; +}; +#else +using BitRunReader = BitRunReaderLinear; +#endif + +struct SetBitRun { + int64_t position; + int64_t length; + + bool AtEnd() const { return length == 0; } + + std::string ToString() const { + return std::string("{pos=") + std::to_string(position) + + ", len=" + std::to_string(length) + "}"; + } + + bool operator==(const SetBitRun& other) const { + return position == other.position && length == other.length; + } + bool operator!=(const SetBitRun& other) const { + return position != other.position || length != other.length; + } +}; + +template +class BaseSetBitRunReader { + public: + /// \brief Constructs new SetBitRunReader. + /// + /// \param[in] bitmap source data + /// \param[in] start_offset bit offset into the source data + /// \param[in] length number of bits to copy + ARROW_NOINLINE + BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length) + : bitmap_(util::MakeNonNull(bitmap)), + length_(length), + remaining_(length_), + current_word_(0), + current_num_bits_(0) { + if (Reverse) { + bitmap_ += (start_offset + length) / 8; + const int8_t end_bit_offset = static_cast((start_offset + length) % 8); + if (length > 0 && end_bit_offset) { + // Get LSBs from last byte + ++bitmap_; + current_num_bits_ = + std::min(static_cast(length), static_cast(end_bit_offset)); + current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_); + } + } else { + bitmap_ += start_offset / 8; + const int8_t bit_offset = static_cast(start_offset % 8); + if (length > 0 && bit_offset) { + // Get MSBs from first byte + current_num_bits_ = + std::min(static_cast(length), static_cast(8 - bit_offset)); + current_word_ = LoadPartialWord(bit_offset, current_num_bits_); + } + } + } + + ARROW_NOINLINE + SetBitRun NextRun() { + int64_t pos = 0; + int64_t len = 0; + if (current_num_bits_) { + const auto run = FindCurrentRun(); + assert(remaining_ >= 0); + if (run.length && current_num_bits_) { + // The run ends in current_word_ + return AdjustRun(run); + } + pos = run.position; + len = run.length; + } + if (!len) { + // We didn't get any ones in current_word_, so we can skip any zeros + // in the following words + SkipNextZeros(); + if (remaining_ == 0) { + return {0, 0}; + } + assert(current_num_bits_); + pos = position(); + } else if (!current_num_bits_) { + if (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + current_num_bits_ = 64; + } else if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + } else { + // No bits remaining, perhaps we found a run? 
+ return AdjustRun({pos, len}); + } + // If current word starts with a zero, we got a full run + if (!(current_word_ & kFirstBit)) { + return AdjustRun({pos, len}); + } + } + // Current word should now start with a set bit + len += CountNextOnes(); + return AdjustRun({pos, len}); + } + + protected: + int64_t position() const { + if (Reverse) { + return remaining_; + } else { + return length_ - remaining_; + } + } + + SetBitRun AdjustRun(SetBitRun run) { + if (Reverse) { + assert(run.position >= run.length); + run.position -= run.length; + } + return run; + } + + uint64_t LoadFullWord() { + uint64_t word; + if (Reverse) { + bitmap_ -= 8; + } + memcpy(&word, bitmap_, 8); + if (!Reverse) { + bitmap_ += 8; + } + return bit_util::ToLittleEndian(word); + } + + uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) { + assert(num_bits > 0); + uint64_t word = 0; + const int64_t num_bytes = bit_util::BytesForBits(num_bits); + if (Reverse) { + // Read in the most significant bytes of the word + bitmap_ -= num_bytes; + memcpy(reinterpret_cast(&word) + 8 - num_bytes, bitmap_, num_bytes); + // XXX MostSignificantBitmask + return (bit_util::ToLittleEndian(word) << bit_offset) & + ~bit_util::LeastSignificantBitMask(64 - num_bits); + } else { + memcpy(&word, bitmap_, num_bytes); + bitmap_ += num_bytes; + return (bit_util::ToLittleEndian(word) >> bit_offset) & + bit_util::LeastSignificantBitMask(num_bits); + } + } + + void SkipNextZeros() { + assert(current_num_bits_ == 0); + while (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + const auto num_zeros = CountFirstZeros(current_word_); + if (num_zeros < 64) { + // Run of zeros ends here + current_word_ = ConsumeBits(current_word_, num_zeros); + current_num_bits_ = 64 - num_zeros; + remaining_ -= num_zeros; + assert(remaining_ >= 0); + assert(current_num_bits_ >= 0); + return; + } + remaining_ -= 64; + } + // Run of zeros continues in last bitmap word + if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + const auto num_zeros = + std::min(current_num_bits_, CountFirstZeros(current_word_)); + current_word_ = ConsumeBits(current_word_, num_zeros); + current_num_bits_ -= num_zeros; + remaining_ -= num_zeros; + assert(remaining_ >= 0); + assert(current_num_bits_ >= 0); + } + } + + int64_t CountNextOnes() { + assert(current_word_ & kFirstBit); + + int64_t len; + if (~current_word_) { + const auto num_ones = CountFirstZeros(~current_word_); + assert(num_ones <= current_num_bits_); + assert(num_ones <= remaining_); + remaining_ -= num_ones; + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ -= num_ones; + if (current_num_bits_) { + // Run of ones ends here + return num_ones; + } + len = num_ones; + } else { + // current_word_ is all ones + remaining_ -= 64; + current_num_bits_ = 0; + len = 64; + } + + while (ARROW_PREDICT_TRUE(remaining_ >= 64)) { + current_word_ = LoadFullWord(); + const auto num_ones = CountFirstZeros(~current_word_); + len += num_ones; + remaining_ -= num_ones; + if (num_ones < 64) { + // Run of ones ends here + current_word_ = ConsumeBits(current_word_, num_ones); + current_num_bits_ = 64 - num_ones; + return len; + } + } + // Run of ones continues in last bitmap word + if (remaining_ > 0) { + current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_); + current_num_bits_ = static_cast(remaining_); + const auto num_ones = CountFirstZeros(~current_word_); + assert(num_ones <= current_num_bits_); + 
assert(num_ones <= remaining_);
+      current_word_ = ConsumeBits(current_word_, num_ones);
+      current_num_bits_ -= num_ones;
+      remaining_ -= num_ones;
+      len += num_ones;
+    }
+    return len;
+  }
+
+  SetBitRun FindCurrentRun() {
+    // Skip any pending zeros
+    const auto num_zeros = CountFirstZeros(current_word_);
+    if (num_zeros >= current_num_bits_) {
+      remaining_ -= current_num_bits_;
+      current_word_ = 0;
+      current_num_bits_ = 0;
+      return {0, 0};
+    }
+    assert(num_zeros <= remaining_);
+    current_word_ = ConsumeBits(current_word_, num_zeros);
+    current_num_bits_ -= num_zeros;
+    remaining_ -= num_zeros;
+    const int64_t pos = position();
+    // Count any ones
+    const auto num_ones = CountFirstZeros(~current_word_);
+    assert(num_ones <= current_num_bits_);
+    assert(num_ones <= remaining_);
+    current_word_ = ConsumeBits(current_word_, num_ones);
+    current_num_bits_ -= num_ones;
+    remaining_ -= num_ones;
+    return {pos, num_ones};
+  }
+
+  inline int CountFirstZeros(uint64_t word);
+  inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);
+
+  const uint8_t* bitmap_;
+  const int64_t length_;
+  int64_t remaining_;
+  uint64_t current_word_;
+  int32_t current_num_bits_;
+
+  static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
+};
+
+template <>
+inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
+  return bit_util::CountTrailingZeros(word);
+}
+
+template <>
+inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
+  return bit_util::CountLeadingZeros(word);
+}
+
+template <>
+inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word,
+                                                        int32_t num_bits) {
+  return word >> num_bits;
+}
+
+template <>
+inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word,
+                                                       int32_t num_bits) {
+  return word << num_bits;
+}
+
+using SetBitRunReader = BaseSetBitRunReader<false>;
+using ReverseSetBitRunReader = BaseSetBitRunReader<true>;
+
+// Functional-style bit run visitors.
+
+// XXX: Try to make this function small so the compiler can inline and optimize
+// the `visit` function, which is normally a hot loop with vectorizable code.
+// - don't inline SetBitRunReader constructor, it doesn't hurt performance
+// - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
+template <typename Visit>
+inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
+                              Visit&& visit) {
+  if (bitmap == NULLPTR) {
+    // Assuming all set (as in a null bitmap)
+    return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+  }
+  SetBitRunReader reader(bitmap, offset, length);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    ARROW_RETURN_NOT_OK(visit(run.position, run.length));
+  }
+  return Status::OK();
+}
+
+template <typename Visit>
+inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
+                                Visit&& visit) {
+  if (bitmap == NULLPTR) {
+    // Assuming all set (as in a null bitmap)
+    visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+    return;
+  }
+  SetBitRunReader reader(bitmap, offset, length);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    visit(run.position, run.length);
+  }
+}
+
+template <typename Visit>
+inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+                              int64_t length, Visit&& visit) {
+  return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
+                         std::forward<Visit>(visit));
+}
+
+template <typename Visit>
+inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+                                int64_t length, Visit&& visit) {
+  VisitSetBitRunsVoid(bitmap ?
bitmap->data() : NULLPTR, offset, length, + std::forward(visit)); +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..811694e43b76c7c57a20c3151b16ac8a0100e49e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h @@ -0,0 +1,529 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// From Apache Impala (incubating) as of 2016-01-29 + +#pragma once + +#include +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/bpacking.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace bit_util { + +/// Utility class to write bit/byte streams. This class can write data to either be +/// bit packed or byte aligned (and a single stream that has a mix of both). +/// This class does not allocate memory. +class BitWriter { + public: + /// buffer: buffer to write bits to. Buffer should be preallocated with + /// 'buffer_len' bytes. + BitWriter(uint8_t* buffer, int buffer_len) : buffer_(buffer), max_bytes_(buffer_len) { + Clear(); + } + + void Clear() { + buffered_values_ = 0; + byte_offset_ = 0; + bit_offset_ = 0; + } + + /// The number of current bytes written, including the current byte (i.e. may include a + /// fraction of a byte). Includes buffered values. + int bytes_written() const { + return byte_offset_ + static_cast(bit_util::BytesForBits(bit_offset_)); + } + uint8_t* buffer() const { return buffer_; } + int buffer_len() const { return max_bytes_; } + + /// Writes a value to buffered_values_, flushing to buffer_ if necessary. This is bit + /// packed. Returns false if there was not enough space. num_bits must be <= 32. + bool PutValue(uint64_t v, int num_bits); + + /// Writes v to the next aligned byte using num_bytes. If T is larger than + /// num_bytes, the extra high-order bytes will be ignored. Returns false if + /// there was not enough space. + /// Assume the v is stored in buffer_ as a little-endian format + template + bool PutAligned(T v, int num_bytes); + + /// Write a Vlq encoded int to the buffer. Returns false if there was not enough + /// room. The value is written byte aligned. + /// For more details on vlq: + /// en.wikipedia.org/wiki/Variable-length_quantity + bool PutVlqInt(uint32_t v); + + // Writes an int zigzag encoded. + bool PutZigZagVlqInt(int32_t v); + + /// Write a Vlq encoded int64 to the buffer. Returns false if there was not enough + /// room. The value is written byte aligned. 
+ /// For more details on vlq: + /// en.wikipedia.org/wiki/Variable-length_quantity + bool PutVlqInt(uint64_t v); + + // Writes an int64 zigzag encoded. + bool PutZigZagVlqInt(int64_t v); + + /// Get a pointer to the next aligned byte and advance the underlying buffer + /// by num_bytes. + /// Returns NULL if there was not enough space. + uint8_t* GetNextBytePtr(int num_bytes = 1); + + /// Flushes all buffered values to the buffer. Call this when done writing to + /// the buffer. If 'align' is true, buffered_values_ is reset and any future + /// writes will be written to the next byte boundary. + void Flush(bool align = false); + + private: + uint8_t* buffer_; + int max_bytes_; + + /// Bit-packed values are initially written to this variable before being memcpy'd to + /// buffer_. This is faster than writing values byte by byte directly to buffer_. + uint64_t buffered_values_; + + int byte_offset_; // Offset in buffer_ + int bit_offset_; // Offset in buffered_values_ +}; + +namespace detail { + +inline uint64_t ReadLittleEndianWord(const uint8_t* buffer, int bytes_remaining) { + uint64_t le_value = 0; + if (ARROW_PREDICT_TRUE(bytes_remaining >= 8)) { + memcpy(&le_value, buffer, 8); + } else { + memcpy(&le_value, buffer, bytes_remaining); + } + return arrow::bit_util::FromLittleEndian(le_value); +} + +} // namespace detail + +/// Utility class to read bit/byte stream. This class can read bits or bytes +/// that are either byte aligned or not. It also has utilities to read multiple +/// bytes in one read (e.g. encoded int). +class BitReader { + public: + BitReader() = default; + + /// 'buffer' is the buffer to read from. The buffer's length is 'buffer_len'. + BitReader(const uint8_t* buffer, int buffer_len) : BitReader() { + Reset(buffer, buffer_len); + } + + void Reset(const uint8_t* buffer, int buffer_len) { + buffer_ = buffer; + max_bytes_ = buffer_len; + byte_offset_ = 0; + bit_offset_ = 0; + buffered_values_ = + detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_); + } + + /// Gets the next value from the buffer. Returns true if 'v' could be read or false if + /// there are not enough bytes left. + template + bool GetValue(int num_bits, T* v); + + /// Get a number of values from the buffer. Return the number of values actually read. + template + int GetBatch(int num_bits, T* v, int batch_size); + + /// Reads a 'num_bytes'-sized value from the buffer and stores it in 'v'. T + /// needs to be a little-endian native type and big enough to store + /// 'num_bytes'. The value is assumed to be byte-aligned so the stream will + /// be advanced to the start of the next byte before 'v' is read. Returns + /// false if there are not enough bytes left. + /// Assume the v was stored in buffer_ as a little-endian format + template + bool GetAligned(int num_bytes, T* v); + + /// Advances the stream by a number of bits. Returns true if succeed or false if there + /// are not enough bits left. + bool Advance(int64_t num_bits); + + /// Reads a vlq encoded int from the stream. The encoded int must start at + /// the beginning of a byte. Return false if there were not enough bytes in + /// the buffer. + bool GetVlqInt(uint32_t* v); + + // Reads a zigzag encoded int `into` v. + bool GetZigZagVlqInt(int32_t* v); + + /// Reads a vlq encoded int64 from the stream. The encoded int must start at + /// the beginning of a byte. Return false if there were not enough bytes in + /// the buffer. + bool GetVlqInt(uint64_t* v); + + // Reads a zigzag encoded int64 `into` v. 
+
+inline bool BitWriter::PutValue(uint64_t v, int num_bits) {
+  DCHECK_LE(num_bits, 64);
+  if (num_bits < 64) {
+    DCHECK_EQ(v >> num_bits, 0) << "v = " << v << ", num_bits = " << num_bits;
+  }
+
+  if (ARROW_PREDICT_FALSE(byte_offset_ * 8 + bit_offset_ + num_bits > max_bytes_ * 8))
+    return false;
+
+  buffered_values_ |= v << bit_offset_;
+  bit_offset_ += num_bits;
+
+  if (ARROW_PREDICT_FALSE(bit_offset_ >= 64)) {
+    // Flush buffered_values_ and write out bits of v that did not fit
+    buffered_values_ = arrow::bit_util::ToLittleEndian(buffered_values_);
+    memcpy(buffer_ + byte_offset_, &buffered_values_, 8);
+    buffered_values_ = 0;
+    byte_offset_ += 8;
+    bit_offset_ -= 64;
+    buffered_values_ =
+        (num_bits - bit_offset_ == 64) ? 0 : (v >> (num_bits - bit_offset_));
+  }
+  DCHECK_LT(bit_offset_, 64);
+  return true;
+}
+
+inline void BitWriter::Flush(bool align) {
+  int num_bytes = static_cast<int>(bit_util::BytesForBits(bit_offset_));
+  DCHECK_LE(byte_offset_ + num_bytes, max_bytes_);
+  auto buffered_values = arrow::bit_util::ToLittleEndian(buffered_values_);
+  memcpy(buffer_ + byte_offset_, &buffered_values, num_bytes);
+
+  if (align) {
+    buffered_values_ = 0;
+    byte_offset_ += num_bytes;
+    bit_offset_ = 0;
+  }
+}
+
+inline uint8_t* BitWriter::GetNextBytePtr(int num_bytes) {
+  Flush(/* align */ true);
+  DCHECK_LE(byte_offset_, max_bytes_);
+  if (byte_offset_ + num_bytes > max_bytes_) return NULL;
+  uint8_t* ptr = buffer_ + byte_offset_;
+  byte_offset_ += num_bytes;
+  return ptr;
+}
+
+template <typename T>
+inline bool BitWriter::PutAligned(T val, int num_bytes) {
+  uint8_t* ptr = GetNextBytePtr(num_bytes);
+  if (ptr == NULL) return false;
+  val = arrow::bit_util::ToLittleEndian(val);
+  memcpy(ptr, &val, num_bytes);
+  return true;
+}
+
+namespace detail {
+
+template <typename T>
+inline void GetValue_(int num_bits, T* v, int max_bytes, const uint8_t* buffer,
+                      int* bit_offset, int* byte_offset, uint64_t* buffered_values) {
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4800)
+#endif
+  *v = static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset + num_bits) >>
+                      *bit_offset);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+  *bit_offset += num_bits;
+  if (*bit_offset >= 64) {
+    *byte_offset += 8;
+    *bit_offset -= 64;
+
+    *buffered_values =
+        detail::ReadLittleEndianWord(buffer + *byte_offset, max_bytes - *byte_offset);
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4800 4805)
+#endif
+    // Read the bits of v that crossed into the new buffered_values_.
+    // If the shift exponent (num_bits - *bit_offset) were not less than
+    // 8 * sizeof(T), *v would not change and the shift below would be a
+    // runtime error (shift exponent too large), hence the guard.
+    if (ARROW_PREDICT_TRUE(num_bits - *bit_offset < static_cast<int>(8 * sizeof(T)))) {
+      *v = *v | static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset)
+                               << (num_bits - *bit_offset));
+    }
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+    DCHECK_LE(*bit_offset, 64);
+  }
+}
+
+}  // namespace detail
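+
+// NOTE (review comment, not upstream): worked example of the layout produced
+// by PutValue and consumed by GetValue_ above.  Writing the 3-bit values
+// 1, 2, 3 packs them LSB-first into a single byte:
+//   1 | (2 << 3) | (3 << 6) = 0b11'010'001 = 0xD1
+// i.e. the first value occupies the lowest-order bits.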
+
+template <typename T>
+inline bool BitReader::GetValue(int num_bits, T* v) {
+  return GetBatch(num_bits, v, 1) == 1;
+}
+
+template <typename T>
+inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) {
+  DCHECK(buffer_ != NULL);
+  DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8)) << "num_bits: " << num_bits;
+
+  int bit_offset = bit_offset_;
+  int byte_offset = byte_offset_;
+  uint64_t buffered_values = buffered_values_;
+  int max_bytes = max_bytes_;
+  const uint8_t* buffer = buffer_;
+
+  const int64_t needed_bits = num_bits * static_cast<int64_t>(batch_size);
+  constexpr uint64_t kBitsPerByte = 8;
+  const int64_t remaining_bits =
+      static_cast<int64_t>(max_bytes - byte_offset) * kBitsPerByte - bit_offset;
+  if (remaining_bits < needed_bits) {
+    batch_size = static_cast<int>(remaining_bits / num_bits);
+  }
+
+  int i = 0;
+  if (ARROW_PREDICT_FALSE(bit_offset != 0)) {
+    for (; i < batch_size && bit_offset != 0; ++i) {
+      detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
+                        &buffered_values);
+    }
+  }
+
+  if (sizeof(T) == 4) {
+    int num_unpacked =
+        internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
+                           reinterpret_cast<uint32_t*>(v + i), batch_size - i, num_bits);
+    i += num_unpacked;
+    byte_offset += num_unpacked * num_bits / 8;
+  } else if (sizeof(T) == 8 && num_bits > 32) {
+    // Use unpack64 only if num_bits is larger than 32
+    // TODO (ARROW-13677): improve the performance of internal::unpack64
+    // and remove the restriction of num_bits
+    int num_unpacked =
+        internal::unpack64(buffer + byte_offset, reinterpret_cast<uint64_t*>(v + i),
+                           batch_size - i, num_bits);
+    i += num_unpacked;
+    byte_offset += num_unpacked * num_bits / 8;
+  } else {
+    // TODO: revisit this limit if necessary
+    DCHECK_LE(num_bits, 32);
+    const int buffer_size = 1024;
+    uint32_t unpack_buffer[buffer_size];
+    while (i < batch_size) {
+      int unpack_size = std::min(buffer_size, batch_size - i);
+      int num_unpacked =
+          internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
+                             unpack_buffer, unpack_size, num_bits);
+      if (num_unpacked == 0) {
+        break;
+      }
+      for (int k = 0; k < num_unpacked; ++k) {
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4800)
+#endif
+        v[i + k] = static_cast<T>(unpack_buffer[k]);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+      }
+      i += num_unpacked;
+      byte_offset += num_unpacked * num_bits / 8;
+    }
+  }
+
+  buffered_values =
+      detail::ReadLittleEndianWord(buffer + byte_offset, max_bytes - byte_offset);
+
+  for (; i < batch_size; ++i) {
+    detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
+                      &buffered_values);
+  }
+
+  bit_offset_ = bit_offset;
+  byte_offset_ = byte_offset;
+  buffered_values_ = buffered_values;
+
+  return batch_size;
+}
+
+template <typename T>
+inline bool BitReader::GetAligned(int num_bytes, T* v) {
+  if (ARROW_PREDICT_FALSE(num_bytes > static_cast<int>(sizeof(T)))) {
+    return false;
+  }
+
+  int bytes_read = static_cast<int>(bit_util::BytesForBits(bit_offset_));
+  if (ARROW_PREDICT_FALSE(byte_offset_ + bytes_read + num_bytes > max_bytes_)) {
+    return false;
+  }
+
+  // Advance byte_offset to next unread byte and read num_bytes
+  byte_offset_ += bytes_read;
+  if constexpr (std::is_same_v<T, bool>) {
+    // ARROW-18031: if we're trying to get an aligned bool, just check
+    // the LSB of the next byte and move on.  If we memcpy + FromLittleEndian
+    // as usual, we have potential undefined behavior for bools if the value
+    // isn't 0 or 1.
+    *v = *(buffer_ + byte_offset_) & 1;
+  } else {
+    memcpy(v, buffer_ + byte_offset_, num_bytes);
+    *v = arrow::bit_util::FromLittleEndian(*v);
+  }
+  byte_offset_ += num_bytes;
+
+  bit_offset_ = 0;
+  buffered_values_ =
+      detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
+  return true;
+}
+
+inline bool BitReader::Advance(int64_t num_bits) {
+  int64_t bits_required = bit_offset_ + num_bits;
+  int64_t bytes_required = bit_util::BytesForBits(bits_required);
+  if (ARROW_PREDICT_FALSE(bytes_required > max_bytes_ - byte_offset_)) {
+    return false;
+  }
+  byte_offset_ += static_cast<int>(bits_required >> 3);
+  bit_offset_ = static_cast<int>(bits_required & 7);
+  buffered_values_ =
+      detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
+  return true;
+}
+
+inline bool BitWriter::PutVlqInt(uint32_t v) {
+  bool result = true;
+  while ((v & 0xFFFFFF80UL) != 0UL) {
+    result &= PutAligned(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
+    v >>= 7;
+  }
+  result &= PutAligned(static_cast<uint8_t>(v & 0x7F), 1);
+  return result;
+}
+
+inline bool BitReader::GetVlqInt(uint32_t* v) {
+  uint32_t tmp = 0;
+
+  for (int i = 0; i < kMaxVlqByteLength; i++) {
+    uint8_t byte = 0;
+    if (ARROW_PREDICT_FALSE(!GetAligned(1, &byte))) {
+      return false;
+    }
+    tmp |= static_cast<uint32_t>(byte & 0x7F) << (7 * i);
+
+    if ((byte & 0x80) == 0) {
+      *v = tmp;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+inline bool BitWriter::PutZigZagVlqInt(int32_t v) {
+  uint32_t u_v = ::arrow::util::SafeCopy<uint32_t>(v);
+  u_v = (u_v << 1) ^ static_cast<uint32_t>(v >> 31);
+  return PutVlqInt(u_v);
+}
+
+inline bool BitReader::GetZigZagVlqInt(int32_t* v) {
+  uint32_t u;
+  if (!GetVlqInt(&u)) return false;
+  u = (u >> 1) ^ (~(u & 1) + 1);
+  *v = ::arrow::util::SafeCopy<int32_t>(u);
+  return true;
+}
+
+inline bool BitWriter::PutVlqInt(uint64_t v) {
+  bool result = true;
+  while ((v & 0xFFFFFFFFFFFFFF80ULL) != 0ULL) {
+    result &= PutAligned(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
+    v >>= 7;
+  }
+  result &= PutAligned(static_cast<uint8_t>(v & 0x7F), 1);
+  return result;
+}
+
+inline bool BitReader::GetVlqInt(uint64_t* v) {
+  uint64_t tmp = 0;
+
+  for (int i = 0; i < kMaxVlqByteLengthForInt64; i++) {
+    uint8_t byte = 0;
+    if (ARROW_PREDICT_FALSE(!GetAligned(1, &byte))) {
+      return false;
+    }
+    tmp |= static_cast<uint64_t>(byte & 0x7F) << (7 * i);
+
+    if ((byte & 0x80) == 0) {
+      *v = tmp;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+inline bool BitWriter::PutZigZagVlqInt(int64_t v) {
+  uint64_t u_v = ::arrow::util::SafeCopy<uint64_t>(v);
+  u_v = (u_v << 1) ^ static_cast<uint64_t>(v >> 63);
+  return PutVlqInt(u_v);
+}
+
+inline bool BitReader::GetZigZagVlqInt(int64_t* v) {
+  uint64_t u;
+  if (!GetVlqInt(&u)) return false;
+  u = (u >> 1) ^ (~(u & 1) + 1);
+  *v = ::arrow::util::SafeCopy<int64_t>(u);
+  return true;
+}
+
+}  // namespace bit_util
+}  // namespace arrow
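+
+// NOTE (review comment, not upstream): worked example for the VLQ/zigzag
+// routines above.  PutVlqInt(300) emits the two bytes 0xAC 0x02, since
+// 300 = 0b10'0101100 is written low 7 bits first with the continuation bit
+// (0x80) set on all but the last byte.  PutZigZagVlqInt(-1) first maps -1 to
+// the unsigned value 1 via (u_v << 1) ^ (v >> 31), then VLQ-encodes it as
+// the single byte 0x01.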
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d3a1dc2459f935e5494743a253a24c5d0b1f197
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h
@@ -0,0 +1,370 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_MSC_VER)
+#if defined(_M_AMD64) || defined(_M_X64)
+#include <intrin.h>  // IWYU pragma: keep
+#include <nmmintrin.h>
+#endif
+
+#pragma intrinsic(_BitScanReverse)
+#pragma intrinsic(_BitScanForward)
+#define ARROW_POPCOUNT64 __popcnt64
+#define ARROW_POPCOUNT32 __popcnt
+#else
+#define ARROW_POPCOUNT64 __builtin_popcountll
+#define ARROW_POPCOUNT32 __builtin_popcount
+#endif
+
+#include <cstdint>
+#include <type_traits>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace detail {
+
+template <typename Integer>
+typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
+  return static_cast<typename std::make_unsigned<Integer>::type>(x);
+}
+
+}  // namespace detail
+
+namespace bit_util {
+
+// The number of set bits in a given unsigned byte value, pre-computed
+//
+// Generated with the following Python code
+// output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};'
+// popcounts = [str(bin(i).count('1')) for i in range(0, 256)]
+// print(output.format(', '.join(popcounts)))
+static constexpr uint8_t kBytePopcount[] = {
+    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3,
+    4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4,
+    4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4,
+    5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,
+    4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2,
+    3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
+    5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
+    5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6,
+    4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
+
+static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); }
+static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); }
+
+//
+// Bit-related computations on integer values
+//
+
+// Returns the ceil of value/divisor
+constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
+  return (value == 0) ? 0 : 1 + (value - 1) / divisor;
+}
+
+// Return the number of bytes needed to fit the given number of bits
+constexpr int64_t BytesForBits(int64_t bits) {
+  // This formula avoids integer overflow on very large `bits`
+  return (bits >> 3) + ((bits & 7) != 0);
+}
+
+constexpr bool IsPowerOf2(int64_t value) {
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+constexpr bool IsPowerOf2(uint64_t value) {
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Returns the smallest power of two that contains n.  If n is already a
+// power of two, it is returned as is.
+static inline int64_t NextPower2(int64_t n) {
+  // Taken from
+  // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+  n--;
+  n |= n >> 1;
+  n |= n >> 2;
+  n |= n >> 4;
+  n |= n >> 8;
+  n |= n >> 16;
+  n |= n >> 32;
+  n++;
+  return n;
+}
+
+constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; }
+
+constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; }
+
+// Returns a mask for the bit_index lower order bits.
+// Only valid for bit_index in the range [0, 64).
+constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
+  return (static_cast<uint64_t>(1) << bit_index) - 1;
+}
+
+// Returns 'value' rounded up to the nearest multiple of 'factor'
+constexpr int64_t RoundUp(int64_t value, int64_t factor) {
+  return CeilDiv(value, factor) * factor;
+}
+
+// Returns 'value' rounded down to the nearest multiple of 'factor'
+constexpr int64_t RoundDown(int64_t value, int64_t factor) {
+  return (value / factor) * factor;
+}
+
+// Returns 'value' rounded up to the nearest multiple of 'factor' when factor
+// is a power of two.
+// The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
+// since we cannot return the correct result which would be 2**64.
+constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) {
+  // DCHECK(value >= 0);
+  // DCHECK(IsPowerOf2(factor));
+  return (value + (factor - 1)) & ~(factor - 1);
+}
+
+constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) {
+  // DCHECK(IsPowerOf2(factor));
+  return (value + (factor - 1)) & ~(factor - 1);
+}
+
+constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); }
+
+constexpr int64_t RoundUpToMultipleOf64(int64_t num) {
+  return RoundUpToPowerOf2(num, 64);
+}
+
+// Returns the number of bytes covering a sliced bitmap.  Find the length
+// rounded to cover full bytes on both extremities.
+//
+// The following example represents a slice (offset=10, length=9)
+//
+// 0       8      16     24
+// |-------|-------|------|
+//           [       ]          (slice)
+//         [                ]   (same slice aligned to byte bounds, length=16)
+//
+// The covering bytes is the length (in bytes) of this new aligned slice.
+constexpr int64_t CoveringBytes(int64_t offset, int64_t length) {
+  return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8;
+}
+
+// Returns the 'num_bits' least-significant bits of 'v'.
+static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
+  if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
+  if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
+  int n = 64 - num_bits;
+  return (v << n) >> n;
+}
+
+/// \brief Count the number of leading zeros in an unsigned integer.
+static inline int CountLeadingZeros(uint32_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 32;
+  return static_cast<int>(__builtin_clz(value));
+#elif defined(_MSC_VER)
+  unsigned long index;                                               // NOLINT
+  if (_BitScanReverse(&index, static_cast<unsigned long>(value))) {  // NOLINT
+    return 31 - static_cast<int>(index);
+  } else {
+    return 32;
+  }
+#else
+  int bitpos = 0;
+  while (value != 0) {
+    value >>= 1;
+    ++bitpos;
+  }
+  return 32 - bitpos;
+#endif
+}
+
+static inline int CountLeadingZeros(uint64_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 64;
+  return static_cast<int>(__builtin_clzll(value));
+#elif defined(_MSC_VER)
+  unsigned long index;                     // NOLINT
+  if (_BitScanReverse64(&index, value)) {  // NOLINT
+    return 63 - static_cast<int>(index);
+  } else {
+    return 64;
+  }
+#else
+  int bitpos = 0;
+  while (value != 0) {
+    value >>= 1;
+    ++bitpos;
+  }
+  return 64 - bitpos;
+#endif
+}
+
+static inline int CountTrailingZeros(uint32_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 32;
+  return static_cast<int>(__builtin_ctzl(value));
+#elif defined(_MSC_VER)
+  unsigned long index;  // NOLINT
+  if (_BitScanForward(&index, value)) {
+    return static_cast<int>(index);
+  } else {
+    return 32;
+  }
+#else
+  int bitpos = 0;
+  if (value) {
+    // Note the parentheses: `value & 1 == 0` would parse as `value & (1 == 0)`
+    // and the loop would never run.
+    while ((value & 1) == 0) {
+      value >>= 1;
+      ++bitpos;
+    }
+  } else {
+    bitpos = 32;
+  }
+  return bitpos;
+#endif
+}
+
+static inline int CountTrailingZeros(uint64_t value) {
+#if defined(__clang__) || defined(__GNUC__)
+  if (value == 0) return 64;
+  return static_cast<int>(__builtin_ctzll(value));
+#elif defined(_MSC_VER)
+  unsigned long index;  // NOLINT
+  if (_BitScanForward64(&index, value)) {
+    return static_cast<int>(index);
+  } else {
+    return 64;
+  }
+#else
+  int bitpos = 0;
+  if (value) {
+    while ((value & 1) == 0) {
+      value >>= 1;
+      ++bitpos;
+    }
+  } else {
+    bitpos = 64;
+  }
+  return bitpos;
+#endif
+}
+
+// Returns the minimum number of bits needed to represent an unsigned value
+static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }
+
+// Returns ceil(log2(x)).
+static inline int Log2(uint64_t x) {
+  // DCHECK_GT(x, 0);
+  return NumRequiredBits(x - 1);
+}
+
+//
+// Utilities for reading and writing individual bits by their index
+// in a memory area.
+//
+
+// Bitmask selecting the k-th bit in a byte
+static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};
+
+// the bitwise complement version of kBitmask
+static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};
+
+// Bitmask selecting the (k - 1) preceding bits in a byte
+static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
+static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};
+
+// the bitwise complement version of kPrecedingBitmask
+static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};
+
+static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
+  return (bits[i >> 3] >> (i & 0x07)) & 1;
+}
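+
+// NOTE (review comment, not upstream): example of the indexing math used by
+// GetBit.  For i = 11 it reads byte i >> 3 = 1 and tests bit i & 0x07 = 3,
+// so for a bitmap {0x00, 0x08}, GetBit(bitmap, 11) is true.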
+
+// Gets the i-th bit from a byte.  Should only be used with i <= 7.
+static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) {
+  return byte & kBitmask[i];
+}
+
+static inline void ClearBit(uint8_t* bits, int64_t i) {
+  bits[i / 8] &= kFlippedBitmask[i % 8];
+}
+
+static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; }
+
+static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
+  // https://graphics.stanford.edu/~seander/bithacks.html
+  // "Conditionally set or clear bits without branching"
+  // NOTE: this seems to confuse Valgrind as it reads from potentially
+  // uninitialized memory
+  bits[i / 8] ^= static_cast<uint8_t>(-static_cast<uint8_t>(bit_is_set) ^ bits[i / 8]) &
+                 kBitmask[i % 8];
+}
+
+/// \brief set or clear a range of bits quickly
+ARROW_EXPORT
+void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set);
+
+/// \brief Sets all bits in the bitmap to true
+ARROW_EXPORT
+void SetBitmap(uint8_t* data, int64_t offset, int64_t length);
+
+/// \brief Clears all bits in the bitmap (set to false)
+ARROW_EXPORT
+void ClearBitmap(uint8_t* data, int64_t offset, int64_t length);
+
+/// Returns a mask with the lower i bits set to 1.  If i >= sizeof(Word)*8, all-ones
+/// will be returned.
+/// ex: PrecedingWordBitmask<uint8_t>(4) == 0x0f
+/// ref: https://stackoverflow.com/a/59523400
+template <typename Word>
+constexpr Word PrecedingWordBitmask(unsigned int const i) {
+  return static_cast<Word>(static_cast<Word>(i < sizeof(Word) * 8)
+                           << (i & (sizeof(Word) * 8 - 1))) -
+         1;
+}
+static_assert(PrecedingWordBitmask<uint8_t>(0) == 0x00, "");
+static_assert(PrecedingWordBitmask<uint8_t>(4) == 0x0f, "");
+static_assert(PrecedingWordBitmask<uint8_t>(8) == 0xff, "");
+static_assert(PrecedingWordBitmask<uint16_t>(8) == 0x00ff, "");
+
+/// \brief Create a word with the low `n` bits from `low` and the high
+/// `sizeof(Word)*8 - n` bits from `high`.
+/// Word ret
+/// for (i = 0; i < sizeof(Word)*8; i++){
+///     ret[i]= i < n ? low[i]: high[i];
+/// }
+template <typename Word>
+constexpr Word SpliceWord(int n, Word low, Word high) {
+  return (high & ~PrecedingWordBitmask<Word>(n)) | (low & PrecedingWordBitmask<Word>(n));
+}
+
+/// \brief Pack integers into a bitmap in batches of 8
+template <int batch_size>
+void PackBits(const uint32_t* values, uint8_t* out) {
+  for (int i = 0; i < batch_size / 8; ++i) {
+    *out++ = static_cast<uint8_t>(values[0] | values[1] << 1 | values[2] << 2 |
+                                  values[3] << 3 | values[4] << 4 | values[5] << 5 |
+                                  values[6] << 6 | values[7] << 7);
+    values += 8;
+  }
+}
+
+}  // namespace bit_util
+}  // namespace arrow
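+
+// NOTE (review comment, not upstream): worked example for the word helpers
+// above.  With Word = uint8_t, PrecedingWordBitmask<uint8_t>(3) == 0x07, so
+// SpliceWord<uint8_t>(3, /*low=*/0xFF, /*high=*/0x00) == 0x07: the low 3 bits
+// come from `low` and the remaining high bits from `high`.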
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bd2ad44140834487b02d5899d3515e7b7eafefc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// \brief Generate a bitmap with all positions set to `value`, except for the
+/// one found at `straggler_pos`.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BitmapAllButOne(MemoryPool* pool, int64_t length,
+                                                int64_t straggler_pos, bool value = true);
+
+/// \brief Convert a vector of bytes to a bitmap buffer
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> BytesToBits(const std::vector<uint8_t>&,
+                                            MemoryPool* pool = default_memory_pool());
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h
new file mode 100644
index 0000000000000000000000000000000000000000..52a1e228e01f1d6c3c37a5e2d49d843f0a4573f9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <type_traits>
+
+#include "arrow/buffer.h"
+#include "arrow/memory_pool.h"
+#include "arrow/result.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+// A std::generate() like function to write sequential bits into a bitmap area.
+// Bits preceding the bitmap area are preserved, bits following the bitmap
+// area may be clobbered.
+
+template <class Generator>
+void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
+  if (length == 0) {
+    return;
+  }
+  uint8_t* cur = bitmap + start_offset / 8;
+  uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
+  uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
+
+  for (int64_t index = 0; index < length; ++index) {
+    const bool bit = g();
+    current_byte = bit ? (current_byte | bit_mask) : current_byte;
+    bit_mask = static_cast<uint8_t>(bit_mask << 1);
+    if (bit_mask == 0) {
+      bit_mask = 1;
+      *cur++ = current_byte;
+      current_byte = 0;
+    }
+  }
+  if (bit_mask != 1) {
+    *cur++ = current_byte;
+  }
+}
+
+// Like GenerateBits(), but unrolls its main loop for higher performance.
+
+template <class Generator>
+void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
+                          Generator&& g) {
+  static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
+                "Functor passed to GenerateBitsUnrolled must return bool");
+
+  if (length == 0) {
+    return;
+  }
+  uint8_t current_byte;
+  uint8_t* cur = bitmap + start_offset / 8;
+  const uint64_t start_bit_offset = start_offset % 8;
+  uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
+  int64_t remaining = length;
+
+  if (bit_mask != 0x01) {
+    current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
+    while (bit_mask != 0 && remaining > 0) {
+      current_byte |= g() * bit_mask;
+      bit_mask = static_cast<uint8_t>(bit_mask << 1);
+      --remaining;
+    }
+    *cur++ = current_byte;
+  }
+
+  int64_t remaining_bytes = remaining / 8;
+  uint8_t out_results[8];
+  while (remaining_bytes-- > 0) {
+    for (int i = 0; i < 8; ++i) {
+      out_results[i] = g();
+    }
+    *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
+                                  out_results[2] << 2 | out_results[3] << 3 |
+                                  out_results[4] << 4 | out_results[5] << 5 |
+                                  out_results[6] << 6 | out_results[7] << 7);
+  }
+
+  int64_t remaining_bits = remaining % 8;
+  if (remaining_bits) {
+    current_byte = 0;
+    bit_mask = 0x01;
+    while (remaining_bits-- > 0) {
+      current_byte |= g() * bit_mask;
+      bit_mask = static_cast<uint8_t>(bit_mask << 1);
+    }
+    *cur++ = current_byte;
+  }
+}
+
+}  // namespace internal
+}  // namespace arrow
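+
+// NOTE (review comment, not upstream): minimal usage sketch for GenerateBits,
+// filling bits [2, 7) of a preallocated byte from a stateful lambda:
+//
+//   uint8_t bitmap[1] = {0};
+//   int64_t i = 0;
+//   arrow::internal::GenerateBits(bitmap, /*start_offset=*/2, /*length=*/5,
+//                                 [&] { return (i++ % 2) == 0; });
+//   // bitmap[0] == 0x54: bits 2, 4 and 6 are set; bits 0-1 are preserved.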
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h
new file mode 100644
index 0000000000000000000000000000000000000000..5526c87dbcaf2d6fc69709d6853d7dbbb351f044
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h
@@ -0,0 +1,273 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+
+#include "arrow/buffer.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace internal {
+
+class BitmapReader {
+ public:
+  BitmapReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(bitmap), position_(0), length_(length) {
+    current_byte_ = 0;
+    byte_offset_ = start_offset / 8;
+    bit_offset_ = start_offset % 8;
+    if (length > 0) {
+      current_byte_ = bitmap[byte_offset_];
+    }
+  }
+
+  bool IsSet() const { return (current_byte_ & (1 << bit_offset_)) != 0; }
+
+  bool IsNotSet() const { return (current_byte_ & (1 << bit_offset_)) == 0; }
+
+  void Next() {
+    ++bit_offset_;
+    ++position_;
+    if (ARROW_PREDICT_FALSE(bit_offset_ == 8)) {
+      bit_offset_ = 0;
+      ++byte_offset_;
+      if (ARROW_PREDICT_TRUE(position_ < length_)) {
+        current_byte_ = bitmap_[byte_offset_];
+      }
+    }
+  }
+
+  int64_t position() const { return position_; }
+
+  int64_t length() const { return length_; }
+
+ private:
+  const uint8_t* bitmap_;
+  int64_t position_;
+  int64_t length_;
+
+  uint8_t current_byte_;
+  int64_t byte_offset_;
+  int64_t bit_offset_;
+};
+
+// XXX Cannot name it BitmapWordReader because the name is already used
+// in bitmap_ops.cc
+
+class BitmapUInt64Reader {
+ public:
+  BitmapUInt64Reader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
+        num_carry_bits_(8 - start_offset % 8),
+        length_(length),
+        remaining_length_(length_),
+        carry_bits_(0) {
+    if (length_ > 0) {
+      // Load carry bits from the first byte's MSBs
+      if (length_ >= num_carry_bits_) {
+        carry_bits_ =
+            LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), num_carry_bits_);
+      } else {
+        carry_bits_ =
+            LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), length_);
+      }
+    }
+  }
+
+  uint64_t NextWord() {
+    if (ARROW_PREDICT_TRUE(remaining_length_ >= 64 + num_carry_bits_)) {
+      // We can load a full word
+      uint64_t next_word = LoadFullWord();
+      // Carry bits come first, then the (64 - num_carry_bits_) LSBs from next_word
+      uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+      carry_bits_ = next_word >> (64 - num_carry_bits_);
+      remaining_length_ -= 64;
+      return word;
+    } else if (remaining_length_ > num_carry_bits_) {
+      // We can load a partial word
+      uint64_t next_word =
+          LoadPartialWord(/*bit_offset=*/0, remaining_length_ - num_carry_bits_);
+      uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+      carry_bits_ = next_word >> (64 - num_carry_bits_);
+      remaining_length_ = std::max<int64_t>(remaining_length_ - 64, 0);
+      return word;
+    } else {
+      remaining_length_ = 0;
+      return carry_bits_;
+    }
+  }
+
+  int64_t position() const { return length_ - remaining_length_; }
+
+  int64_t length() const { return length_; }
+
+ private:
+  uint64_t LoadFullWord() {
+    uint64_t word;
+    memcpy(&word, bitmap_, 8);
+    bitmap_ += 8;
+    return bit_util::ToLittleEndian(word);
+  }
+
+  uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
+    uint64_t word = 0;
+    const int64_t num_bytes = bit_util::BytesForBits(num_bits);
+    memcpy(&word, bitmap_, num_bytes);
+    bitmap_ += num_bytes;
+    return (bit_util::ToLittleEndian(word) >> bit_offset) &
+           bit_util::LeastSignificantBitMask(num_bits);
+  }
+
+  const uint8_t* bitmap_;
+  const int64_t num_carry_bits_;  // in [1, 8]
+  const int64_t length_;
+  int64_t remaining_length_;
+  uint64_t carry_bits_;
+};
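+
+// NOTE: illustrative sketch, not from the upstream header.  Typical
+// BitmapReader iteration: count the set bits in a bitmap slice.  The helper
+// name is hypothetical, and Arrow ships optimized popcount-based counters
+// that should be preferred in real code.
+inline int64_t ExampleCountSetBits(const uint8_t* bitmap, int64_t offset,
+                                   int64_t length) {
+  BitmapReader reader(bitmap, offset, length);
+  int64_t count = 0;
+  for (int64_t i = 0; i < length; ++i) {
+    count += reader.IsSet() ? 1 : 0;
+    reader.Next();  // advance to the next bit, reloading bytes as needed
+  }
+  return count;
+}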
+
+// BitmapWordReader here is faster than BitmapUInt64Reader (in bitmap_reader.h)
+// on sufficiently large inputs.  However, it has a larger prolog / epilog
+// overhead and should probably not be used for small bitmaps.
+
+template <typename Word, bool may_have_byte_offset = true>
+class BitmapWordReader {
+ public:
+  BitmapWordReader() = default;
+  BitmapWordReader(const uint8_t* bitmap, int64_t offset, int64_t length)
+      : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
+        bitmap_(bitmap + offset / 8),
+        bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)) {
+    // decrement word count by one as we may touch two adjacent words in one iteration
+    nwords_ = length / (sizeof(Word) * 8) - 1;
+    if (nwords_ < 0) {
+      nwords_ = 0;
+    }
+    trailing_bits_ = static_cast<int>(length - nwords_ * sizeof(Word) * 8);
+    trailing_bytes_ = static_cast<int>(bit_util::BytesForBits(trailing_bits_));
+
+    if (nwords_ > 0) {
+      current_data.word_ = load<Word>(bitmap_);
+    } else if (length > 0) {
+      current_data.epi.byte_ = load<uint8_t>(bitmap_);
+    }
+  }
+
+  Word NextWord() {
+    bitmap_ += sizeof(Word);
+    const Word next_word = load<Word>(bitmap_);
+    Word word = current_data.word_;
+    if (may_have_byte_offset && offset_) {
+      // combine two adjacent words into one word
+      // |<------ next ----->|<---- current ---->|
+      // +-------------+-----+-------------+-----+
+      // |     ---     |  A  |      B      | --- |
+      // +-------------+-----+-------------+-----+
+      //                  |         |       offset
+      //                  v         v
+      //               +-----+-------------+
+      //               |  A  |      B      |
+      //               +-----+-------------+
+      //               |<------ word ----->|
+      word >>= offset_;
+      word |= next_word << (sizeof(Word) * 8 - offset_);
+    }
+    current_data.word_ = next_word;
+    return word;
+  }
+
+  uint8_t NextTrailingByte(int& valid_bits) {
+    uint8_t byte;
+    assert(trailing_bits_ > 0);
+
+    if (trailing_bits_ <= 8) {
+      // last byte
+      valid_bits = trailing_bits_;
+      trailing_bits_ = 0;
+      byte = 0;
+      internal::BitmapReader reader(bitmap_, offset_, valid_bits);
+      for (int i = 0; i < valid_bits; ++i) {
+        byte >>= 1;
+        if (reader.IsSet()) {
+          byte |= 0x80;
+        }
+        reader.Next();
+      }
+      byte >>= (8 - valid_bits);
+    } else {
+      ++bitmap_;
+      const uint8_t next_byte = load<uint8_t>(bitmap_);
+      byte = current_data.epi.byte_;
+      if (may_have_byte_offset && offset_) {
+        byte >>= offset_;
+        byte |= next_byte << (8 - offset_);
+      }
+      current_data.epi.byte_ = next_byte;
+      trailing_bits_ -= 8;
+      trailing_bytes_--;
+      valid_bits = 8;
+    }
+    return byte;
+  }
+
+  int64_t words() const { return nwords_; }
+  int trailing_bytes() const { return trailing_bytes_; }
+
+ private:
+  int64_t offset_;
+  const uint8_t* bitmap_;
+
+  const uint8_t* bitmap_end_;
+  int64_t nwords_;
+  int trailing_bits_;
+  int trailing_bytes_;
+  union {
+    Word word_;
+    struct {
+#if ARROW_LITTLE_ENDIAN == 0
+      uint8_t padding_bytes_[sizeof(Word) - 1];
+#endif
+      uint8_t byte_;
+    } epi;
+  } current_data;
+
+  template <typename DType>
+  DType load(const uint8_t* bitmap) {
+    assert(bitmap + sizeof(DType) <= bitmap_end_);
+    return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
+  }
+};
+
+/// \brief Index into a possibly nonexistent bitmap
+struct OptionalBitIndexer {
+  const uint8_t* bitmap;
+  const int64_t offset;
+
+  explicit OptionalBitIndexer(const uint8_t* buffer = NULLPTR, int64_t offset = 0)
+      : bitmap(buffer), offset(offset) {}
+
+  bool operator[](int64_t i) const {
+    return bitmap == NULLPTR || bit_util::GetBit(bitmap, offset + i);
+  }
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9ce8012f3eb5a65ec91b1321b687bc0d77f7557
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h
@@ -0,0 +1,286 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+
+#include "arrow/util/bit_util.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace internal {
+
+class BitmapWriter {
+  // A sequential bitwise writer that preserves surrounding bit values.
+
+ public:
+  BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(bitmap), position_(0), length_(length) {
+    byte_offset_ = start_offset / 8;
+    bit_mask_ = bit_util::kBitmask[start_offset % 8];
+    if (length > 0) {
+      current_byte_ = bitmap[byte_offset_];
+    } else {
+      current_byte_ = 0;
+    }
+  }
+
+  void Set() { current_byte_ |= bit_mask_; }
+
+  void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }
+
+  void Next() {
+    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
+    ++position_;
+    if (bit_mask_ == 0) {
+      // Finished this byte, need advancing
+      bit_mask_ = 0x01;
+      bitmap_[byte_offset_++] = current_byte_;
+      if (ARROW_PREDICT_TRUE(position_ < length_)) {
+        current_byte_ = bitmap_[byte_offset_];
+      }
+    }
+  }
+
+  void Finish() {
+    // Store the current byte if we didn't go past the end of the bitmap storage
+    if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
+      bitmap_[byte_offset_] = current_byte_;
+    }
+  }
+
+  int64_t position() const { return position_; }
+
+ private:
+  uint8_t* bitmap_;
+  int64_t position_;
+  int64_t length_;
+
+  uint8_t current_byte_;
+  uint8_t bit_mask_;
+  int64_t byte_offset_;
+};
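+
+// NOTE: illustrative sketch, not from the upstream header.  Typical
+// BitmapWriter usage: write bits [start_offset, start_offset + length) from a
+// validity predicate while preserving the surrounding bits.  The helper name
+// and predicate are hypothetical.
+template <typename IsValid>
+inline void ExampleWriteValidityBits(uint8_t* bitmap, int64_t start_offset,
+                                     int64_t length, IsValid&& is_valid) {
+  BitmapWriter writer(bitmap, start_offset, length);
+  for (int64_t i = 0; i < length; ++i) {
+    if (is_valid(i)) {
+      writer.Set();
+    } else {
+      writer.Clear();
+    }
+    writer.Next();
+  }
+  writer.Finish();  // flush the last, possibly partial, byte
+}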
+
+class FirstTimeBitmapWriter {
+  // Like BitmapWriter, but any bit values *following* the bits written
+  // might be clobbered.  It is hence faster than BitmapWriter, and can
+  // also avoid false positives with Valgrind.
+
+ public:
+  FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
+      : bitmap_(bitmap), position_(0), length_(length) {
+    current_byte_ = 0;
+    byte_offset_ = start_offset / 8;
+    bit_mask_ = bit_util::kBitmask[start_offset % 8];
+    if (length > 0) {
+      current_byte_ =
+          bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8];
+    } else {
+      current_byte_ = 0;
+    }
+  }
+
+  /// Appends number_of_bits from word to valid_bits and valid_bits_offset.
+  ///
+  /// \param[in] word The LSB bitmap to append.  Any bits past number_of_bits are
+  /// assumed to be unset (i.e. 0).
+  /// \param[in] number_of_bits The number of bits to append from word.
+  void AppendWord(uint64_t word, int64_t number_of_bits) {
+    if (ARROW_PREDICT_FALSE(number_of_bits == 0)) {
+      return;
+    }
+
+    // Location that the first byte needs to be written to.
+    uint8_t* append_position = bitmap_ + byte_offset_;
+
+    // Update state variables except for current_byte_ here.
+    position_ += number_of_bits;
+    int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_));
+    bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8];
+    byte_offset_ += (bit_offset + number_of_bits) / 8;
+
+    if (bit_offset != 0) {
+      // We are in the middle of the byte.  This code updates the byte and shifts
+      // bits appropriately within word so it can be memcpy'd below.
+      int64_t bits_to_carry = 8 - bit_offset;
+      // Carry over bits from word to current_byte_.  We assume any extra bits in word
+      // are unset, so no additional accounting is needed for when number_of_bits <
+      // bits_to_carry.
+      current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset;
+      // Check if everything is transferred into current_byte_.
+      if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) {
+        return;
+      }
+      *append_position = current_byte_;
+      append_position++;
+      // Move the carry bits off of word.
+      word = word >> bits_to_carry;
+      number_of_bits -= bits_to_carry;
+    }
+    word = bit_util::ToLittleEndian(word);
+    int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits);
+    std::memcpy(append_position, &word, bytes_for_word);
+    // At this point, the previous current_byte_ has been written to bitmap_.
+    // The new current_byte_ is either the last relevant byte in 'word'
+    // or cleared if the new position is byte aligned (i.e. a fresh byte).
+    if (bit_mask_ == 0x1) {
+      current_byte_ = 0;
+    } else {
+      current_byte_ = *(append_position + bytes_for_word - 1);
+    }
+  }
+
+  void Set() { current_byte_ |= bit_mask_; }
+
+  void Clear() {}
+
+  void Next() {
+    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
+    ++position_;
+    if (bit_mask_ == 0) {
+      // Finished this byte, need advancing
+      bit_mask_ = 0x01;
+      bitmap_[byte_offset_++] = current_byte_;
+      current_byte_ = 0;
+    }
+  }
+
+  void Finish() {
+    // Store the current byte if we didn't go past the end of the bitmap storage
+    if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
+      bitmap_[byte_offset_] = current_byte_;
+    }
+  }
+
+  int64_t position() const { return position_; }
+
+ private:
+  uint8_t* bitmap_;
+  int64_t position_;
+  int64_t length_;
+
+  uint8_t current_byte_;
+  uint8_t bit_mask_;
+  int64_t byte_offset_;
+};
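+
+// NOTE (review comment, not upstream): AppendWord worked example.  After
+//   FirstTimeBitmapWriter writer(bitmap, /*start_offset=*/0, /*length=*/64);
+//   writer.AppendWord(0x0F, 4);
+//   writer.AppendWord(0x1, 1);
+//   writer.Finish();
+// the first five bits of the bitmap are set, i.e. bitmap[0] == 0x1F.  Bits
+// beyond the written region may be clobbered, per the class comment above.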
+
+template <typename Word, bool may_have_byte_offset = true>
+class BitmapWordWriter {
+ public:
+  BitmapWordWriter() = default;
+  BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length)
+      : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
+        bitmap_(bitmap + offset / 8),
+        bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)),
+        mask_((1U << offset_) - 1) {
+    if (offset_) {
+      if (length >= static_cast<int>(sizeof(Word) * 8)) {
+        current_data.word_ = load<Word>(bitmap_);
+      } else if (length > 0) {
+        current_data.epi.byte_ = load<uint8_t>(bitmap_);
+      }
+    }
+  }
+
+  void PutNextWord(Word word) {
+    if (may_have_byte_offset && offset_) {
+      // split one word into two adjacent words, don't touch unused bits
+      //               |<------ word ----->|
+      //               +-----+-------------+
+      //               |  A  |      B      |
+      //               +-----+-------------+
+      //                  |         |
+      //                  v         v       offset
+      // +-------------+-----+-------------+-----+
+      // |     ---     |  A  |      B      | --- |
+      // +-------------+-----+-------------+-----+
+      // |<------ next ----->|<---- current ---->|
+      word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_));
+      Word next_word = load<Word>(bitmap_ + sizeof(Word));
+      current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_);
+      next_word = (next_word & ~mask_) | (word & mask_);
+      store<Word>(bitmap_, current_data.word_);
+      store<Word>(bitmap_ + sizeof(Word), next_word);
+      current_data.word_ = next_word;
+    } else {
+      store<Word>(bitmap_, word);
+    }
+    bitmap_ += sizeof(Word);
+  }
+
+  void PutNextTrailingByte(uint8_t byte, int valid_bits) {
+    if (valid_bits == 8) {
+      if (may_have_byte_offset && offset_) {
+        byte = (byte << offset_) | (byte >> (8 - offset_));
+        uint8_t next_byte = load<uint8_t>(bitmap_ + 1);
+        current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_);
+        next_byte = (next_byte & ~mask_) | (byte & mask_);
+        store<uint8_t>(bitmap_, current_data.epi.byte_);
+        store<uint8_t>(bitmap_ + 1, next_byte);
+        current_data.epi.byte_ = next_byte;
+      } else {
+        store<uint8_t>(bitmap_, byte);
+      }
+      ++bitmap_;
+    } else {
+      assert(valid_bits > 0);
+      assert(valid_bits < 8);
+      assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_);
+      internal::BitmapWriter writer(bitmap_, offset_, valid_bits);
+      for (int i = 0; i < valid_bits; ++i) {
+        (byte & 0x01) ? writer.Set() : writer.Clear();
+        writer.Next();
+        byte >>= 1;
+      }
+      writer.Finish();
+    }
+  }
+
+ private:
+  int64_t offset_;
+  uint8_t* bitmap_;
+
+  const uint8_t* bitmap_end_;
+  uint64_t mask_;
+  union {
+    Word word_;
+    struct {
+#if ARROW_LITTLE_ENDIAN == 0
+      uint8_t padding_bytes_[sizeof(Word) - 1];
+#endif
+      uint8_t byte_;
+    } epi;
+  } current_data;
+
+  template <typename DType>
+  DType load(const uint8_t* bitmap) {
+    assert(bitmap + sizeof(DType) <= bitmap_end_);
+    return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
+  }
+
+  template <typename DType>
+  void store(uint8_t* bitmap, DType data) {
+    assert(bitmap + sizeof(DType) <= bitmap_end_);
+    util::SafeStore(bitmap, bit_util::FromLittleEndian(data));
+  }
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b334b3605eeee020a2e717b64f530c5ba82bdcd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h
@@ -0,0 +1,89 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <bitset>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "arrow/buffer.h"
+#include "arrow/memory_pool.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/compare.h"
+#include "arrow/util/functional.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/string_builder.h"
+#include "arrow/util/type_traits.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// \brief Store a stack of bitsets efficiently.  The top bitset may be
+/// accessed and its bits may be modified, but it may not be resized.
+class BitsetStack {
+ public:
+  using reference = typename std::vector<bool>::reference;
+
+  /// \brief push a bitset onto the stack
+  /// \param size number of bits in the next bitset
+  /// \param value initial value for bits in the pushed bitset
+  void Push(int size, bool value) {
+    offsets_.push_back(bit_count());
+    bits_.resize(bit_count() + size, value);
+  }
+
+  /// \brief number of bits in the bitset at the top of the stack
+  int TopSize() const {
+    if (offsets_.size() == 0) return 0;
+    return bit_count() - offsets_.back();
+  }
+
+  /// \brief pop a bitset off the stack
+  void Pop() {
+    bits_.resize(offsets_.back());
+    offsets_.pop_back();
+  }
+
+  /// \brief get the value of a bit in the top bitset
+  /// \param i index of the bit to access
+  bool operator[](int i) const { return bits_[offsets_.back() + i]; }
+
+  /// \brief get a mutable reference to a bit in the top bitset
+  /// \param i index of the bit to access
+  reference operator[](int i) { return bits_[offsets_.back() + i]; }
+
+ private:
+  int bit_count() const { return static_cast<int>(bits_.size()); }
+  std::vector<bool> bits_;
+  std::vector<int> offsets_;
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd85c1638c7bfcd9cfd4034fb80ce775aaa92ce9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/util/endian.h"
+#include "arrow/util/visibility.h"
+
+#include <stdint.h>
+
+namespace arrow {
+namespace internal {
+
+ARROW_EXPORT
+int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+ARROW_EXPORT
+int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f45619b2a770e3e6589af03012641ceb833b115
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h
@@ -0,0 +1,5642 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This file was generated by script which is modified from its original version in +// GitHub. Original source: +// https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/. +// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint8_t* unpack0_64(const uint8_t* in, uint64_t* out) { + for (int k = 0; k < 32; k += 1) { + out[k] = 0; + } + return in; +} + +inline const uint8_t* unpack1_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 1) & mask; + out[2] = (w0 >> 2) & mask; + out[3] = (w0 >> 3) & mask; + out[4] = (w0 >> 4) & mask; + out[5] = (w0 >> 5) & mask; + out[6] = (w0 >> 6) & mask; + out[7] = (w0 >> 7) & mask; + out[8] = (w0 >> 8) & mask; + out[9] = (w0 >> 9) & mask; + out[10] = (w0 >> 10) & mask; + out[11] = (w0 >> 11) & mask; + out[12] = (w0 >> 12) & mask; + out[13] = (w0 >> 13) & mask; + out[14] = (w0 >> 14) & mask; + out[15] = (w0 >> 15) & mask; + out[16] = (w0 >> 16) & mask; + out[17] = (w0 >> 17) & mask; + out[18] = (w0 >> 18) & mask; + out[19] = (w0 >> 19) & mask; + out[20] = (w0 >> 20) & mask; + out[21] = (w0 >> 21) & mask; + out[22] = (w0 >> 22) & mask; + out[23] = (w0 >> 23) & mask; + out[24] = (w0 >> 24) & mask; + out[25] = (w0 >> 25) & mask; + out[26] = (w0 >> 26) & mask; + out[27] = (w0 >> 27) & mask; + out[28] = (w0 >> 28) & mask; + out[29] = (w0 >> 29) & mask; + out[30] = (w0 >> 30) & mask; + out[31] = (w0 >> 31) & mask; + + return in; +} + +inline const uint8_t* unpack2_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 3ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 2) & mask; + out[2] = (w0 >> 4) & mask; + out[3] = (w0 >> 6) & mask; + out[4] = (w0 >> 8) & mask; + out[5] = (w0 >> 10) & mask; + out[6] = (w0 >> 12) & mask; + out[7] = (w0 >> 14) & mask; + out[8] = (w0 >> 16) & mask; + out[9] = (w0 >> 18) & mask; + out[10] = (w0 >> 20) & mask; + out[11] = (w0 >> 22) & mask; + out[12] = (w0 >> 24) & mask; + out[13] = (w0 >> 26) & mask; + out[14] = (w0 >> 28) & mask; + out[15] = (w0 >> 30) & mask; + out[16] = (w0 >> 32) & mask; + out[17] = (w0 >> 34) & mask; + out[18] = (w0 >> 36) & mask; + out[19] = (w0 >> 38) & mask; + out[20] = (w0 >> 40) & mask; + out[21] = (w0 >> 42) & mask; + out[22] = (w0 >> 44) & mask; + out[23] = (w0 >> 46) & mask; + out[24] = (w0 >> 48) & mask; + out[25] = (w0 >> 50) & mask; + out[26] = (w0 >> 52) & mask; + out[27] = (w0 >> 54) & mask; + out[28] = (w0 >> 56) & mask; + out[29] = (w0 >> 58) & mask; + out[30] = (w0 >> 60) & mask; + out[31] = w0 >> 62; + + return in; +} + +inline const uint8_t* unpack3_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 
7ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 3) & mask; + out[2] = (w0 >> 6) & mask; + out[3] = (w0 >> 9) & mask; + out[4] = (w0 >> 12) & mask; + out[5] = (w0 >> 15) & mask; + out[6] = (w0 >> 18) & mask; + out[7] = (w0 >> 21) & mask; + out[8] = (w0 >> 24) & mask; + out[9] = (w0 >> 27) & mask; + out[10] = (w0 >> 30) & mask; + out[11] = (w0 >> 33) & mask; + out[12] = (w0 >> 36) & mask; + out[13] = (w0 >> 39) & mask; + out[14] = (w0 >> 42) & mask; + out[15] = (w0 >> 45) & mask; + out[16] = (w0 >> 48) & mask; + out[17] = (w0 >> 51) & mask; + out[18] = (w0 >> 54) & mask; + out[19] = (w0 >> 57) & mask; + out[20] = (w0 >> 60) & mask; + out[21] = ((w0 >> 63) | (w1 << 1)) & mask; + out[22] = (w1 >> 2) & mask; + out[23] = (w1 >> 5) & mask; + out[24] = (w1 >> 8) & mask; + out[25] = (w1 >> 11) & mask; + out[26] = (w1 >> 14) & mask; + out[27] = (w1 >> 17) & mask; + out[28] = (w1 >> 20) & mask; + out[29] = (w1 >> 23) & mask; + out[30] = (w1 >> 26) & mask; + out[31] = (w1 >> 29) & mask; + + return in; +} + +inline const uint8_t* unpack4_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 15ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 4) & mask; + out[2] = (w0 >> 8) & mask; + out[3] = (w0 >> 12) & mask; + out[4] = (w0 >> 16) & mask; + out[5] = (w0 >> 20) & mask; + out[6] = (w0 >> 24) & mask; + out[7] = (w0 >> 28) & mask; + out[8] = (w0 >> 32) & mask; + out[9] = (w0 >> 36) & mask; + out[10] = (w0 >> 40) & mask; + out[11] = (w0 >> 44) & mask; + out[12] = (w0 >> 48) & mask; + out[13] = (w0 >> 52) & mask; + out[14] = (w0 >> 56) & mask; + out[15] = w0 >> 60; + out[16] = (w1)&mask; + out[17] = (w1 >> 4) & mask; + out[18] = (w1 >> 8) & mask; + out[19] = (w1 >> 12) & mask; + out[20] = (w1 >> 16) & mask; + out[21] = (w1 >> 20) & mask; + out[22] = (w1 >> 24) & mask; + out[23] = (w1 >> 28) & mask; + out[24] = (w1 >> 32) & mask; + out[25] = (w1 >> 36) & mask; + out[26] = (w1 >> 40) & mask; + out[27] = (w1 >> 44) & mask; + out[28] = (w1 >> 48) & mask; + out[29] = (w1 >> 52) & mask; + out[30] = (w1 >> 56) & mask; + out[31] = w1 >> 60; + + return in; +} + +inline const uint8_t* unpack5_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 31ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 5) & mask; + out[2] = (w0 >> 10) & mask; + out[3] = (w0 >> 15) & mask; + out[4] = (w0 >> 20) & mask; + out[5] = (w0 >> 25) & mask; + out[6] = (w0 >> 30) & mask; + out[7] = (w0 >> 35) & mask; + out[8] = (w0 >> 40) & mask; + out[9] = (w0 >> 45) & mask; + out[10] = (w0 >> 50) & mask; + out[11] = (w0 >> 55) & mask; + out[12] = ((w0 >> 60) | (w1 << 4)) & mask; + out[13] = (w1 >> 1) & mask; + out[14] = (w1 >> 6) & mask; + out[15] = (w1 >> 11) & mask; + out[16] = (w1 >> 16) & mask; + out[17] = (w1 >> 21) & mask; + out[18] = (w1 >> 26) & mask; + out[19] = (w1 >> 31) & mask; + out[20] = (w1 >> 36) & mask; + out[21] = (w1 >> 41) & mask; + out[22] = (w1 >> 46) & mask; + out[23] 
= (w1 >> 51) & mask; + out[24] = (w1 >> 56) & mask; + out[25] = ((w1 >> 61) | (w2 << 3)) & mask; + out[26] = (w2 >> 2) & mask; + out[27] = (w2 >> 7) & mask; + out[28] = (w2 >> 12) & mask; + out[29] = (w2 >> 17) & mask; + out[30] = (w2 >> 22) & mask; + out[31] = (w2 >> 27) & mask; + + return in; +} + +inline const uint8_t* unpack6_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 63ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 6) & mask; + out[2] = (w0 >> 12) & mask; + out[3] = (w0 >> 18) & mask; + out[4] = (w0 >> 24) & mask; + out[5] = (w0 >> 30) & mask; + out[6] = (w0 >> 36) & mask; + out[7] = (w0 >> 42) & mask; + out[8] = (w0 >> 48) & mask; + out[9] = (w0 >> 54) & mask; + out[10] = ((w0 >> 60) | (w1 << 4)) & mask; + out[11] = (w1 >> 2) & mask; + out[12] = (w1 >> 8) & mask; + out[13] = (w1 >> 14) & mask; + out[14] = (w1 >> 20) & mask; + out[15] = (w1 >> 26) & mask; + out[16] = (w1 >> 32) & mask; + out[17] = (w1 >> 38) & mask; + out[18] = (w1 >> 44) & mask; + out[19] = (w1 >> 50) & mask; + out[20] = (w1 >> 56) & mask; + out[21] = ((w1 >> 62) | (w2 << 2)) & mask; + out[22] = (w2 >> 4) & mask; + out[23] = (w2 >> 10) & mask; + out[24] = (w2 >> 16) & mask; + out[25] = (w2 >> 22) & mask; + out[26] = (w2 >> 28) & mask; + out[27] = (w2 >> 34) & mask; + out[28] = (w2 >> 40) & mask; + out[29] = (w2 >> 46) & mask; + out[30] = (w2 >> 52) & mask; + out[31] = w2 >> 58; + + return in; +} + +inline const uint8_t* unpack7_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 127ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 7) & mask; + out[2] = (w0 >> 14) & mask; + out[3] = (w0 >> 21) & mask; + out[4] = (w0 >> 28) & mask; + out[5] = (w0 >> 35) & mask; + out[6] = (w0 >> 42) & mask; + out[7] = (w0 >> 49) & mask; + out[8] = (w0 >> 56) & mask; + out[9] = ((w0 >> 63) | (w1 << 1)) & mask; + out[10] = (w1 >> 6) & mask; + out[11] = (w1 >> 13) & mask; + out[12] = (w1 >> 20) & mask; + out[13] = (w1 >> 27) & mask; + out[14] = (w1 >> 34) & mask; + out[15] = (w1 >> 41) & mask; + out[16] = (w1 >> 48) & mask; + out[17] = (w1 >> 55) & mask; + out[18] = ((w1 >> 62) | (w2 << 2)) & mask; + out[19] = (w2 >> 5) & mask; + out[20] = (w2 >> 12) & mask; + out[21] = (w2 >> 19) & mask; + out[22] = (w2 >> 26) & mask; + out[23] = (w2 >> 33) & mask; + out[24] = (w2 >> 40) & mask; + out[25] = (w2 >> 47) & mask; + out[26] = (w2 >> 54) & mask; + out[27] = ((w2 >> 61) | (w3 << 3)) & mask; + out[28] = (w3 >> 4) & mask; + out[29] = (w3 >> 11) & mask; + out[30] = (w3 >> 18) & mask; + out[31] = (w3 >> 25) & mask; + + return in; +} + +inline const uint8_t* unpack8_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 255ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = 
arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 8) & mask; + out[2] = (w0 >> 16) & mask; + out[3] = (w0 >> 24) & mask; + out[4] = (w0 >> 32) & mask; + out[5] = (w0 >> 40) & mask; + out[6] = (w0 >> 48) & mask; + out[7] = w0 >> 56; + out[8] = (w1)&mask; + out[9] = (w1 >> 8) & mask; + out[10] = (w1 >> 16) & mask; + out[11] = (w1 >> 24) & mask; + out[12] = (w1 >> 32) & mask; + out[13] = (w1 >> 40) & mask; + out[14] = (w1 >> 48) & mask; + out[15] = w1 >> 56; + out[16] = (w2)&mask; + out[17] = (w2 >> 8) & mask; + out[18] = (w2 >> 16) & mask; + out[19] = (w2 >> 24) & mask; + out[20] = (w2 >> 32) & mask; + out[21] = (w2 >> 40) & mask; + out[22] = (w2 >> 48) & mask; + out[23] = w2 >> 56; + out[24] = (w3)&mask; + out[25] = (w3 >> 8) & mask; + out[26] = (w3 >> 16) & mask; + out[27] = (w3 >> 24) & mask; + out[28] = (w3 >> 32) & mask; + out[29] = (w3 >> 40) & mask; + out[30] = (w3 >> 48) & mask; + out[31] = w3 >> 56; + + return in; +} + +inline const uint8_t* unpack9_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 511ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 9) & mask; + out[2] = (w0 >> 18) & mask; + out[3] = (w0 >> 27) & mask; + out[4] = (w0 >> 36) & mask; + out[5] = (w0 >> 45) & mask; + out[6] = (w0 >> 54) & mask; + out[7] = ((w0 >> 63) | (w1 << 1)) & mask; + out[8] = (w1 >> 8) & mask; + out[9] = (w1 >> 17) & mask; + out[10] = (w1 >> 26) & mask; + out[11] = (w1 >> 35) & mask; + out[12] = (w1 >> 44) & mask; + out[13] = (w1 >> 53) & mask; + out[14] = ((w1 >> 62) | (w2 << 2)) & mask; + out[15] = (w2 >> 7) & mask; + out[16] = (w2 >> 16) & mask; + out[17] = (w2 >> 25) & mask; + out[18] = (w2 >> 34) & mask; + out[19] = (w2 >> 43) & mask; + out[20] = (w2 >> 52) & mask; + out[21] = ((w2 >> 61) | (w3 << 3)) & mask; + out[22] = (w3 >> 6) & mask; + out[23] = (w3 >> 15) & mask; + out[24] = (w3 >> 24) & mask; + out[25] = (w3 >> 33) & mask; + out[26] = (w3 >> 42) & mask; + out[27] = (w3 >> 51) & mask; + out[28] = ((w3 >> 60) | (w4 << 4)) & mask; + out[29] = (w4 >> 5) & mask; + out[30] = (w4 >> 14) & mask; + out[31] = (w4 >> 23) & mask; + + return in; +} + +inline const uint8_t* unpack10_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1023ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 10) & mask; + out[2] = (w0 >> 20) & mask; + out[3] = (w0 >> 30) & mask; + out[4] = (w0 >> 40) & mask; + out[5] = (w0 >> 50) & mask; + out[6] = ((w0 >> 60) | (w1 << 4)) & mask; + out[7] = (w1 >> 6) & mask; + out[8] = (w1 >> 16) & mask; + 
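+  // Note (assumption): upstream Arrow spells these loads
+  // util::SafeLoadAs<uint64_t>(in); the explicit template argument is what
+  // gives the call its return type, so it is presumably intended wherever
+  // util::SafeLoadAs(in) appears in this header.
+  // The decode rule itself: value k of an N-bit packed block occupies bits
+  // [k*N, k*N + N) of the little-endian bit stream, i.e. it starts at bit
+  // (k*N) % 64 of word (k*N) / 64 and is recovered with a shift and an N-bit
+  // mask. For N = 10, out[8] above starts 16 bits into w1:
+  static_assert((8 * 10) / 64 == 1 && (8 * 10) % 64 == 16,
+                "value 8 of a 10-bit block begins at bit 16 of word 1");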
out[9] = (w1 >> 26) & mask; + out[10] = (w1 >> 36) & mask; + out[11] = (w1 >> 46) & mask; + out[12] = ((w1 >> 56) | (w2 << 8)) & mask; + out[13] = (w2 >> 2) & mask; + out[14] = (w2 >> 12) & mask; + out[15] = (w2 >> 22) & mask; + out[16] = (w2 >> 32) & mask; + out[17] = (w2 >> 42) & mask; + out[18] = (w2 >> 52) & mask; + out[19] = ((w2 >> 62) | (w3 << 2)) & mask; + out[20] = (w3 >> 8) & mask; + out[21] = (w3 >> 18) & mask; + out[22] = (w3 >> 28) & mask; + out[23] = (w3 >> 38) & mask; + out[24] = (w3 >> 48) & mask; + out[25] = ((w3 >> 58) | (w4 << 6)) & mask; + out[26] = (w4 >> 4) & mask; + out[27] = (w4 >> 14) & mask; + out[28] = (w4 >> 24) & mask; + out[29] = (w4 >> 34) & mask; + out[30] = (w4 >> 44) & mask; + out[31] = w4 >> 54; + + return in; +} + +inline const uint8_t* unpack11_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2047ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 11) & mask; + out[2] = (w0 >> 22) & mask; + out[3] = (w0 >> 33) & mask; + out[4] = (w0 >> 44) & mask; + out[5] = ((w0 >> 55) | (w1 << 9)) & mask; + out[6] = (w1 >> 2) & mask; + out[7] = (w1 >> 13) & mask; + out[8] = (w1 >> 24) & mask; + out[9] = (w1 >> 35) & mask; + out[10] = (w1 >> 46) & mask; + out[11] = ((w1 >> 57) | (w2 << 7)) & mask; + out[12] = (w2 >> 4) & mask; + out[13] = (w2 >> 15) & mask; + out[14] = (w2 >> 26) & mask; + out[15] = (w2 >> 37) & mask; + out[16] = (w2 >> 48) & mask; + out[17] = ((w2 >> 59) | (w3 << 5)) & mask; + out[18] = (w3 >> 6) & mask; + out[19] = (w3 >> 17) & mask; + out[20] = (w3 >> 28) & mask; + out[21] = (w3 >> 39) & mask; + out[22] = (w3 >> 50) & mask; + out[23] = ((w3 >> 61) | (w4 << 3)) & mask; + out[24] = (w4 >> 8) & mask; + out[25] = (w4 >> 19) & mask; + out[26] = (w4 >> 30) & mask; + out[27] = (w4 >> 41) & mask; + out[28] = (w4 >> 52) & mask; + out[29] = ((w4 >> 63) | (w5 << 1)) & mask; + out[30] = (w5 >> 10) & mask; + out[31] = (w5 >> 21) & mask; + + return in; +} + +inline const uint8_t* unpack12_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4095ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 12) & mask; + out[2] = (w0 >> 24) & mask; + out[3] = (w0 >> 36) & mask; + out[4] = (w0 >> 48) & mask; + out[5] = ((w0 >> 60) | (w1 << 4)) & mask; + out[6] = (w1 >> 8) & mask; + out[7] = (w1 >> 20) & mask; + out[8] = (w1 >> 32) & mask; + out[9] = (w1 >> 44) & mask; + out[10] = ((w1 >> 56) | (w2 << 8)) & mask; + out[11] = (w2 >> 4) & mask; + 
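+  // 16 values * 12 bits = 192 bits = exactly three 64-bit words, so the
+  // second half of this block (out[16..31], taken from w3..w5 below) repeats
+  // the shift pattern of the first half one word-group later:
+  static_assert(16 * 12 == 3 * 64,
+                "a 12-bit half-block fills w0..w2 exactly");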
out[12] = (w2 >> 16) & mask; + out[13] = (w2 >> 28) & mask; + out[14] = (w2 >> 40) & mask; + out[15] = w2 >> 52; + out[16] = (w3)&mask; + out[17] = (w3 >> 12) & mask; + out[18] = (w3 >> 24) & mask; + out[19] = (w3 >> 36) & mask; + out[20] = (w3 >> 48) & mask; + out[21] = ((w3 >> 60) | (w4 << 4)) & mask; + out[22] = (w4 >> 8) & mask; + out[23] = (w4 >> 20) & mask; + out[24] = (w4 >> 32) & mask; + out[25] = (w4 >> 44) & mask; + out[26] = ((w4 >> 56) | (w5 << 8)) & mask; + out[27] = (w5 >> 4) & mask; + out[28] = (w5 >> 16) & mask; + out[29] = (w5 >> 28) & mask; + out[30] = (w5 >> 40) & mask; + out[31] = w5 >> 52; + + return in; +} + +inline const uint8_t* unpack13_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8191ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 13) & mask; + out[2] = (w0 >> 26) & mask; + out[3] = (w0 >> 39) & mask; + out[4] = ((w0 >> 52) | (w1 << 12)) & mask; + out[5] = (w1 >> 1) & mask; + out[6] = (w1 >> 14) & mask; + out[7] = (w1 >> 27) & mask; + out[8] = (w1 >> 40) & mask; + out[9] = ((w1 >> 53) | (w2 << 11)) & mask; + out[10] = (w2 >> 2) & mask; + out[11] = (w2 >> 15) & mask; + out[12] = (w2 >> 28) & mask; + out[13] = (w2 >> 41) & mask; + out[14] = ((w2 >> 54) | (w3 << 10)) & mask; + out[15] = (w3 >> 3) & mask; + out[16] = (w3 >> 16) & mask; + out[17] = (w3 >> 29) & mask; + out[18] = (w3 >> 42) & mask; + out[19] = ((w3 >> 55) | (w4 << 9)) & mask; + out[20] = (w4 >> 4) & mask; + out[21] = (w4 >> 17) & mask; + out[22] = (w4 >> 30) & mask; + out[23] = (w4 >> 43) & mask; + out[24] = ((w4 >> 56) | (w5 << 8)) & mask; + out[25] = (w5 >> 5) & mask; + out[26] = (w5 >> 18) & mask; + out[27] = (w5 >> 31) & mask; + out[28] = (w5 >> 44) & mask; + out[29] = ((w5 >> 57) | (w6 << 7)) & mask; + out[30] = (w6 >> 6) & mask; + out[31] = (w6 >> 19) & mask; + + return in; +} + +inline const uint8_t* unpack14_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16383ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 14) & mask; + out[2] = (w0 >> 28) & mask; + out[3] = (w0 >> 42) & mask; + out[4] = ((w0 >> 56) | (w1 << 8)) & mask; + out[5] = (w1 >> 6) & mask; + out[6] = (w1 >> 20) & mask; + out[7] = (w1 >> 34) & mask; + out[8] = (w1 >> 48) & mask; + out[9] = ((w1 >> 62) | 
(w2 << 2)) & mask; + out[10] = (w2 >> 12) & mask; + out[11] = (w2 >> 26) & mask; + out[12] = (w2 >> 40) & mask; + out[13] = ((w2 >> 54) | (w3 << 10)) & mask; + out[14] = (w3 >> 4) & mask; + out[15] = (w3 >> 18) & mask; + out[16] = (w3 >> 32) & mask; + out[17] = (w3 >> 46) & mask; + out[18] = ((w3 >> 60) | (w4 << 4)) & mask; + out[19] = (w4 >> 10) & mask; + out[20] = (w4 >> 24) & mask; + out[21] = (w4 >> 38) & mask; + out[22] = ((w4 >> 52) | (w5 << 12)) & mask; + out[23] = (w5 >> 2) & mask; + out[24] = (w5 >> 16) & mask; + out[25] = (w5 >> 30) & mask; + out[26] = (w5 >> 44) & mask; + out[27] = ((w5 >> 58) | (w6 << 6)) & mask; + out[28] = (w6 >> 8) & mask; + out[29] = (w6 >> 22) & mask; + out[30] = (w6 >> 36) & mask; + out[31] = w6 >> 50; + + return in; +} + +inline const uint8_t* unpack15_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 32767ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 15) & mask; + out[2] = (w0 >> 30) & mask; + out[3] = (w0 >> 45) & mask; + out[4] = ((w0 >> 60) | (w1 << 4)) & mask; + out[5] = (w1 >> 11) & mask; + out[6] = (w1 >> 26) & mask; + out[7] = (w1 >> 41) & mask; + out[8] = ((w1 >> 56) | (w2 << 8)) & mask; + out[9] = (w2 >> 7) & mask; + out[10] = (w2 >> 22) & mask; + out[11] = (w2 >> 37) & mask; + out[12] = ((w2 >> 52) | (w3 << 12)) & mask; + out[13] = (w3 >> 3) & mask; + out[14] = (w3 >> 18) & mask; + out[15] = (w3 >> 33) & mask; + out[16] = (w3 >> 48) & mask; + out[17] = ((w3 >> 63) | (w4 << 1)) & mask; + out[18] = (w4 >> 14) & mask; + out[19] = (w4 >> 29) & mask; + out[20] = (w4 >> 44) & mask; + out[21] = ((w4 >> 59) | (w5 << 5)) & mask; + out[22] = (w5 >> 10) & mask; + out[23] = (w5 >> 25) & mask; + out[24] = (w5 >> 40) & mask; + out[25] = ((w5 >> 55) | (w6 << 9)) & mask; + out[26] = (w6 >> 6) & mask; + out[27] = (w6 >> 21) & mask; + out[28] = (w6 >> 36) & mask; + out[29] = ((w6 >> 51) | (w7 << 13)) & mask; + out[30] = (w7 >> 2) & mask; + out[31] = (w7 >> 17) & mask; + + return in; +} + +inline const uint8_t* unpack16_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 65535ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = 
arrow::bit_util::FromLittleEndian(w7); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 16) & mask; + out[2] = (w0 >> 32) & mask; + out[3] = w0 >> 48; + out[4] = (w1)&mask; + out[5] = (w1 >> 16) & mask; + out[6] = (w1 >> 32) & mask; + out[7] = w1 >> 48; + out[8] = (w2)&mask; + out[9] = (w2 >> 16) & mask; + out[10] = (w2 >> 32) & mask; + out[11] = w2 >> 48; + out[12] = (w3)&mask; + out[13] = (w3 >> 16) & mask; + out[14] = (w3 >> 32) & mask; + out[15] = w3 >> 48; + out[16] = (w4)&mask; + out[17] = (w4 >> 16) & mask; + out[18] = (w4 >> 32) & mask; + out[19] = w4 >> 48; + out[20] = (w5)&mask; + out[21] = (w5 >> 16) & mask; + out[22] = (w5 >> 32) & mask; + out[23] = w5 >> 48; + out[24] = (w6)&mask; + out[25] = (w6 >> 16) & mask; + out[26] = (w6 >> 32) & mask; + out[27] = w6 >> 48; + out[28] = (w7)&mask; + out[29] = (w7 >> 16) & mask; + out[30] = (w7 >> 32) & mask; + out[31] = w7 >> 48; + + return in; +} + +inline const uint8_t* unpack17_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 131071ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 17) & mask; + out[2] = (w0 >> 34) & mask; + out[3] = ((w0 >> 51) | (w1 << 13)) & mask; + out[4] = (w1 >> 4) & mask; + out[5] = (w1 >> 21) & mask; + out[6] = (w1 >> 38) & mask; + out[7] = ((w1 >> 55) | (w2 << 9)) & mask; + out[8] = (w2 >> 8) & mask; + out[9] = (w2 >> 25) & mask; + out[10] = (w2 >> 42) & mask; + out[11] = ((w2 >> 59) | (w3 << 5)) & mask; + out[12] = (w3 >> 12) & mask; + out[13] = (w3 >> 29) & mask; + out[14] = (w3 >> 46) & mask; + out[15] = ((w3 >> 63) | (w4 << 1)) & mask; + out[16] = (w4 >> 16) & mask; + out[17] = (w4 >> 33) & mask; + out[18] = ((w4 >> 50) | (w5 << 14)) & mask; + out[19] = (w5 >> 3) & mask; + out[20] = (w5 >> 20) & mask; + out[21] = (w5 >> 37) & mask; + out[22] = ((w5 >> 54) | (w6 << 10)) & mask; + out[23] = (w6 >> 7) & mask; + out[24] = (w6 >> 24) & mask; + out[25] = (w6 >> 41) & mask; + out[26] = ((w6 >> 58) | (w7 << 6)) & mask; + out[27] = (w7 >> 11) & mask; + out[28] = (w7 >> 28) & mask; + out[29] = (w7 >> 45) & mask; + out[30] = ((w7 >> 62) | (w8 << 2)) & mask; + out[31] = (w8 >> 15) & mask; + + return in; +} + +inline const uint8_t* unpack18_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 262143ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + 
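+  // A block of 32 N-bit values always spans 4*N bytes, read as ceil(N/2)
+  // little-endian 64-bit words. When N is odd the final word is only half
+  // consumed, which is why the odd-width routines (unpack17_64 above) end
+  // with `in += 4`, while even widths like this one advance 8 every time.
+  static_assert(32 * 18 / 8 == 9 * 8,
+                "unpack18_64 consumes nine full 8-byte words");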
in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 18) & mask; + out[2] = (w0 >> 36) & mask; + out[3] = ((w0 >> 54) | (w1 << 10)) & mask; + out[4] = (w1 >> 8) & mask; + out[5] = (w1 >> 26) & mask; + out[6] = (w1 >> 44) & mask; + out[7] = ((w1 >> 62) | (w2 << 2)) & mask; + out[8] = (w2 >> 16) & mask; + out[9] = (w2 >> 34) & mask; + out[10] = ((w2 >> 52) | (w3 << 12)) & mask; + out[11] = (w3 >> 6) & mask; + out[12] = (w3 >> 24) & mask; + out[13] = (w3 >> 42) & mask; + out[14] = ((w3 >> 60) | (w4 << 4)) & mask; + out[15] = (w4 >> 14) & mask; + out[16] = (w4 >> 32) & mask; + out[17] = ((w4 >> 50) | (w5 << 14)) & mask; + out[18] = (w5 >> 4) & mask; + out[19] = (w5 >> 22) & mask; + out[20] = (w5 >> 40) & mask; + out[21] = ((w5 >> 58) | (w6 << 6)) & mask; + out[22] = (w6 >> 12) & mask; + out[23] = (w6 >> 30) & mask; + out[24] = ((w6 >> 48) | (w7 << 16)) & mask; + out[25] = (w7 >> 2) & mask; + out[26] = (w7 >> 20) & mask; + out[27] = (w7 >> 38) & mask; + out[28] = ((w7 >> 56) | (w8 << 8)) & mask; + out[29] = (w8 >> 10) & mask; + out[30] = (w8 >> 28) & mask; + out[31] = w8 >> 46; + + return in; +} + +inline const uint8_t* unpack19_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 524287ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 19) & mask; + out[2] = (w0 >> 38) & mask; + out[3] = ((w0 >> 57) | (w1 << 7)) & mask; + out[4] = (w1 >> 12) & mask; + out[5] = (w1 >> 31) & mask; + out[6] = ((w1 >> 50) | (w2 << 14)) & mask; + out[7] = (w2 >> 5) & mask; + out[8] = (w2 >> 24) & mask; + out[9] = (w2 >> 43) & mask; + out[10] = ((w2 >> 62) | (w3 << 2)) & mask; + out[11] = (w3 >> 17) & mask; + out[12] = (w3 >> 36) & mask; + out[13] = ((w3 >> 55) | (w4 << 9)) & mask; + out[14] = (w4 >> 10) & mask; + out[15] = (w4 >> 29) & mask; + out[16] = ((w4 >> 48) | (w5 << 16)) & mask; + out[17] = (w5 >> 3) & mask; + out[18] = (w5 >> 22) & mask; + out[19] = (w5 >> 41) & mask; + out[20] = ((w5 >> 60) | (w6 << 4)) & mask; + out[21] = (w6 >> 15) & mask; + out[22] = (w6 >> 34) & mask; + out[23] = ((w6 >> 53) | (w7 << 11)) & mask; + out[24] = (w7 >> 8) & mask; + out[25] = (w7 >> 27) & mask; + out[26] = ((w7 >> 46) | (w8 << 18)) & mask; + out[27] = (w8 >> 1) & mask; + out[28] = (w8 >> 20) & mask; + out[29] = (w8 >> 
39) & mask; + out[30] = ((w8 >> 58) | (w9 << 6)) & mask; + out[31] = (w9 >> 13) & mask; + + return in; +} + +inline const uint8_t* unpack20_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1048575ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 20) & mask; + out[2] = (w0 >> 40) & mask; + out[3] = ((w0 >> 60) | (w1 << 4)) & mask; + out[4] = (w1 >> 16) & mask; + out[5] = (w1 >> 36) & mask; + out[6] = ((w1 >> 56) | (w2 << 8)) & mask; + out[7] = (w2 >> 12) & mask; + out[8] = (w2 >> 32) & mask; + out[9] = ((w2 >> 52) | (w3 << 12)) & mask; + out[10] = (w3 >> 8) & mask; + out[11] = (w3 >> 28) & mask; + out[12] = ((w3 >> 48) | (w4 << 16)) & mask; + out[13] = (w4 >> 4) & mask; + out[14] = (w4 >> 24) & mask; + out[15] = w4 >> 44; + out[16] = (w5)&mask; + out[17] = (w5 >> 20) & mask; + out[18] = (w5 >> 40) & mask; + out[19] = ((w5 >> 60) | (w6 << 4)) & mask; + out[20] = (w6 >> 16) & mask; + out[21] = (w6 >> 36) & mask; + out[22] = ((w6 >> 56) | (w7 << 8)) & mask; + out[23] = (w7 >> 12) & mask; + out[24] = (w7 >> 32) & mask; + out[25] = ((w7 >> 52) | (w8 << 12)) & mask; + out[26] = (w8 >> 8) & mask; + out[27] = (w8 >> 28) & mask; + out[28] = ((w8 >> 48) | (w9 << 16)) & mask; + out[29] = (w9 >> 4) & mask; + out[30] = (w9 >> 24) & mask; + out[31] = w9 >> 44; + + return in; +} + +inline const uint8_t* unpack21_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2097151ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 21) & mask; + out[2] = (w0 >> 42) & mask; + out[3] = ((w0 >> 63) | (w1 << 1)) & mask; + out[4] = (w1 >> 20) & 
mask; + out[5] = (w1 >> 41) & mask; + out[6] = ((w1 >> 62) | (w2 << 2)) & mask; + out[7] = (w2 >> 19) & mask; + out[8] = (w2 >> 40) & mask; + out[9] = ((w2 >> 61) | (w3 << 3)) & mask; + out[10] = (w3 >> 18) & mask; + out[11] = (w3 >> 39) & mask; + out[12] = ((w3 >> 60) | (w4 << 4)) & mask; + out[13] = (w4 >> 17) & mask; + out[14] = (w4 >> 38) & mask; + out[15] = ((w4 >> 59) | (w5 << 5)) & mask; + out[16] = (w5 >> 16) & mask; + out[17] = (w5 >> 37) & mask; + out[18] = ((w5 >> 58) | (w6 << 6)) & mask; + out[19] = (w6 >> 15) & mask; + out[20] = (w6 >> 36) & mask; + out[21] = ((w6 >> 57) | (w7 << 7)) & mask; + out[22] = (w7 >> 14) & mask; + out[23] = (w7 >> 35) & mask; + out[24] = ((w7 >> 56) | (w8 << 8)) & mask; + out[25] = (w8 >> 13) & mask; + out[26] = (w8 >> 34) & mask; + out[27] = ((w8 >> 55) | (w9 << 9)) & mask; + out[28] = (w9 >> 12) & mask; + out[29] = (w9 >> 33) & mask; + out[30] = ((w9 >> 54) | (w10 << 10)) & mask; + out[31] = (w10 >> 11) & mask; + + return in; +} + +inline const uint8_t* unpack22_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4194303ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 22) & mask; + out[2] = ((w0 >> 44) | (w1 << 20)) & mask; + out[3] = (w1 >> 2) & mask; + out[4] = (w1 >> 24) & mask; + out[5] = ((w1 >> 46) | (w2 << 18)) & mask; + out[6] = (w2 >> 4) & mask; + out[7] = (w2 >> 26) & mask; + out[8] = ((w2 >> 48) | (w3 << 16)) & mask; + out[9] = (w3 >> 6) & mask; + out[10] = (w3 >> 28) & mask; + out[11] = ((w3 >> 50) | (w4 << 14)) & mask; + out[12] = (w4 >> 8) & mask; + out[13] = (w4 >> 30) & mask; + out[14] = ((w4 >> 52) | (w5 << 12)) & mask; + out[15] = (w5 >> 10) & mask; + out[16] = (w5 >> 32) & mask; + out[17] = ((w5 >> 54) | (w6 << 10)) & mask; + out[18] = (w6 >> 12) & mask; + out[19] = (w6 >> 34) & mask; + out[20] = ((w6 >> 56) | (w7 << 8)) & mask; + out[21] = (w7 >> 14) & mask; + out[22] = (w7 >> 36) & mask; + out[23] = ((w7 >> 58) | (w8 << 6)) & mask; + out[24] = (w8 >> 16) & mask; + out[25] = (w8 >> 38) & mask; + out[26] = ((w8 >> 60) | (w9 << 4)) & mask; + out[27] = (w9 >> 18) & mask; + out[28] = (w9 >> 40) & mask; + out[29] = ((w9 >> 62) | (w10 << 2)) & mask; + out[30] = (w10 >> 20) & mask; + out[31] = w10 >> 42; + + return in; +} + +inline const uint8_t* unpack23_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8388607ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = 
arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 23) & mask; + out[2] = ((w0 >> 46) | (w1 << 18)) & mask; + out[3] = (w1 >> 5) & mask; + out[4] = (w1 >> 28) & mask; + out[5] = ((w1 >> 51) | (w2 << 13)) & mask; + out[6] = (w2 >> 10) & mask; + out[7] = (w2 >> 33) & mask; + out[8] = ((w2 >> 56) | (w3 << 8)) & mask; + out[9] = (w3 >> 15) & mask; + out[10] = (w3 >> 38) & mask; + out[11] = ((w3 >> 61) | (w4 << 3)) & mask; + out[12] = (w4 >> 20) & mask; + out[13] = ((w4 >> 43) | (w5 << 21)) & mask; + out[14] = (w5 >> 2) & mask; + out[15] = (w5 >> 25) & mask; + out[16] = ((w5 >> 48) | (w6 << 16)) & mask; + out[17] = (w6 >> 7) & mask; + out[18] = (w6 >> 30) & mask; + out[19] = ((w6 >> 53) | (w7 << 11)) & mask; + out[20] = (w7 >> 12) & mask; + out[21] = (w7 >> 35) & mask; + out[22] = ((w7 >> 58) | (w8 << 6)) & mask; + out[23] = (w8 >> 17) & mask; + out[24] = (w8 >> 40) & mask; + out[25] = ((w8 >> 63) | (w9 << 1)) & mask; + out[26] = (w9 >> 22) & mask; + out[27] = ((w9 >> 45) | (w10 << 19)) & mask; + out[28] = (w10 >> 4) & mask; + out[29] = (w10 >> 27) & mask; + out[30] = ((w10 >> 50) | (w11 << 14)) & mask; + out[31] = (w11 >> 9) & mask; + + return in; +} + +inline const uint8_t* unpack24_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 16777215ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 24) & mask; + out[2] = ((w0 >> 48) | (w1 << 16)) & mask; + out[3] = (w1 >> 8) & mask; + out[4] = 
(w1 >> 32) & mask; + out[5] = ((w1 >> 56) | (w2 << 8)) & mask; + out[6] = (w2 >> 16) & mask; + out[7] = w2 >> 40; + out[8] = (w3)&mask; + out[9] = (w3 >> 24) & mask; + out[10] = ((w3 >> 48) | (w4 << 16)) & mask; + out[11] = (w4 >> 8) & mask; + out[12] = (w4 >> 32) & mask; + out[13] = ((w4 >> 56) | (w5 << 8)) & mask; + out[14] = (w5 >> 16) & mask; + out[15] = w5 >> 40; + out[16] = (w6)&mask; + out[17] = (w6 >> 24) & mask; + out[18] = ((w6 >> 48) | (w7 << 16)) & mask; + out[19] = (w7 >> 8) & mask; + out[20] = (w7 >> 32) & mask; + out[21] = ((w7 >> 56) | (w8 << 8)) & mask; + out[22] = (w8 >> 16) & mask; + out[23] = w8 >> 40; + out[24] = (w9)&mask; + out[25] = (w9 >> 24) & mask; + out[26] = ((w9 >> 48) | (w10 << 16)) & mask; + out[27] = (w10 >> 8) & mask; + out[28] = (w10 >> 32) & mask; + out[29] = ((w10 >> 56) | (w11 << 8)) & mask; + out[30] = (w11 >> 16) & mask; + out[31] = w11 >> 40; + + return in; +} + +inline const uint8_t* unpack25_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 33554431ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 25) & mask; + out[2] = ((w0 >> 50) | (w1 << 14)) & mask; + out[3] = (w1 >> 11) & mask; + out[4] = (w1 >> 36) & mask; + out[5] = ((w1 >> 61) | (w2 << 3)) & mask; + out[6] = (w2 >> 22) & mask; + out[7] = ((w2 >> 47) | (w3 << 17)) & mask; + out[8] = (w3 >> 8) & mask; + out[9] = (w3 >> 33) & mask; + out[10] = ((w3 >> 58) | (w4 << 6)) & mask; + out[11] = (w4 >> 19) & mask; + out[12] = ((w4 >> 44) | (w5 << 20)) & mask; + out[13] = (w5 >> 5) & mask; + out[14] = (w5 >> 30) & mask; + out[15] = ((w5 >> 55) | (w6 << 9)) & mask; + out[16] = (w6 >> 16) & mask; + out[17] = ((w6 >> 41) | (w7 << 23)) & mask; + out[18] = (w7 >> 2) & mask; + out[19] = (w7 >> 27) & mask; + out[20] = ((w7 >> 52) | (w8 << 12)) & mask; + out[21] = (w8 >> 13) & mask; + out[22] = (w8 >> 38) & mask; + out[23] = ((w8 >> 63) | (w9 << 1)) & mask; + out[24] = (w9 >> 24) & mask; + out[25] = ((w9 >> 49) | (w10 << 15)) & mask; + out[26] = (w10 >> 10) & mask; + out[27] = (w10 >> 35) & mask; + out[28] = ((w10 >> 60) | (w11 << 4)) & mask; + out[29] = (w11 >> 21) & mask; + out[30] = ((w11 >> 46) | (w12 << 18)) & mask; + out[31] = (w12 >> 7) & mask; + + return in; +} + +inline const uint8_t* unpack26_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 67108863ULL; + 
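+  // The mask literal is just the N low bits set: mask = (1 << N) - 1.
+  static_assert((uint64_t(1) << 26) - 1 == 67108863ULL,
+                "67108863 is the 26-bit mask");
+  // A generic (unoptimized) reference for any width W < 64, assuming the
+  // block's words were already loaded into a hypothetical `words[]`, would be
+  //   for (int k = 0; k < 32; ++k) {
+  //     int r = (k * W) % 64;
+  //     uint64_t v = words[(k * W) / 64] >> r;
+  //     if (r + W > 64) v |= words[(k * W) / 64 + 1] << (64 - r);
+  //     out[k] = v & ((uint64_t(1) << W) - 1);
+  //   }
+  // These unrolled routines trade that loop for straight-line code with
+  // compile-time shift counts, which compilers schedule and vectorize better.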
uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 26) & mask; + out[2] = ((w0 >> 52) | (w1 << 12)) & mask; + out[3] = (w1 >> 14) & mask; + out[4] = ((w1 >> 40) | (w2 << 24)) & mask; + out[5] = (w2 >> 2) & mask; + out[6] = (w2 >> 28) & mask; + out[7] = ((w2 >> 54) | (w3 << 10)) & mask; + out[8] = (w3 >> 16) & mask; + out[9] = ((w3 >> 42) | (w4 << 22)) & mask; + out[10] = (w4 >> 4) & mask; + out[11] = (w4 >> 30) & mask; + out[12] = ((w4 >> 56) | (w5 << 8)) & mask; + out[13] = (w5 >> 18) & mask; + out[14] = ((w5 >> 44) | (w6 << 20)) & mask; + out[15] = (w6 >> 6) & mask; + out[16] = (w6 >> 32) & mask; + out[17] = ((w6 >> 58) | (w7 << 6)) & mask; + out[18] = (w7 >> 20) & mask; + out[19] = ((w7 >> 46) | (w8 << 18)) & mask; + out[20] = (w8 >> 8) & mask; + out[21] = (w8 >> 34) & mask; + out[22] = ((w8 >> 60) | (w9 << 4)) & mask; + out[23] = (w9 >> 22) & mask; + out[24] = ((w9 >> 48) | (w10 << 16)) & mask; + out[25] = (w10 >> 10) & mask; + out[26] = (w10 >> 36) & mask; + out[27] = ((w10 >> 62) | (w11 << 2)) & mask; + out[28] = (w11 >> 24) & mask; + out[29] = ((w11 >> 50) | (w12 << 14)) & mask; + out[30] = (w12 >> 12) & mask; + out[31] = w12 >> 38; + + return in; +} + +inline const uint8_t* unpack27_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 134217727ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); 
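+  // When a value straddles a word boundary it is stitched from two shifts:
+  // with r = (k*N) % 64, out[k] = ((w_lo >> r) | (w_hi << (64 - r))) & mask.
+  // For out[18] below, 18 * 27 = 486 = 7 * 64 + 38, so r = 38 and the high
+  // bits come from w8 shifted left by 64 - 38 = 26.
+  static_assert(18 * 27 - 7 * 64 == 38,
+                "value 18 of a 27-bit block starts at bit 38 of w7");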
+ in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 27) & mask; + out[2] = ((w0 >> 54) | (w1 << 10)) & mask; + out[3] = (w1 >> 17) & mask; + out[4] = ((w1 >> 44) | (w2 << 20)) & mask; + out[5] = (w2 >> 7) & mask; + out[6] = (w2 >> 34) & mask; + out[7] = ((w2 >> 61) | (w3 << 3)) & mask; + out[8] = (w3 >> 24) & mask; + out[9] = ((w3 >> 51) | (w4 << 13)) & mask; + out[10] = (w4 >> 14) & mask; + out[11] = ((w4 >> 41) | (w5 << 23)) & mask; + out[12] = (w5 >> 4) & mask; + out[13] = (w5 >> 31) & mask; + out[14] = ((w5 >> 58) | (w6 << 6)) & mask; + out[15] = (w6 >> 21) & mask; + out[16] = ((w6 >> 48) | (w7 << 16)) & mask; + out[17] = (w7 >> 11) & mask; + out[18] = ((w7 >> 38) | (w8 << 26)) & mask; + out[19] = (w8 >> 1) & mask; + out[20] = (w8 >> 28) & mask; + out[21] = ((w8 >> 55) | (w9 << 9)) & mask; + out[22] = (w9 >> 18) & mask; + out[23] = ((w9 >> 45) | (w10 << 19)) & mask; + out[24] = (w10 >> 8) & mask; + out[25] = (w10 >> 35) & mask; + out[26] = ((w10 >> 62) | (w11 << 2)) & mask; + out[27] = (w11 >> 25) & mask; + out[28] = ((w11 >> 52) | (w12 << 12)) & mask; + out[29] = (w12 >> 15) & mask; + out[30] = ((w12 >> 42) | (w13 << 22)) & mask; + out[31] = (w13 >> 5) & mask; + + return in; +} + +inline const uint8_t* unpack28_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 268435455ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 28) & mask; + out[2] = ((w0 >> 56) | (w1 << 8)) & mask; + out[3] = (w1 >> 20) & mask; + out[4] = ((w1 >> 48) | (w2 << 16)) & mask; + out[5] = (w2 >> 12) & mask; + out[6] = ((w2 >> 40) | (w3 << 24)) & mask; + out[7] = (w3 >> 4) & mask; + out[8] = (w3 >> 32) & mask; + out[9] = ((w3 >> 60) | (w4 << 4)) & mask; + out[10] = (w4 >> 24) & mask; + out[11] = ((w4 >> 52) | (w5 << 12)) & mask; + out[12] = (w5 >> 16) & mask; + out[13] = ((w5 >> 44) | (w6 << 20)) & mask; + out[14] = (w6 >> 8) & mask; + out[15] = w6 >> 36; + out[16] = (w7)&mask; + out[17] = (w7 >> 28) & 
mask; + out[18] = ((w7 >> 56) | (w8 << 8)) & mask; + out[19] = (w8 >> 20) & mask; + out[20] = ((w8 >> 48) | (w9 << 16)) & mask; + out[21] = (w9 >> 12) & mask; + out[22] = ((w9 >> 40) | (w10 << 24)) & mask; + out[23] = (w10 >> 4) & mask; + out[24] = (w10 >> 32) & mask; + out[25] = ((w10 >> 60) | (w11 << 4)) & mask; + out[26] = (w11 >> 24) & mask; + out[27] = ((w11 >> 52) | (w12 << 12)) & mask; + out[28] = (w12 >> 16) & mask; + out[29] = ((w12 >> 44) | (w13 << 20)) & mask; + out[30] = (w13 >> 8) & mask; + out[31] = w13 >> 36; + + return in; +} + +inline const uint8_t* unpack29_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 536870911ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 29) & mask; + out[2] = ((w0 >> 58) | (w1 << 6)) & mask; + out[3] = (w1 >> 23) & mask; + out[4] = ((w1 >> 52) | (w2 << 12)) & mask; + out[5] = (w2 >> 17) & mask; + out[6] = ((w2 >> 46) | (w3 << 18)) & mask; + out[7] = (w3 >> 11) & mask; + out[8] = ((w3 >> 40) | (w4 << 24)) & mask; + out[9] = (w4 >> 5) & mask; + out[10] = (w4 >> 34) & mask; + out[11] = ((w4 >> 63) | (w5 << 1)) & mask; + out[12] = (w5 >> 28) & mask; + out[13] = ((w5 >> 57) | (w6 << 7)) & mask; + out[14] = (w6 >> 22) & mask; + out[15] = ((w6 >> 51) | (w7 << 13)) & mask; + out[16] = (w7 >> 16) & mask; + out[17] = ((w7 >> 45) | (w8 << 19)) & mask; + out[18] = (w8 >> 10) & mask; + out[19] = ((w8 >> 39) | (w9 << 25)) & mask; + out[20] = (w9 >> 4) & mask; + out[21] = (w9 >> 33) & mask; + out[22] = ((w9 >> 62) | (w10 << 2)) & mask; + out[23] = (w10 >> 27) & mask; + out[24] = ((w10 >> 56) | (w11 << 8)) & mask; + out[25] = (w11 >> 21) & mask; + out[26] = ((w11 >> 50) | (w12 << 14)) & mask; + out[27] = (w12 >> 15) & mask; + out[28] = ((w12 >> 44) | (w13 << 20)) & mask; + out[29] = (w13 >> 9) & mask; + out[30] = ((w13 >> 38) | (w14 << 26)) & mask; + out[31] = (w14 >> 3) & mask; + + return in; +} + +inline const uint8_t* unpack30_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1073741823ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + 
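+  // Assumed from upstream Arrow: SafeLoadAs performs an alignment-safe load
+  // (a memcpy into a local), and FromLittleEndian byte-swaps only on
+  // big-endian hosts, so on little-endian targets each load/swap pair
+  // compiles down to a single unaligned 8-byte read.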
w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + out[0] = (w0)&mask; + out[1] = (w0 >> 30) & mask; + out[2] = ((w0 >> 60) | (w1 << 4)) & mask; + out[3] = (w1 >> 26) & mask; + out[4] = ((w1 >> 56) | (w2 << 8)) & mask; + out[5] = (w2 >> 22) & mask; + out[6] = ((w2 >> 52) | (w3 << 12)) & mask; + out[7] = (w3 >> 18) & mask; + out[8] = ((w3 >> 48) | (w4 << 16)) & mask; + out[9] = (w4 >> 14) & mask; + out[10] = ((w4 >> 44) | (w5 << 20)) & mask; + out[11] = (w5 >> 10) & mask; + out[12] = ((w5 >> 40) | (w6 << 24)) & mask; + out[13] = (w6 >> 6) & mask; + out[14] = ((w6 >> 36) | (w7 << 28)) & mask; + out[15] = (w7 >> 2) & mask; + out[16] = (w7 >> 32) & mask; + out[17] = ((w7 >> 62) | (w8 << 2)) & mask; + out[18] = (w8 >> 28) & mask; + out[19] = ((w8 >> 58) | (w9 << 6)) & mask; + out[20] = (w9 >> 24) & mask; + out[21] = ((w9 >> 54) | (w10 << 10)) & mask; + out[22] = (w10 >> 20) & mask; + out[23] = ((w10 >> 50) | (w11 << 14)) & mask; + out[24] = (w11 >> 16) & mask; + out[25] = ((w11 >> 46) | (w12 << 18)) & mask; + out[26] = (w12 >> 12) & mask; + out[27] = ((w12 >> 42) | (w13 << 22)) & mask; + out[28] = (w13 >> 8) & mask; + out[29] = ((w13 >> 38) | (w14 << 26)) & mask; + out[30] = (w14 >> 4) & mask; + out[31] = w14 >> 34; + + return in; +} + +inline const uint8_t* unpack31_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2147483647ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = 
arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 4; + out[0] = (w0)&mask; + out[1] = (w0 >> 31) & mask; + out[2] = ((w0 >> 62) | (w1 << 2)) & mask; + out[3] = (w1 >> 29) & mask; + out[4] = ((w1 >> 60) | (w2 << 4)) & mask; + out[5] = (w2 >> 27) & mask; + out[6] = ((w2 >> 58) | (w3 << 6)) & mask; + out[7] = (w3 >> 25) & mask; + out[8] = ((w3 >> 56) | (w4 << 8)) & mask; + out[9] = (w4 >> 23) & mask; + out[10] = ((w4 >> 54) | (w5 << 10)) & mask; + out[11] = (w5 >> 21) & mask; + out[12] = ((w5 >> 52) | (w6 << 12)) & mask; + out[13] = (w6 >> 19) & mask; + out[14] = ((w6 >> 50) | (w7 << 14)) & mask; + out[15] = (w7 >> 17) & mask; + out[16] = ((w7 >> 48) | (w8 << 16)) & mask; + out[17] = (w8 >> 15) & mask; + out[18] = ((w8 >> 46) | (w9 << 18)) & mask; + out[19] = (w9 >> 13) & mask; + out[20] = ((w9 >> 44) | (w10 << 20)) & mask; + out[21] = (w10 >> 11) & mask; + out[22] = ((w10 >> 42) | (w11 << 22)) & mask; + out[23] = (w11 >> 9) & mask; + out[24] = ((w11 >> 40) | (w12 << 24)) & mask; + out[25] = (w12 >> 7) & mask; + out[26] = ((w12 >> 38) | (w13 << 26)) & mask; + out[27] = (w13 >> 5) & mask; + out[28] = ((w13 >> 36) | (w14 << 28)) & mask; + out[29] = (w14 >> 3) & mask; + out[30] = ((w14 >> 34) | (w15 << 30)) & mask; + out[31] = (w15 >> 1) & mask; + + return in; +} + +inline const uint8_t* unpack32_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4294967295ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + out[0] = (w0)&mask; + out[1] = w0 >> 
32; + out[2] = (w1)&mask; + out[3] = w1 >> 32; + out[4] = (w2)&mask; + out[5] = w2 >> 32; + out[6] = (w3)&mask; + out[7] = w3 >> 32; + out[8] = (w4)&mask; + out[9] = w4 >> 32; + out[10] = (w5)&mask; + out[11] = w5 >> 32; + out[12] = (w6)&mask; + out[13] = w6 >> 32; + out[14] = (w7)&mask; + out[15] = w7 >> 32; + out[16] = (w8)&mask; + out[17] = w8 >> 32; + out[18] = (w9)&mask; + out[19] = w9 >> 32; + out[20] = (w10)&mask; + out[21] = w10 >> 32; + out[22] = (w11)&mask; + out[23] = w11 >> 32; + out[24] = (w12)&mask; + out[25] = w12 >> 32; + out[26] = (w13)&mask; + out[27] = w13 >> 32; + out[28] = (w14)&mask; + out[29] = w14 >> 32; + out[30] = (w15)&mask; + out[31] = w15 >> 32; + + return in; +} + +inline const uint8_t* unpack33_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8589934591ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 33) | (w1 << 31)) & mask; + out[2] = (w1 >> 2) & mask; + out[3] = ((w1 >> 35) | (w2 << 29)) & mask; + out[4] = (w2 >> 4) & mask; + out[5] = ((w2 >> 37) | (w3 << 27)) & mask; + out[6] = (w3 >> 6) & mask; + out[7] = ((w3 >> 39) | (w4 << 25)) & mask; + out[8] = (w4 >> 8) & mask; + out[9] = ((w4 >> 41) | (w5 << 23)) & mask; + out[10] = (w5 >> 10) & mask; + out[11] = ((w5 >> 43) | (w6 << 21)) & mask; + out[12] = (w6 >> 12) & mask; + out[13] = ((w6 >> 45) | (w7 << 19)) & mask; + out[14] = (w7 >> 14) & mask; + out[15] = ((w7 >> 47) | (w8 << 17)) & mask; + out[16] = (w8 >> 16) & mask; + out[17] = ((w8 >> 49) | (w9 << 15)) & mask; + out[18] = (w9 >> 18) & mask; + out[19] = ((w9 >> 51) | (w10 << 13)) & mask; + out[20] = (w10 >> 20) & mask; + out[21] = ((w10 >> 53) | (w11 << 11)) & mask; + out[22] = (w11 >> 22) & mask; + out[23] = ((w11 >> 55) | (w12 << 9)) & mask; + out[24] = (w12 >> 24) & mask; + out[25] = ((w12 >> 57) | (w13 << 7)) & mask; + out[26] = (w13 >> 26) & mask; + out[27] = ((w13 >> 59) | (w14 << 5)) & mask; + out[28] = (w14 >> 28) & mask; + 
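+  // 32 values * 33 bits = 132 bytes: sixteen full words plus four bytes.
+  // That is why this routine loads w0..w16 yet advances `in` by only 4 after
+  // the last load; the upper half of w16 already belongs to the next block.
+  static_assert(32 * 33 / 8 == 16 * 8 + 4,
+                "unpack33_64 consumes 132 bytes");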
out[29] = ((w14 >> 61) | (w15 << 3)) & mask; + out[30] = (w15 >> 30) & mask; + out[31] = ((w15 >> 63) | (w16 << 1)) & mask; + + return in; +} + +inline const uint8_t* unpack34_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17179869183ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 34) | (w1 << 30)) & mask; + out[2] = (w1 >> 4) & mask; + out[3] = ((w1 >> 38) | (w2 << 26)) & mask; + out[4] = (w2 >> 8) & mask; + out[5] = ((w2 >> 42) | (w3 << 22)) & mask; + out[6] = (w3 >> 12) & mask; + out[7] = ((w3 >> 46) | (w4 << 18)) & mask; + out[8] = (w4 >> 16) & mask; + out[9] = ((w4 >> 50) | (w5 << 14)) & mask; + out[10] = (w5 >> 20) & mask; + out[11] = ((w5 >> 54) | (w6 << 10)) & mask; + out[12] = (w6 >> 24) & mask; + out[13] = ((w6 >> 58) | (w7 << 6)) & mask; + out[14] = (w7 >> 28) & mask; + out[15] = ((w7 >> 62) | (w8 << 2)) & mask; + out[16] = ((w8 >> 32) | (w9 << 32)) & mask; + out[17] = (w9 >> 2) & mask; + out[18] = ((w9 >> 36) | (w10 << 28)) & mask; + out[19] = (w10 >> 6) & mask; + out[20] = ((w10 >> 40) | (w11 << 24)) & mask; + out[21] = (w11 >> 10) & mask; + out[22] = ((w11 >> 44) | (w12 << 20)) & mask; + out[23] = (w12 >> 14) & mask; + out[24] = ((w12 >> 48) | (w13 << 16)) & mask; + out[25] = (w13 >> 18) & mask; + out[26] = ((w13 >> 52) | (w14 << 12)) & mask; + out[27] = (w14 >> 22) & mask; + out[28] = ((w14 >> 56) | (w15 << 8)) & mask; + out[29] = (w15 >> 26) & mask; + out[30] = ((w15 >> 60) | (w16 << 4)) & mask; + out[31] = w16 >> 30; + + return in; +} + +inline const uint8_t* unpack35_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 34359738367ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = 
util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 35) | (w1 << 29)) & mask; + out[2] = (w1 >> 6) & mask; + out[3] = ((w1 >> 41) | (w2 << 23)) & mask; + out[4] = (w2 >> 12) & mask; + out[5] = ((w2 >> 47) | (w3 << 17)) & mask; + out[6] = (w3 >> 18) & mask; + out[7] = ((w3 >> 53) | (w4 << 11)) & mask; + out[8] = (w4 >> 24) & mask; + out[9] = ((w4 >> 59) | (w5 << 5)) & mask; + out[10] = ((w5 >> 30) | (w6 << 34)) & mask; + out[11] = (w6 >> 1) & mask; + out[12] = ((w6 >> 36) | (w7 << 28)) & mask; + out[13] = (w7 >> 7) & mask; + out[14] = ((w7 >> 42) | (w8 << 22)) & mask; + out[15] = (w8 >> 13) & mask; + out[16] = ((w8 >> 48) | (w9 << 16)) & mask; + out[17] = (w9 >> 19) & mask; + out[18] = ((w9 >> 54) | (w10 << 10)) & mask; + out[19] = (w10 >> 25) & mask; + out[20] = ((w10 >> 60) | (w11 << 4)) & mask; + out[21] = ((w11 >> 31) | (w12 << 33)) & mask; + out[22] = (w12 >> 2) & mask; + out[23] = ((w12 >> 37) | (w13 << 27)) & mask; + out[24] = (w13 >> 8) & mask; + out[25] = ((w13 >> 43) | (w14 << 21)) & mask; + out[26] = (w14 >> 14) & mask; + out[27] = ((w14 >> 49) | (w15 << 15)) & mask; + out[28] = (w15 >> 20) & mask; + out[29] = ((w15 >> 55) | (w16 << 9)) & mask; + out[30] = (w16 >> 26) & mask; + out[31] = ((w16 >> 61) | (w17 << 3)) & mask; + + return in; +} + +inline const uint8_t* unpack36_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 68719476735ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); 
+ in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 36) | (w1 << 28)) & mask; + out[2] = (w1 >> 8) & mask; + out[3] = ((w1 >> 44) | (w2 << 20)) & mask; + out[4] = (w2 >> 16) & mask; + out[5] = ((w2 >> 52) | (w3 << 12)) & mask; + out[6] = (w3 >> 24) & mask; + out[7] = ((w3 >> 60) | (w4 << 4)) & mask; + out[8] = ((w4 >> 32) | (w5 << 32)) & mask; + out[9] = (w5 >> 4) & mask; + out[10] = ((w5 >> 40) | (w6 << 24)) & mask; + out[11] = (w6 >> 12) & mask; + out[12] = ((w6 >> 48) | (w7 << 16)) & mask; + out[13] = (w7 >> 20) & mask; + out[14] = ((w7 >> 56) | (w8 << 8)) & mask; + out[15] = w8 >> 28; + out[16] = (w9)&mask; + out[17] = ((w9 >> 36) | (w10 << 28)) & mask; + out[18] = (w10 >> 8) & mask; + out[19] = ((w10 >> 44) | (w11 << 20)) & mask; + out[20] = (w11 >> 16) & mask; + out[21] = ((w11 >> 52) | (w12 << 12)) & mask; + out[22] = (w12 >> 24) & mask; + out[23] = ((w12 >> 60) | (w13 << 4)) & mask; + out[24] = ((w13 >> 32) | (w14 << 32)) & mask; + out[25] = (w14 >> 4) & mask; + out[26] = ((w14 >> 40) | (w15 << 24)) & mask; + out[27] = (w15 >> 12) & mask; + out[28] = ((w15 >> 48) | (w16 << 16)) & mask; + out[29] = (w16 >> 20) & mask; + out[30] = ((w16 >> 56) | (w17 << 8)) & mask; + out[31] = w17 >> 28; + + return in; +} + +inline const uint8_t* unpack37_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 137438953471ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + 
in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 37) | (w1 << 27)) & mask; + out[2] = (w1 >> 10) & mask; + out[3] = ((w1 >> 47) | (w2 << 17)) & mask; + out[4] = (w2 >> 20) & mask; + out[5] = ((w2 >> 57) | (w3 << 7)) & mask; + out[6] = ((w3 >> 30) | (w4 << 34)) & mask; + out[7] = (w4 >> 3) & mask; + out[8] = ((w4 >> 40) | (w5 << 24)) & mask; + out[9] = (w5 >> 13) & mask; + out[10] = ((w5 >> 50) | (w6 << 14)) & mask; + out[11] = (w6 >> 23) & mask; + out[12] = ((w6 >> 60) | (w7 << 4)) & mask; + out[13] = ((w7 >> 33) | (w8 << 31)) & mask; + out[14] = (w8 >> 6) & mask; + out[15] = ((w8 >> 43) | (w9 << 21)) & mask; + out[16] = (w9 >> 16) & mask; + out[17] = ((w9 >> 53) | (w10 << 11)) & mask; + out[18] = (w10 >> 26) & mask; + out[19] = ((w10 >> 63) | (w11 << 1)) & mask; + out[20] = ((w11 >> 36) | (w12 << 28)) & mask; + out[21] = (w12 >> 9) & mask; + out[22] = ((w12 >> 46) | (w13 << 18)) & mask; + out[23] = (w13 >> 19) & mask; + out[24] = ((w13 >> 56) | (w14 << 8)) & mask; + out[25] = ((w14 >> 29) | (w15 << 35)) & mask; + out[26] = (w15 >> 2) & mask; + out[27] = ((w15 >> 39) | (w16 << 25)) & mask; + out[28] = (w16 >> 12) & mask; + out[29] = ((w16 >> 49) | (w17 << 15)) & mask; + out[30] = (w17 >> 22) & mask; + out[31] = ((w17 >> 59) | (w18 << 5)) & mask; + + return in; +} + +inline const uint8_t* unpack38_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 274877906943ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = 
util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 38) | (w1 << 26)) & mask; + out[2] = (w1 >> 12) & mask; + out[3] = ((w1 >> 50) | (w2 << 14)) & mask; + out[4] = (w2 >> 24) & mask; + out[5] = ((w2 >> 62) | (w3 << 2)) & mask; + out[6] = ((w3 >> 36) | (w4 << 28)) & mask; + out[7] = (w4 >> 10) & mask; + out[8] = ((w4 >> 48) | (w5 << 16)) & mask; + out[9] = (w5 >> 22) & mask; + out[10] = ((w5 >> 60) | (w6 << 4)) & mask; + out[11] = ((w6 >> 34) | (w7 << 30)) & mask; + out[12] = (w7 >> 8) & mask; + out[13] = ((w7 >> 46) | (w8 << 18)) & mask; + out[14] = (w8 >> 20) & mask; + out[15] = ((w8 >> 58) | (w9 << 6)) & mask; + out[16] = ((w9 >> 32) | (w10 << 32)) & mask; + out[17] = (w10 >> 6) & mask; + out[18] = ((w10 >> 44) | (w11 << 20)) & mask; + out[19] = (w11 >> 18) & mask; + out[20] = ((w11 >> 56) | (w12 << 8)) & mask; + out[21] = ((w12 >> 30) | (w13 << 34)) & mask; + out[22] = (w13 >> 4) & mask; + out[23] = ((w13 >> 42) | (w14 << 22)) & mask; + out[24] = (w14 >> 16) & mask; + out[25] = ((w14 >> 54) | (w15 << 10)) & mask; + out[26] = ((w15 >> 28) | (w16 << 36)) & mask; + out[27] = (w16 >> 2) & mask; + out[28] = ((w16 >> 40) | (w17 << 24)) & mask; + out[29] = (w17 >> 14) & mask; + out[30] = ((w17 >> 52) | (w18 << 12)) & mask; + out[31] = w18 >> 26; + + return in; +} + +inline const uint8_t* unpack39_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 549755813887ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = 
arrow::bit_util::FromLittleEndian(w19); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 39) | (w1 << 25)) & mask; + out[2] = (w1 >> 14) & mask; + out[3] = ((w1 >> 53) | (w2 << 11)) & mask; + out[4] = ((w2 >> 28) | (w3 << 36)) & mask; + out[5] = (w3 >> 3) & mask; + out[6] = ((w3 >> 42) | (w4 << 22)) & mask; + out[7] = (w4 >> 17) & mask; + out[8] = ((w4 >> 56) | (w5 << 8)) & mask; + out[9] = ((w5 >> 31) | (w6 << 33)) & mask; + out[10] = (w6 >> 6) & mask; + out[11] = ((w6 >> 45) | (w7 << 19)) & mask; + out[12] = (w7 >> 20) & mask; + out[13] = ((w7 >> 59) | (w8 << 5)) & mask; + out[14] = ((w8 >> 34) | (w9 << 30)) & mask; + out[15] = (w9 >> 9) & mask; + out[16] = ((w9 >> 48) | (w10 << 16)) & mask; + out[17] = (w10 >> 23) & mask; + out[18] = ((w10 >> 62) | (w11 << 2)) & mask; + out[19] = ((w11 >> 37) | (w12 << 27)) & mask; + out[20] = (w12 >> 12) & mask; + out[21] = ((w12 >> 51) | (w13 << 13)) & mask; + out[22] = ((w13 >> 26) | (w14 << 38)) & mask; + out[23] = (w14 >> 1) & mask; + out[24] = ((w14 >> 40) | (w15 << 24)) & mask; + out[25] = (w15 >> 15) & mask; + out[26] = ((w15 >> 54) | (w16 << 10)) & mask; + out[27] = ((w16 >> 29) | (w17 << 35)) & mask; + out[28] = (w17 >> 4) & mask; + out[29] = ((w17 >> 43) | (w18 << 21)) & mask; + out[30] = (w18 >> 18) & mask; + out[31] = ((w18 >> 57) | (w19 << 7)) & mask; + + return in; +} + +inline const uint8_t* unpack40_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1099511627775ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 40) | (w1 << 24)) & mask; + out[2] = (w1 >> 16) & mask; + out[3] = ((w1 >> 56) | (w2 << 8)) & mask; + out[4] = ((w2 >> 32) | 
(w3 << 32)) & mask; + out[5] = (w3 >> 8) & mask; + out[6] = ((w3 >> 48) | (w4 << 16)) & mask; + out[7] = w4 >> 24; + out[8] = (w5)&mask; + out[9] = ((w5 >> 40) | (w6 << 24)) & mask; + out[10] = (w6 >> 16) & mask; + out[11] = ((w6 >> 56) | (w7 << 8)) & mask; + out[12] = ((w7 >> 32) | (w8 << 32)) & mask; + out[13] = (w8 >> 8) & mask; + out[14] = ((w8 >> 48) | (w9 << 16)) & mask; + out[15] = w9 >> 24; + out[16] = (w10)&mask; + out[17] = ((w10 >> 40) | (w11 << 24)) & mask; + out[18] = (w11 >> 16) & mask; + out[19] = ((w11 >> 56) | (w12 << 8)) & mask; + out[20] = ((w12 >> 32) | (w13 << 32)) & mask; + out[21] = (w13 >> 8) & mask; + out[22] = ((w13 >> 48) | (w14 << 16)) & mask; + out[23] = w14 >> 24; + out[24] = (w15)&mask; + out[25] = ((w15 >> 40) | (w16 << 24)) & mask; + out[26] = (w16 >> 16) & mask; + out[27] = ((w16 >> 56) | (w17 << 8)) & mask; + out[28] = ((w17 >> 32) | (w18 << 32)) & mask; + out[29] = (w18 >> 8) & mask; + out[30] = ((w18 >> 48) | (w19 << 16)) & mask; + out[31] = w19 >> 24; + + return in; +} + +inline const uint8_t* unpack41_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2199023255551ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 41) | (w1 << 23)) & mask; + out[2] = (w1 >> 18) & mask; + out[3] = ((w1 >> 59) | (w2 << 5)) & mask; + out[4] = ((w2 >> 36) | (w3 << 28)) & mask; + out[5] = (w3 >> 13) & mask; + out[6] = ((w3 >> 54) | (w4 << 10)) & mask; + out[7] = ((w4 >> 31) | (w5 << 33)) & mask; + out[8] = (w5 >> 8) & mask; + out[9] = ((w5 >> 49) | (w6 << 15)) & mask; + out[10] 
= ((w6 >> 26) | (w7 << 38)) & mask; + out[11] = (w7 >> 3) & mask; + out[12] = ((w7 >> 44) | (w8 << 20)) & mask; + out[13] = (w8 >> 21) & mask; + out[14] = ((w8 >> 62) | (w9 << 2)) & mask; + out[15] = ((w9 >> 39) | (w10 << 25)) & mask; + out[16] = (w10 >> 16) & mask; + out[17] = ((w10 >> 57) | (w11 << 7)) & mask; + out[18] = ((w11 >> 34) | (w12 << 30)) & mask; + out[19] = (w12 >> 11) & mask; + out[20] = ((w12 >> 52) | (w13 << 12)) & mask; + out[21] = ((w13 >> 29) | (w14 << 35)) & mask; + out[22] = (w14 >> 6) & mask; + out[23] = ((w14 >> 47) | (w15 << 17)) & mask; + out[24] = ((w15 >> 24) | (w16 << 40)) & mask; + out[25] = (w16 >> 1) & mask; + out[26] = ((w16 >> 42) | (w17 << 22)) & mask; + out[27] = (w17 >> 19) & mask; + out[28] = ((w17 >> 60) | (w18 << 4)) & mask; + out[29] = ((w18 >> 37) | (w19 << 27)) & mask; + out[30] = (w19 >> 14) & mask; + out[31] = ((w19 >> 55) | (w20 << 9)) & mask; + + return in; +} + +inline const uint8_t* unpack42_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4398046511103ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 42) | (w1 << 22)) & mask; + out[2] = (w1 >> 20) & mask; + out[3] = ((w1 >> 62) | (w2 << 2)) & mask; + out[4] = ((w2 >> 40) | (w3 << 24)) & mask; + out[5] = (w3 >> 18) & mask; + out[6] = ((w3 >> 60) | (w4 << 4)) & mask; + out[7] = ((w4 >> 38) | (w5 << 26)) & mask; + out[8] = (w5 >> 16) & mask; + out[9] = ((w5 >> 58) | (w6 << 6)) & mask; + out[10] = ((w6 >> 36) | (w7 << 28)) & mask; + out[11] = (w7 >> 14) & mask; + out[12] = ((w7 >> 56) | (w8 << 
8)) & mask; + out[13] = ((w8 >> 34) | (w9 << 30)) & mask; + out[14] = (w9 >> 12) & mask; + out[15] = ((w9 >> 54) | (w10 << 10)) & mask; + out[16] = ((w10 >> 32) | (w11 << 32)) & mask; + out[17] = (w11 >> 10) & mask; + out[18] = ((w11 >> 52) | (w12 << 12)) & mask; + out[19] = ((w12 >> 30) | (w13 << 34)) & mask; + out[20] = (w13 >> 8) & mask; + out[21] = ((w13 >> 50) | (w14 << 14)) & mask; + out[22] = ((w14 >> 28) | (w15 << 36)) & mask; + out[23] = (w15 >> 6) & mask; + out[24] = ((w15 >> 48) | (w16 << 16)) & mask; + out[25] = ((w16 >> 26) | (w17 << 38)) & mask; + out[26] = (w17 >> 4) & mask; + out[27] = ((w17 >> 46) | (w18 << 18)) & mask; + out[28] = ((w18 >> 24) | (w19 << 40)) & mask; + out[29] = (w19 >> 2) & mask; + out[30] = ((w19 >> 44) | (w20 << 20)) & mask; + out[31] = w20 >> 22; + + return in; +} + +inline const uint8_t* unpack43_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 8796093022207ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 43) | (w1 << 21)) & mask; + out[2] = ((w1 >> 22) | (w2 << 42)) & mask; + out[3] = (w2 >> 1) & mask; + out[4] = ((w2 >> 44) | (w3 << 20)) & mask; + out[5] = ((w3 >> 23) | (w4 << 41)) & mask; + out[6] = (w4 >> 2) & mask; + out[7] = ((w4 >> 45) | (w5 << 19)) & mask; + out[8] = ((w5 >> 24) | (w6 << 40)) & mask; + out[9] = (w6 >> 3) & mask; + out[10] = ((w6 >> 46) | (w7 << 18)) & mask; + out[11] = ((w7 >> 25) | (w8 << 39)) & mask; + out[12] = (w8 >> 4) & mask; 
+ out[13] = ((w8 >> 47) | (w9 << 17)) & mask; + out[14] = ((w9 >> 26) | (w10 << 38)) & mask; + out[15] = (w10 >> 5) & mask; + out[16] = ((w10 >> 48) | (w11 << 16)) & mask; + out[17] = ((w11 >> 27) | (w12 << 37)) & mask; + out[18] = (w12 >> 6) & mask; + out[19] = ((w12 >> 49) | (w13 << 15)) & mask; + out[20] = ((w13 >> 28) | (w14 << 36)) & mask; + out[21] = (w14 >> 7) & mask; + out[22] = ((w14 >> 50) | (w15 << 14)) & mask; + out[23] = ((w15 >> 29) | (w16 << 35)) & mask; + out[24] = (w16 >> 8) & mask; + out[25] = ((w16 >> 51) | (w17 << 13)) & mask; + out[26] = ((w17 >> 30) | (w18 << 34)) & mask; + out[27] = (w18 >> 9) & mask; + out[28] = ((w18 >> 52) | (w19 << 12)) & mask; + out[29] = ((w19 >> 31) | (w20 << 33)) & mask; + out[30] = (w20 >> 10) & mask; + out[31] = ((w20 >> 53) | (w21 << 11)) & mask; + + return in; +} + +inline const uint8_t* unpack44_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 17592186044415ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 44) | (w1 << 20)) & mask; + out[2] = ((w1 >> 24) | (w2 << 40)) & mask; + out[3] = (w2 >> 4) & mask; + out[4] = ((w2 >> 48) | (w3 << 16)) & mask; + out[5] = ((w3 >> 28) | (w4 << 36)) & mask; + out[6] = (w4 >> 8) & mask; + out[7] = ((w4 >> 52) | (w5 << 12)) & mask; + out[8] = ((w5 >> 32) | (w6 << 32)) & mask; + out[9] = (w6 >> 12) & mask; + out[10] = ((w6 >> 56) | (w7 << 8)) & mask; + out[11] = ((w7 >> 36) | (w8 << 28)) & mask; + out[12] = (w8 
>> 16) & mask; + out[13] = ((w8 >> 60) | (w9 << 4)) & mask; + out[14] = ((w9 >> 40) | (w10 << 24)) & mask; + out[15] = w10 >> 20; + out[16] = (w11)&mask; + out[17] = ((w11 >> 44) | (w12 << 20)) & mask; + out[18] = ((w12 >> 24) | (w13 << 40)) & mask; + out[19] = (w13 >> 4) & mask; + out[20] = ((w13 >> 48) | (w14 << 16)) & mask; + out[21] = ((w14 >> 28) | (w15 << 36)) & mask; + out[22] = (w15 >> 8) & mask; + out[23] = ((w15 >> 52) | (w16 << 12)) & mask; + out[24] = ((w16 >> 32) | (w17 << 32)) & mask; + out[25] = (w17 >> 12) & mask; + out[26] = ((w17 >> 56) | (w18 << 8)) & mask; + out[27] = ((w18 >> 36) | (w19 << 28)) & mask; + out[28] = (w19 >> 16) & mask; + out[29] = ((w19 >> 60) | (w20 << 4)) & mask; + out[30] = ((w20 >> 40) | (w21 << 24)) & mask; + out[31] = w21 >> 20; + + return in; +} + +inline const uint8_t* unpack45_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 35184372088831ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 45) | (w1 << 19)) & mask; + out[2] = ((w1 >> 26) | (w2 << 38)) & mask; + out[3] = (w2 >> 7) & mask; + out[4] = ((w2 >> 52) | (w3 << 12)) & mask; + out[5] = ((w3 >> 33) | (w4 << 31)) & mask; + out[6] = (w4 >> 14) & mask; + out[7] = ((w4 >> 59) | (w5 << 5)) & mask; + out[8] = ((w5 >> 40) | (w6 << 24)) & mask; + out[9] = ((w6 >> 21) | (w7 << 43)) & mask; + out[10] = (w7 >> 
2) & mask; + out[11] = ((w7 >> 47) | (w8 << 17)) & mask; + out[12] = ((w8 >> 28) | (w9 << 36)) & mask; + out[13] = (w9 >> 9) & mask; + out[14] = ((w9 >> 54) | (w10 << 10)) & mask; + out[15] = ((w10 >> 35) | (w11 << 29)) & mask; + out[16] = (w11 >> 16) & mask; + out[17] = ((w11 >> 61) | (w12 << 3)) & mask; + out[18] = ((w12 >> 42) | (w13 << 22)) & mask; + out[19] = ((w13 >> 23) | (w14 << 41)) & mask; + out[20] = (w14 >> 4) & mask; + out[21] = ((w14 >> 49) | (w15 << 15)) & mask; + out[22] = ((w15 >> 30) | (w16 << 34)) & mask; + out[23] = (w16 >> 11) & mask; + out[24] = ((w16 >> 56) | (w17 << 8)) & mask; + out[25] = ((w17 >> 37) | (w18 << 27)) & mask; + out[26] = (w18 >> 18) & mask; + out[27] = ((w18 >> 63) | (w19 << 1)) & mask; + out[28] = ((w19 >> 44) | (w20 << 20)) & mask; + out[29] = ((w20 >> 25) | (w21 << 39)) & mask; + out[30] = (w21 >> 6) & mask; + out[31] = ((w21 >> 51) | (w22 << 13)) & mask; + + return in; +} + +inline const uint8_t* unpack46_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 70368744177663ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 46) | (w1 << 18)) & mask; + out[2] = ((w1 >> 28) | (w2 << 36)) & mask; + out[3] = (w2 >> 10) & mask; + out[4] = ((w2 >> 56) | (w3 << 8)) & mask; + out[5] = ((w3 >> 38) | (w4 << 26)) & mask; + out[6] = ((w4 >> 20) | (w5 << 44)) & mask; + out[7] 
= (w5 >> 2) & mask; + out[8] = ((w5 >> 48) | (w6 << 16)) & mask; + out[9] = ((w6 >> 30) | (w7 << 34)) & mask; + out[10] = (w7 >> 12) & mask; + out[11] = ((w7 >> 58) | (w8 << 6)) & mask; + out[12] = ((w8 >> 40) | (w9 << 24)) & mask; + out[13] = ((w9 >> 22) | (w10 << 42)) & mask; + out[14] = (w10 >> 4) & mask; + out[15] = ((w10 >> 50) | (w11 << 14)) & mask; + out[16] = ((w11 >> 32) | (w12 << 32)) & mask; + out[17] = (w12 >> 14) & mask; + out[18] = ((w12 >> 60) | (w13 << 4)) & mask; + out[19] = ((w13 >> 42) | (w14 << 22)) & mask; + out[20] = ((w14 >> 24) | (w15 << 40)) & mask; + out[21] = (w15 >> 6) & mask; + out[22] = ((w15 >> 52) | (w16 << 12)) & mask; + out[23] = ((w16 >> 34) | (w17 << 30)) & mask; + out[24] = (w17 >> 16) & mask; + out[25] = ((w17 >> 62) | (w18 << 2)) & mask; + out[26] = ((w18 >> 44) | (w19 << 20)) & mask; + out[27] = ((w19 >> 26) | (w20 << 38)) & mask; + out[28] = (w20 >> 8) & mask; + out[29] = ((w20 >> 54) | (w21 << 10)) & mask; + out[30] = ((w21 >> 36) | (w22 << 28)) & mask; + out[31] = w22 >> 18; + + return in; +} + +inline const uint8_t* unpack47_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 140737488355327ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 47) | (w1 << 17)) & 
mask; + out[2] = ((w1 >> 30) | (w2 << 34)) & mask; + out[3] = (w2 >> 13) & mask; + out[4] = ((w2 >> 60) | (w3 << 4)) & mask; + out[5] = ((w3 >> 43) | (w4 << 21)) & mask; + out[6] = ((w4 >> 26) | (w5 << 38)) & mask; + out[7] = (w5 >> 9) & mask; + out[8] = ((w5 >> 56) | (w6 << 8)) & mask; + out[9] = ((w6 >> 39) | (w7 << 25)) & mask; + out[10] = ((w7 >> 22) | (w8 << 42)) & mask; + out[11] = (w8 >> 5) & mask; + out[12] = ((w8 >> 52) | (w9 << 12)) & mask; + out[13] = ((w9 >> 35) | (w10 << 29)) & mask; + out[14] = ((w10 >> 18) | (w11 << 46)) & mask; + out[15] = (w11 >> 1) & mask; + out[16] = ((w11 >> 48) | (w12 << 16)) & mask; + out[17] = ((w12 >> 31) | (w13 << 33)) & mask; + out[18] = (w13 >> 14) & mask; + out[19] = ((w13 >> 61) | (w14 << 3)) & mask; + out[20] = ((w14 >> 44) | (w15 << 20)) & mask; + out[21] = ((w15 >> 27) | (w16 << 37)) & mask; + out[22] = (w16 >> 10) & mask; + out[23] = ((w16 >> 57) | (w17 << 7)) & mask; + out[24] = ((w17 >> 40) | (w18 << 24)) & mask; + out[25] = ((w18 >> 23) | (w19 << 41)) & mask; + out[26] = (w19 >> 6) & mask; + out[27] = ((w19 >> 53) | (w20 << 11)) & mask; + out[28] = ((w20 >> 36) | (w21 << 28)) & mask; + out[29] = ((w21 >> 19) | (w22 << 45)) & mask; + out[30] = (w22 >> 2) & mask; + out[31] = ((w22 >> 49) | (w23 << 15)) & mask; + + return in; +} + +inline const uint8_t* unpack48_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 281474976710655ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t 
w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 48) | (w1 << 16)) & mask; + out[2] = ((w1 >> 32) | (w2 << 32)) & mask; + out[3] = w2 >> 16; + out[4] = (w3)&mask; + out[5] = ((w3 >> 48) | (w4 << 16)) & mask; + out[6] = ((w4 >> 32) | (w5 << 32)) & mask; + out[7] = w5 >> 16; + out[8] = (w6)&mask; + out[9] = ((w6 >> 48) | (w7 << 16)) & mask; + out[10] = ((w7 >> 32) | (w8 << 32)) & mask; + out[11] = w8 >> 16; + out[12] = (w9)&mask; + out[13] = ((w9 >> 48) | (w10 << 16)) & mask; + out[14] = ((w10 >> 32) | (w11 << 32)) & mask; + out[15] = w11 >> 16; + out[16] = (w12)&mask; + out[17] = ((w12 >> 48) | (w13 << 16)) & mask; + out[18] = ((w13 >> 32) | (w14 << 32)) & mask; + out[19] = w14 >> 16; + out[20] = (w15)&mask; + out[21] = ((w15 >> 48) | (w16 << 16)) & mask; + out[22] = ((w16 >> 32) | (w17 << 32)) & mask; + out[23] = w17 >> 16; + out[24] = (w18)&mask; + out[25] = ((w18 >> 48) | (w19 << 16)) & mask; + out[26] = ((w19 >> 32) | (w20 << 32)) & mask; + out[27] = w20 >> 16; + out[28] = (w21)&mask; + out[29] = ((w21 >> 48) | (w22 << 16)) & mask; + out[30] = ((w22 >> 32) | (w23 << 32)) & mask; + out[31] = w23 >> 16; + + return in; +} + +inline const uint8_t* unpack49_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 562949953421311ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in 
+= 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 49) | (w1 << 15)) & mask; + out[2] = ((w1 >> 34) | (w2 << 30)) & mask; + out[3] = ((w2 >> 19) | (w3 << 45)) & mask; + out[4] = (w3 >> 4) & mask; + out[5] = ((w3 >> 53) | (w4 << 11)) & mask; + out[6] = ((w4 >> 38) | (w5 << 26)) & mask; + out[7] = ((w5 >> 23) | (w6 << 41)) & mask; + out[8] = (w6 >> 8) & mask; + out[9] = ((w6 >> 57) | (w7 << 7)) & mask; + out[10] = ((w7 >> 42) | (w8 << 22)) & mask; + out[11] = ((w8 >> 27) | (w9 << 37)) & mask; + out[12] = (w9 >> 12) & mask; + out[13] = ((w9 >> 61) | (w10 << 3)) & mask; + out[14] = ((w10 >> 46) | (w11 << 18)) & mask; + out[15] = ((w11 >> 31) | (w12 << 33)) & mask; + out[16] = ((w12 >> 16) | (w13 << 48)) & mask; + out[17] = (w13 >> 1) & mask; + out[18] = ((w13 >> 50) | (w14 << 14)) & mask; + out[19] = ((w14 >> 35) | (w15 << 29)) & mask; + out[20] = ((w15 >> 20) | (w16 << 44)) & mask; + out[21] = (w16 >> 5) & mask; + out[22] = ((w16 >> 54) | (w17 << 10)) & mask; + out[23] = ((w17 >> 39) | (w18 << 25)) & mask; + out[24] = ((w18 >> 24) | (w19 << 40)) & mask; + out[25] = (w19 >> 9) & mask; + out[26] = ((w19 >> 58) | (w20 << 6)) & mask; + out[27] = ((w20 >> 43) | (w21 << 21)) & mask; + out[28] = ((w21 >> 28) | (w22 << 36)) & mask; + out[29] = (w22 >> 13) & mask; + out[30] = ((w22 >> 62) | (w23 << 2)) & mask; + out[31] = ((w23 >> 47) | (w24 << 17)) & mask; + + return in; +} + +inline const uint8_t* unpack50_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1125899906842623ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = 
util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 50) | (w1 << 14)) & mask; + out[2] = ((w1 >> 36) | (w2 << 28)) & mask; + out[3] = ((w2 >> 22) | (w3 << 42)) & mask; + out[4] = (w3 >> 8) & mask; + out[5] = ((w3 >> 58) | (w4 << 6)) & mask; + out[6] = ((w4 >> 44) | (w5 << 20)) & mask; + out[7] = ((w5 >> 30) | (w6 << 34)) & mask; + out[8] = ((w6 >> 16) | (w7 << 48)) & mask; + out[9] = (w7 >> 2) & mask; + out[10] = ((w7 >> 52) | (w8 << 12)) & mask; + out[11] = ((w8 >> 38) | (w9 << 26)) & mask; + out[12] = ((w9 >> 24) | (w10 << 40)) & mask; + out[13] = (w10 >> 10) & mask; + out[14] = ((w10 >> 60) | (w11 << 4)) & mask; + out[15] = ((w11 >> 46) | (w12 << 18)) & mask; + out[16] = ((w12 >> 32) | (w13 << 32)) & mask; + out[17] = ((w13 >> 18) | (w14 << 46)) & mask; + out[18] = (w14 >> 4) & mask; + out[19] = ((w14 >> 54) | (w15 << 10)) & mask; + out[20] = ((w15 >> 40) | (w16 << 24)) & mask; + out[21] = ((w16 >> 26) | (w17 << 38)) & mask; + out[22] = (w17 >> 12) & mask; + out[23] = ((w17 >> 62) | (w18 << 2)) & mask; + out[24] = ((w18 >> 48) | (w19 << 16)) & mask; + out[25] = ((w19 >> 34) | (w20 << 30)) & mask; + out[26] = ((w20 >> 20) | (w21 << 44)) & mask; + out[27] = (w21 >> 6) & mask; + out[28] = ((w21 >> 56) | (w22 << 8)) & mask; + out[29] = ((w22 >> 42) | (w23 << 22)) & mask; + out[30] = ((w23 >> 28) | (w24 << 36)) & mask; + out[31] = w24 >> 14; + + return in; +} + +inline const uint8_t* unpack51_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2251799813685247ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = 
arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 51) | (w1 << 13)) & mask; + out[2] = ((w1 >> 38) | (w2 << 26)) & mask; + out[3] = ((w2 >> 25) | (w3 << 39)) & mask; + out[4] = (w3 >> 12) & mask; + out[5] = ((w3 >> 63) | (w4 << 1)) & mask; + out[6] = ((w4 >> 50) | (w5 << 14)) & mask; + out[7] = ((w5 >> 37) | (w6 << 27)) & mask; + out[8] = ((w6 >> 24) | (w7 << 40)) & mask; + out[9] = (w7 >> 11) & mask; + out[10] = ((w7 >> 62) | (w8 << 2)) & mask; + out[11] = ((w8 >> 49) | (w9 << 15)) & mask; + out[12] = ((w9 >> 36) | (w10 << 28)) & mask; + out[13] = ((w10 >> 23) | (w11 << 41)) & mask; + out[14] = (w11 >> 10) & mask; + out[15] = ((w11 >> 61) | (w12 << 3)) & mask; + out[16] = ((w12 >> 48) | (w13 << 16)) & mask; + out[17] = ((w13 >> 35) | (w14 << 29)) & mask; + out[18] = ((w14 >> 22) | (w15 << 42)) & mask; + out[19] = (w15 >> 9) & mask; + out[20] = ((w15 >> 60) | (w16 << 4)) & mask; + out[21] = ((w16 >> 47) | (w17 << 17)) & mask; + out[22] = ((w17 >> 34) | (w18 << 30)) & mask; + out[23] = ((w18 >> 21) | (w19 << 43)) & mask; + out[24] = (w19 >> 8) & mask; + out[25] = ((w19 >> 59) | (w20 << 5)) & mask; + out[26] = ((w20 >> 46) | (w21 << 18)) & mask; + out[27] = ((w21 >> 33) | (w22 << 31)) & mask; + out[28] = ((w22 >> 20) | (w23 << 44)) & mask; + out[29] = (w23 >> 7) & mask; + out[30] = ((w23 >> 58) | (w24 << 6)) & mask; + out[31] = ((w24 >> 45) | (w25 << 19)) & mask; + + return in; +} + +inline const uint8_t* unpack52_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4503599627370495ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = 
arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 52) | (w1 << 12)) & mask; + out[2] = ((w1 >> 40) | (w2 << 24)) & mask; + out[3] = ((w2 >> 28) | (w3 << 36)) & mask; + out[4] = ((w3 >> 16) | (w4 << 48)) & mask; + out[5] = (w4 >> 4) & mask; + out[6] = ((w4 >> 56) | (w5 << 8)) & mask; + out[7] = ((w5 >> 44) | (w6 << 20)) & mask; + out[8] = ((w6 >> 32) | (w7 << 32)) & mask; + out[9] = ((w7 >> 20) | (w8 << 44)) & mask; + out[10] = (w8 >> 8) & mask; + out[11] = ((w8 >> 60) | (w9 << 4)) & mask; + out[12] = ((w9 >> 48) | (w10 << 16)) & mask; + out[13] = ((w10 >> 36) | (w11 << 28)) & mask; + out[14] = ((w11 >> 24) | (w12 << 40)) & mask; + out[15] = w12 >> 12; + out[16] = (w13)&mask; + out[17] = ((w13 >> 52) | (w14 << 12)) & mask; + out[18] = ((w14 >> 40) | (w15 << 24)) & mask; + out[19] = ((w15 >> 28) | (w16 << 36)) & mask; + out[20] = ((w16 >> 16) | (w17 << 48)) & mask; + out[21] = (w17 >> 4) & mask; + out[22] = ((w17 >> 56) | (w18 << 8)) & mask; + out[23] = ((w18 >> 44) | (w19 << 20)) & mask; + out[24] = ((w19 >> 32) | (w20 << 32)) & mask; + out[25] = ((w20 >> 20) | (w21 << 44)) & mask; + out[26] = (w21 >> 8) & mask; + out[27] = ((w21 >> 60) | (w22 << 4)) & mask; + out[28] = ((w22 >> 48) | (w23 << 16)) & mask; + out[29] = ((w23 >> 36) | (w24 << 28)) & mask; + out[30] = ((w24 >> 24) | (w25 << 40)) & mask; + out[31] = w25 >> 12; + + return in; +} + +inline const uint8_t* unpack53_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9007199254740991ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + 
in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 53) | (w1 << 11)) & mask; + out[2] = ((w1 >> 42) | (w2 << 22)) & mask; + out[3] = ((w2 >> 31) | (w3 << 33)) & mask; + out[4] = ((w3 >> 20) | (w4 << 44)) & mask; + out[5] = (w4 >> 9) & mask; + out[6] = ((w4 >> 62) | (w5 << 2)) & mask; + out[7] = ((w5 >> 51) | (w6 << 13)) & mask; + out[8] = ((w6 >> 40) | (w7 << 24)) & mask; + out[9] = ((w7 >> 29) | (w8 << 35)) & mask; + out[10] = ((w8 >> 18) | (w9 << 46)) & mask; + out[11] = (w9 >> 7) & mask; + out[12] = ((w9 >> 60) | (w10 << 4)) & mask; + out[13] = ((w10 >> 49) | (w11 << 15)) & mask; + out[14] = ((w11 >> 38) | (w12 << 26)) & mask; + out[15] = ((w12 >> 27) | (w13 << 37)) & mask; + out[16] = ((w13 >> 16) | (w14 << 48)) & mask; + out[17] = (w14 >> 5) & mask; + out[18] = ((w14 >> 58) | (w15 << 6)) & mask; + out[19] = ((w15 >> 47) | (w16 << 17)) & mask; + out[20] = ((w16 >> 36) | (w17 << 28)) & mask; + out[21] = ((w17 >> 25) | (w18 << 39)) & mask; + out[22] = ((w18 >> 14) | (w19 << 50)) & mask; + out[23] = (w19 >> 3) & mask; + out[24] = ((w19 >> 56) | (w20 << 8)) & mask; + out[25] = ((w20 >> 45) | (w21 << 19)) & mask; + out[26] = ((w21 >> 34) | (w22 << 30)) & mask; + out[27] = ((w22 >> 23) | (w23 << 41)) & mask; + out[28] = ((w23 >> 12) | (w24 << 52)) & mask; + out[29] = (w24 >> 1) & mask; + out[30] = ((w24 >> 54) | (w25 << 10)) & mask; + out[31] = ((w25 >> 43) | (w26 << 21)) & mask; + + return 
in; +} + +inline const uint8_t* unpack54_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 18014398509481983ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 54) | (w1 << 10)) & mask; + out[2] = ((w1 >> 44) | (w2 << 20)) & mask; + out[3] = ((w2 >> 34) | (w3 << 30)) & mask; + out[4] = ((w3 >> 24) | (w4 << 40)) & mask; + out[5] = ((w4 >> 14) | (w5 << 50)) & mask; + out[6] = (w5 >> 4) & mask; + out[7] = ((w5 >> 58) | (w6 << 6)) & mask; + out[8] = ((w6 >> 48) | (w7 << 16)) & mask; + out[9] = ((w7 >> 38) | (w8 << 26)) & mask; + out[10] = ((w8 >> 28) | (w9 << 36)) & mask; + out[11] = ((w9 >> 18) | (w10 << 46)) & mask; + out[12] = (w10 >> 8) & mask; + out[13] = ((w10 >> 62) | (w11 << 2)) & mask; + out[14] = ((w11 >> 52) | (w12 << 12)) & mask; + out[15] = ((w12 >> 42) | (w13 << 22)) & mask; + out[16] = ((w13 >> 32) | (w14 << 32)) & mask; + out[17] = ((w14 >> 22) | (w15 << 42)) & mask; + out[18] = ((w15 >> 12) | (w16 << 52)) 
& mask; + out[19] = (w16 >> 2) & mask; + out[20] = ((w16 >> 56) | (w17 << 8)) & mask; + out[21] = ((w17 >> 46) | (w18 << 18)) & mask; + out[22] = ((w18 >> 36) | (w19 << 28)) & mask; + out[23] = ((w19 >> 26) | (w20 << 38)) & mask; + out[24] = ((w20 >> 16) | (w21 << 48)) & mask; + out[25] = (w21 >> 6) & mask; + out[26] = ((w21 >> 60) | (w22 << 4)) & mask; + out[27] = ((w22 >> 50) | (w23 << 14)) & mask; + out[28] = ((w23 >> 40) | (w24 << 24)) & mask; + out[29] = ((w24 >> 30) | (w25 << 34)) & mask; + out[30] = ((w25 >> 20) | (w26 << 44)) & mask; + out[31] = w26 >> 10; + + return in; +} + +inline const uint8_t* unpack55_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 36028797018963967ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 55) | (w1 << 9)) & mask; + out[2] = ((w1 >> 46) | (w2 << 18)) & mask; + out[3] = ((w2 >> 
37) | (w3 << 27)) & mask; + out[4] = ((w3 >> 28) | (w4 << 36)) & mask; + out[5] = ((w4 >> 19) | (w5 << 45)) & mask; + out[6] = ((w5 >> 10) | (w6 << 54)) & mask; + out[7] = (w6 >> 1) & mask; + out[8] = ((w6 >> 56) | (w7 << 8)) & mask; + out[9] = ((w7 >> 47) | (w8 << 17)) & mask; + out[10] = ((w8 >> 38) | (w9 << 26)) & mask; + out[11] = ((w9 >> 29) | (w10 << 35)) & mask; + out[12] = ((w10 >> 20) | (w11 << 44)) & mask; + out[13] = ((w11 >> 11) | (w12 << 53)) & mask; + out[14] = (w12 >> 2) & mask; + out[15] = ((w12 >> 57) | (w13 << 7)) & mask; + out[16] = ((w13 >> 48) | (w14 << 16)) & mask; + out[17] = ((w14 >> 39) | (w15 << 25)) & mask; + out[18] = ((w15 >> 30) | (w16 << 34)) & mask; + out[19] = ((w16 >> 21) | (w17 << 43)) & mask; + out[20] = ((w17 >> 12) | (w18 << 52)) & mask; + out[21] = (w18 >> 3) & mask; + out[22] = ((w18 >> 58) | (w19 << 6)) & mask; + out[23] = ((w19 >> 49) | (w20 << 15)) & mask; + out[24] = ((w20 >> 40) | (w21 << 24)) & mask; + out[25] = ((w21 >> 31) | (w22 << 33)) & mask; + out[26] = ((w22 >> 22) | (w23 << 42)) & mask; + out[27] = ((w23 >> 13) | (w24 << 51)) & mask; + out[28] = (w24 >> 4) & mask; + out[29] = ((w24 >> 59) | (w25 << 5)) & mask; + out[30] = ((w25 >> 50) | (w26 << 14)) & mask; + out[31] = ((w26 >> 41) | (w27 << 23)) & mask; + + return in; +} + +inline const uint8_t* unpack56_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 72057594037927935ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t 
w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 56) | (w1 << 8)) & mask; + out[2] = ((w1 >> 48) | (w2 << 16)) & mask; + out[3] = ((w2 >> 40) | (w3 << 24)) & mask; + out[4] = ((w3 >> 32) | (w4 << 32)) & mask; + out[5] = ((w4 >> 24) | (w5 << 40)) & mask; + out[6] = ((w5 >> 16) | (w6 << 48)) & mask; + out[7] = w6 >> 8; + out[8] = (w7)&mask; + out[9] = ((w7 >> 56) | (w8 << 8)) & mask; + out[10] = ((w8 >> 48) | (w9 << 16)) & mask; + out[11] = ((w9 >> 40) | (w10 << 24)) & mask; + out[12] = ((w10 >> 32) | (w11 << 32)) & mask; + out[13] = ((w11 >> 24) | (w12 << 40)) & mask; + out[14] = ((w12 >> 16) | (w13 << 48)) & mask; + out[15] = w13 >> 8; + out[16] = (w14)&mask; + out[17] = ((w14 >> 56) | (w15 << 8)) & mask; + out[18] = ((w15 >> 48) | (w16 << 16)) & mask; + out[19] = ((w16 >> 40) | (w17 << 24)) & mask; + out[20] = ((w17 >> 32) | (w18 << 32)) & mask; + out[21] = ((w18 >> 24) | (w19 << 40)) & mask; + out[22] = ((w19 >> 16) | (w20 << 48)) & mask; + out[23] = w20 >> 8; + out[24] = (w21)&mask; + out[25] = ((w21 >> 56) | (w22 << 8)) & mask; + out[26] = ((w22 >> 48) | (w23 << 16)) & mask; + out[27] = ((w23 >> 40) | (w24 << 24)) & mask; + out[28] = ((w24 >> 32) | (w25 << 32)) & mask; + out[29] = ((w25 >> 24) | (w26 << 40)) & mask; + out[30] = ((w26 >> 16) | (w27 << 48)) & mask; + out[31] = w27 >> 8; + + return in; +} + +inline const uint8_t* unpack57_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 144115188075855871ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; 
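+ // 57-bit packing stores 32 values in 57 * 32 / 8 = 228 bytes, so after the final word (w28) below the input pointer advances by only 4 bytes instead of 8.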
+ uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 57) | (w1 << 7)) & mask; + out[2] = ((w1 >> 50) | (w2 << 14)) & mask; + out[3] = ((w2 >> 43) | (w3 << 21)) & mask; + out[4] = ((w3 >> 36) | (w4 << 28)) & mask; + out[5] = ((w4 >> 29) | (w5 << 35)) & mask; + out[6] = ((w5 >> 22) | (w6 << 42)) & mask; + out[7] = ((w6 >> 15) | (w7 << 49)) & mask; + out[8] = ((w7 >> 8) | (w8 << 56)) & mask; + out[9] = (w8 >> 1) & mask; + out[10] = ((w8 >> 58) | (w9 << 6)) & mask; + out[11] = ((w9 >> 51) | (w10 << 13)) & mask; + out[12] = ((w10 >> 44) | (w11 << 20)) & mask; + out[13] = ((w11 >> 37) | (w12 << 27)) & mask; + out[14] = ((w12 >> 30) | (w13 << 34)) & mask; + out[15] = ((w13 >> 23) | (w14 << 41)) & mask; + out[16] = ((w14 >> 16) | (w15 << 48)) & mask; + out[17] = ((w15 >> 9) | (w16 << 55)) & mask; + out[18] = (w16 >> 2) & mask; + out[19] = ((w16 >> 59) | (w17 << 5)) & mask; + out[20] = ((w17 >> 52) | (w18 << 12)) & mask; + out[21] = ((w18 >> 45) | (w19 << 19)) & mask; + out[22] = ((w19 >> 38) | (w20 << 26)) & mask; + out[23] = ((w20 >> 31) | (w21 << 33)) & mask; + out[24] = ((w21 >> 24) | (w22 << 40)) & mask; + out[25] = ((w22 >> 17) | (w23 << 47)) & mask; + out[26] = ((w23 >> 10) | (w24 << 54)) & mask; + out[27] = (w24 >> 3) & mask; + out[28] = ((w24 >> 60) | (w25 << 4)) & mask; + out[29] = ((w25 >> 53) | (w26 << 11)) & mask; + out[30] = ((w26 >> 46) | (w27 << 18)) & mask; + out[31] = ((w27 >> 39) | (w28 << 25)) & mask; + + return in; +} + +inline const uint8_t* unpack58_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 288230376151711743ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = 
arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 58) | (w1 << 6)) & mask; + out[2] = ((w1 >> 52) | (w2 << 12)) & mask; + out[3] = ((w2 >> 46) | (w3 << 18)) & mask; + out[4] = ((w3 >> 40) | (w4 << 24)) & mask; + out[5] = ((w4 >> 34) | (w5 << 30)) & mask; + out[6] = ((w5 >> 28) | (w6 << 36)) & mask; + out[7] = ((w6 >> 22) | (w7 << 42)) & mask; + out[8] = ((w7 >> 16) | (w8 << 48)) & mask; + out[9] = ((w8 >> 10) | (w9 << 54)) & mask; + out[10] = (w9 >> 4) & mask; + out[11] = ((w9 >> 62) | (w10 << 2)) & mask; + out[12] = ((w10 >> 56) | (w11 << 8)) & mask; + out[13] = ((w11 >> 50) | (w12 << 14)) & mask; + out[14] = ((w12 >> 44) | (w13 << 20)) & mask; + out[15] = ((w13 >> 38) | (w14 << 26)) & mask; + out[16] = ((w14 >> 32) | (w15 << 32)) & mask; + out[17] = ((w15 >> 26) | (w16 << 38)) & mask; + out[18] = ((w16 >> 20) | (w17 << 44)) & mask; + out[19] = ((w17 >> 14) | (w18 << 50)) & mask; + out[20] = ((w18 >> 8) | (w19 << 56)) & mask; + out[21] = (w19 >> 2) & mask; + out[22] = ((w19 >> 60) | (w20 << 4)) & mask; + out[23] = ((w20 >> 54) | (w21 << 10)) & mask; + out[24] = ((w21 >> 48) | (w22 << 16)) & mask; + out[25] = ((w22 >> 42) | (w23 << 22)) & mask; + out[26] = ((w23 >> 36) | (w24 << 28)) & mask; + out[27] = ((w24 >> 30) | (w25 << 34)) & mask; + out[28] = ((w25 >> 24) | (w26 << 40)) & mask; + out[29] = ((w26 >> 18) | (w27 << 46)) & mask; + out[30] = ((w27 >> 12) | (w28 << 52)) & mask; + out[31] = w28 >> 6; + + return in; +} + +inline const 
uint8_t* unpack59_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 576460752303423487ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 59) | (w1 << 5)) & mask; + out[2] = ((w1 >> 54) | (w2 << 10)) & mask; + out[3] = ((w2 >> 49) | (w3 << 15)) & mask; + out[4] = ((w3 >> 44) | (w4 << 20)) & mask; + out[5] = ((w4 >> 39) | (w5 << 25)) & mask; + out[6] = ((w5 >> 34) | (w6 << 30)) & mask; + out[7] = ((w6 >> 29) | (w7 << 35)) & mask; + out[8] = ((w7 >> 24) | (w8 << 40)) & mask; + out[9] = ((w8 >> 19) | (w9 << 45)) & mask; + out[10] = ((w9 >> 14) | (w10 << 50)) & mask; + out[11] = ((w10 >> 9) | (w11 << 55)) & mask; + out[12] = (w11 >> 
4) & mask; + out[13] = ((w11 >> 63) | (w12 << 1)) & mask; + out[14] = ((w12 >> 58) | (w13 << 6)) & mask; + out[15] = ((w13 >> 53) | (w14 << 11)) & mask; + out[16] = ((w14 >> 48) | (w15 << 16)) & mask; + out[17] = ((w15 >> 43) | (w16 << 21)) & mask; + out[18] = ((w16 >> 38) | (w17 << 26)) & mask; + out[19] = ((w17 >> 33) | (w18 << 31)) & mask; + out[20] = ((w18 >> 28) | (w19 << 36)) & mask; + out[21] = ((w19 >> 23) | (w20 << 41)) & mask; + out[22] = ((w20 >> 18) | (w21 << 46)) & mask; + out[23] = ((w21 >> 13) | (w22 << 51)) & mask; + out[24] = ((w22 >> 8) | (w23 << 56)) & mask; + out[25] = (w23 >> 3) & mask; + out[26] = ((w23 >> 62) | (w24 << 2)) & mask; + out[27] = ((w24 >> 57) | (w25 << 7)) & mask; + out[28] = ((w25 >> 52) | (w26 << 12)) & mask; + out[29] = ((w26 >> 47) | (w27 << 17)) & mask; + out[30] = ((w27 >> 42) | (w28 << 22)) & mask; + out[31] = ((w28 >> 37) | (w29 << 27)) & mask; + + return in; +} + +inline const uint8_t* unpack60_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 1152921504606846975ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 
8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 60) | (w1 << 4)) & mask; + out[2] = ((w1 >> 56) | (w2 << 8)) & mask; + out[3] = ((w2 >> 52) | (w3 << 12)) & mask; + out[4] = ((w3 >> 48) | (w4 << 16)) & mask; + out[5] = ((w4 >> 44) | (w5 << 20)) & mask; + out[6] = ((w5 >> 40) | (w6 << 24)) & mask; + out[7] = ((w6 >> 36) | (w7 << 28)) & mask; + out[8] = ((w7 >> 32) | (w8 << 32)) & mask; + out[9] = ((w8 >> 28) | (w9 << 36)) & mask; + out[10] = ((w9 >> 24) | (w10 << 40)) & mask; + out[11] = ((w10 >> 20) | (w11 << 44)) & mask; + out[12] = ((w11 >> 16) | (w12 << 48)) & mask; + out[13] = ((w12 >> 12) | (w13 << 52)) & mask; + out[14] = ((w13 >> 8) | (w14 << 56)) & mask; + out[15] = w14 >> 4; + out[16] = (w15)&mask; + out[17] = ((w15 >> 60) | (w16 << 4)) & mask; + out[18] = ((w16 >> 56) | (w17 << 8)) & mask; + out[19] = ((w17 >> 52) | (w18 << 12)) & mask; + out[20] = ((w18 >> 48) | (w19 << 16)) & mask; + out[21] = ((w19 >> 44) | (w20 << 20)) & mask; + out[22] = ((w20 >> 40) | (w21 << 24)) & mask; + out[23] = ((w21 >> 36) | (w22 << 28)) & mask; + out[24] = ((w22 >> 32) | (w23 << 32)) & mask; + out[25] = ((w23 >> 28) | (w24 << 36)) & mask; + out[26] = ((w24 >> 24) | (w25 << 40)) & mask; + out[27] = ((w25 >> 20) | (w26 << 44)) & mask; + out[28] = ((w26 >> 16) | (w27 << 48)) & mask; + out[29] = ((w27 >> 12) | (w28 << 52)) & mask; + out[30] = ((w28 >> 8) | (w29 << 56)) & mask; + out[31] = w29 >> 4; + + return in; +} + +inline const uint8_t* unpack61_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 2305843009213693951ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = 
arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 61) | (w1 << 3)) & mask; + out[2] = ((w1 >> 58) | (w2 << 6)) & mask; + out[3] = ((w2 >> 55) | (w3 << 9)) & mask; + out[4] = ((w3 >> 52) | (w4 << 12)) & mask; + out[5] = ((w4 >> 49) | (w5 << 15)) & mask; + out[6] = ((w5 >> 46) | (w6 << 18)) & mask; + out[7] = ((w6 >> 43) | (w7 << 21)) & mask; + out[8] = ((w7 >> 40) | (w8 << 24)) & mask; + out[9] = ((w8 >> 37) | (w9 << 27)) & mask; + out[10] = ((w9 >> 34) | (w10 << 30)) & mask; + out[11] = ((w10 >> 31) | (w11 << 33)) & mask; + out[12] = ((w11 >> 28) | (w12 << 36)) & mask; + out[13] = ((w12 >> 25) | (w13 << 39)) & mask; + out[14] = ((w13 >> 22) | (w14 << 42)) & mask; + out[15] = ((w14 >> 19) | (w15 << 45)) & mask; + out[16] = ((w15 >> 16) | (w16 << 48)) & mask; + out[17] = ((w16 >> 13) | (w17 << 51)) & mask; + out[18] = ((w17 >> 10) | (w18 << 54)) & mask; + out[19] = ((w18 >> 7) | (w19 << 57)) & mask; + out[20] = ((w19 >> 4) | (w20 << 60)) & mask; + out[21] = (w20 >> 1) & mask; + out[22] = ((w20 >> 62) | (w21 << 2)) & mask; + out[23] = ((w21 >> 59) | (w22 << 5)) & mask; + out[24] = ((w22 >> 56) | (w23 << 8)) & mask; + out[25] = ((w23 >> 53) | (w24 << 11)) & mask; + out[26] = ((w24 >> 50) | (w25 << 14)) & mask; + out[27] = ((w25 >> 47) | (w26 << 17)) & mask; + out[28] = ((w26 >> 44) | (w27 << 20)) & mask; + out[29] = ((w27 >> 41) | (w28 << 23)) & mask; + out[30] = ((w28 >> 38) | (w29 << 26)) & mask; + out[31] = ((w29 >> 35) | (w30 << 29)) & mask; + + return in; +} + +inline const uint8_t* unpack62_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 4611686018427387903ULL; + uint64_t w0 = util::SafeLoadAs(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs(in); + w5 = 
arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + out[0] = (w0)&mask; + out[1] = ((w0 >> 62) | (w1 << 2)) & mask; + out[2] = ((w1 >> 60) | (w2 << 4)) & mask; + out[3] = ((w2 >> 58) | (w3 << 6)) & mask; + out[4] = ((w3 >> 56) | (w4 << 8)) & mask; + out[5] = ((w4 >> 54) | (w5 << 10)) & mask; + out[6] = ((w5 >> 52) | (w6 << 12)) & mask; + out[7] = ((w6 >> 50) | (w7 << 14)) & mask; + out[8] = ((w7 >> 48) | (w8 << 16)) & mask; + out[9] = ((w8 >> 46) | (w9 << 18)) & mask; + out[10] = ((w9 >> 44) | (w10 << 20)) & mask; + out[11] = ((w10 >> 42) | (w11 << 22)) & mask; + out[12] = ((w11 >> 40) | (w12 << 24)) & mask; + out[13] = ((w12 >> 38) | (w13 << 26)) & mask; + out[14] = ((w13 >> 36) | (w14 << 28)) & mask; + out[15] = ((w14 >> 34) | (w15 << 30)) & mask; + out[16] = ((w15 >> 32) | (w16 << 32)) & mask; + out[17] = ((w16 >> 30) | (w17 << 34)) & mask; + out[18] = ((w17 >> 28) | (w18 << 36)) & mask; + out[19] = ((w18 >> 26) | (w19 << 38)) & mask; + out[20] = ((w19 >> 24) | (w20 << 40)) & mask; + out[21] = ((w20 >> 22) | (w21 << 42)) & mask; + out[22] = ((w21 >> 20) | (w22 << 44)) & mask; + out[23] = ((w22 >> 
18) | (w23 << 46)) & mask; + out[24] = ((w23 >> 16) | (w24 << 48)) & mask; + out[25] = ((w24 >> 14) | (w25 << 50)) & mask; + out[26] = ((w25 >> 12) | (w26 << 52)) & mask; + out[27] = ((w26 >> 10) | (w27 << 54)) & mask; + out[28] = ((w27 >> 8) | (w28 << 56)) & mask; + out[29] = ((w28 >> 6) | (w29 << 58)) & mask; + out[30] = ((w29 >> 4) | (w30 << 60)) & mask; + out[31] = w30 >> 2; + + return in; +}
+ +inline const uint8_t* unpack63_64(const uint8_t* in, uint64_t* out) { + const uint64_t mask = 9223372036854775807ULL; + uint64_t w0 = util::SafeLoadAs<uint64_t>(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs<uint64_t>(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs<uint64_t>(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs<uint64_t>(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs<uint64_t>(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs<uint64_t>(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs<uint64_t>(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs<uint64_t>(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs<uint64_t>(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs<uint64_t>(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs<uint64_t>(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs<uint64_t>(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs<uint64_t>(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs<uint64_t>(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs<uint64_t>(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs<uint64_t>(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs<uint64_t>(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs<uint64_t>(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs<uint64_t>(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs<uint64_t>(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs<uint64_t>(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs<uint64_t>(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs<uint64_t>(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs<uint64_t>(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs<uint64_t>(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs<uint64_t>(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs<uint64_t>(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs<uint64_t>(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs<uint64_t>(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs<uint64_t>(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs<uint64_t>(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs<uint64_t>(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 4; + out[0] = (w0)&mask; + out[1] = ((w0 >> 63) | (w1 << 1)) & mask; + out[2] = ((w1 >> 62) | (w2 << 2)) & mask; + out[3] = ((w2 >> 61) | (w3 << 3)) & mask; + out[4] = ((w3 >> 60) | (w4 << 4)) & mask; + out[5] = ((w4 >> 59) | (w5 << 5)) & mask; + out[6] = ((w5 >> 58) | (w6 << 6)) & mask; + out[7] = ((w6 >> 57) | (w7 << 7)) & mask; + out[8] = ((w7 >> 56) | (w8 << 8)) & mask; + out[9] = ((w8 >> 55) | (w9 << 9)) & mask; + out[10] = ((w9 >> 54) | (w10 << 10)) & mask; + out[11] = ((w10 >> 53) | (w11 << 11)) & mask; + out[12] = ((w11 >> 52) | (w12 << 12)) & mask; + out[13] = ((w12 >> 51) | (w13 << 13)) & mask; + out[14] = ((w13 >> 50) | (w14 << 14)) & mask; + out[15] = ((w14 >> 49) | (w15 << 15)) & mask; + out[16] = ((w15 >> 48) | (w16 << 16)) & mask; + out[17] = ((w16 >> 47) | (w17 << 17)) & mask; + out[18] = ((w17 >> 46) | (w18 << 18)) & mask; + out[19] = ((w18 >> 45) | (w19 << 19)) & mask; + out[20] = ((w19 >> 44) | (w20 << 20)) & mask; + out[21] = ((w20 >> 43) | (w21 << 21)) & mask; + out[22] = ((w21 >> 42) | (w22 << 22)) & mask; + out[23] = ((w22 >> 41) | (w23 << 23)) & mask; + out[24] = ((w23 >> 40) | (w24 << 24)) & mask; + out[25] = ((w24 >> 39) | (w25 << 25)) & mask; + out[26] = ((w25 >> 38) | (w26 << 26)) & mask; + out[27] = ((w26 >> 37) | (w27 << 27)) & mask; + out[28] = ((w27 >> 36) | (w28 << 28)) & mask; + out[29] = ((w28 >> 35) | (w29 << 29)) & mask; + out[30] = ((w29 >> 34) | (w30 << 30)) & mask; + out[31] = ((w30 >> 33) | (w31 << 31)) & mask; + + return in; +}
+ +inline const uint8_t* unpack64_64(const uint8_t* in, uint64_t* out) { + uint64_t w0 = util::SafeLoadAs<uint64_t>(in); + w0 = arrow::bit_util::FromLittleEndian(w0); + in += 8; + uint64_t w1 = util::SafeLoadAs<uint64_t>(in); + w1 = arrow::bit_util::FromLittleEndian(w1); + in += 8; + uint64_t w2 = util::SafeLoadAs<uint64_t>(in); + w2 = arrow::bit_util::FromLittleEndian(w2); + in += 8; + uint64_t w3 = util::SafeLoadAs<uint64_t>(in); + w3 = arrow::bit_util::FromLittleEndian(w3); + in += 8; + uint64_t w4 = util::SafeLoadAs<uint64_t>(in); + w4 = arrow::bit_util::FromLittleEndian(w4); + in += 8; + uint64_t w5 = util::SafeLoadAs<uint64_t>(in); + w5 = arrow::bit_util::FromLittleEndian(w5); + in += 8; + uint64_t w6 = util::SafeLoadAs<uint64_t>(in); + w6 = arrow::bit_util::FromLittleEndian(w6); + in += 8; + uint64_t w7 = util::SafeLoadAs<uint64_t>(in); + w7 = arrow::bit_util::FromLittleEndian(w7); + in += 8; + uint64_t w8 = util::SafeLoadAs<uint64_t>(in); + w8 = arrow::bit_util::FromLittleEndian(w8); + in += 8; + uint64_t w9 = util::SafeLoadAs<uint64_t>(in); + w9 = arrow::bit_util::FromLittleEndian(w9); + in += 8; + uint64_t w10 = util::SafeLoadAs<uint64_t>(in); + w10 = arrow::bit_util::FromLittleEndian(w10); + in += 8; + uint64_t w11 = util::SafeLoadAs<uint64_t>(in); + w11 = arrow::bit_util::FromLittleEndian(w11); + in += 8; + uint64_t w12 = util::SafeLoadAs<uint64_t>(in); + w12 = arrow::bit_util::FromLittleEndian(w12); + in += 8; + uint64_t w13 = util::SafeLoadAs<uint64_t>(in); + w13 = arrow::bit_util::FromLittleEndian(w13); + in += 8; + uint64_t w14 = util::SafeLoadAs<uint64_t>(in); + w14 = arrow::bit_util::FromLittleEndian(w14); + in += 8; + uint64_t w15 = util::SafeLoadAs<uint64_t>(in); + w15 = arrow::bit_util::FromLittleEndian(w15); + in += 8; + uint64_t w16 = util::SafeLoadAs<uint64_t>(in); + w16 = arrow::bit_util::FromLittleEndian(w16); + in += 8; + uint64_t w17 = util::SafeLoadAs<uint64_t>(in); + w17 = arrow::bit_util::FromLittleEndian(w17); + in += 8; + uint64_t w18 = util::SafeLoadAs<uint64_t>(in); + w18 = arrow::bit_util::FromLittleEndian(w18); + in += 8; + uint64_t w19 = util::SafeLoadAs<uint64_t>(in); + w19 = arrow::bit_util::FromLittleEndian(w19); + in += 8; + uint64_t w20 = util::SafeLoadAs<uint64_t>(in); + w20 = arrow::bit_util::FromLittleEndian(w20); + in += 8; + uint64_t w21 = util::SafeLoadAs<uint64_t>(in); + w21 = arrow::bit_util::FromLittleEndian(w21); + in += 8; + uint64_t w22 = util::SafeLoadAs<uint64_t>(in); + w22 = arrow::bit_util::FromLittleEndian(w22); + in += 8; + uint64_t w23 = util::SafeLoadAs<uint64_t>(in); + w23 = arrow::bit_util::FromLittleEndian(w23); + in += 8; + uint64_t w24 = util::SafeLoadAs<uint64_t>(in); + w24 = arrow::bit_util::FromLittleEndian(w24); + in += 8; + uint64_t w25 = util::SafeLoadAs<uint64_t>(in); + w25 = arrow::bit_util::FromLittleEndian(w25); + in += 8; + uint64_t w26 = util::SafeLoadAs<uint64_t>(in); + w26 = arrow::bit_util::FromLittleEndian(w26); + in += 8; + uint64_t w27 = util::SafeLoadAs<uint64_t>(in); + w27 = arrow::bit_util::FromLittleEndian(w27); + in += 8; + uint64_t w28 = util::SafeLoadAs<uint64_t>(in); + w28 = arrow::bit_util::FromLittleEndian(w28); + in += 8; + uint64_t w29 = util::SafeLoadAs<uint64_t>(in); + w29 = arrow::bit_util::FromLittleEndian(w29); + in += 8; + uint64_t w30 = util::SafeLoadAs<uint64_t>(in); + w30 = arrow::bit_util::FromLittleEndian(w30); + in += 8; + uint64_t w31 = util::SafeLoadAs<uint64_t>(in); + w31 = arrow::bit_util::FromLittleEndian(w31); + in += 8; + out[0] = w0; + out[1] = w1; + out[2] = w2; + out[3] = w3; + out[4] = w4; + out[5] = w5; + out[6] = w6; + out[7] = w7; + out[8] = w8; + out[9] = w9; + out[10] = w10; + out[11] = w11; + out[12] = w12; + out[13] = w13; + out[14] = w14; + out[15] = w15; + out[16] = w16; + out[17] = w17; + out[18] = w18; + out[19] = w19; + out[20] = w20; + out[21] = w21; + out[22] = w22; + out[23] = w23; + out[24] = w24; + out[25] = w25; + out[26] = w26; + out[27] = w27; + out[28] = w28; + out[29] = w29; + out[30] = w30; + out[31] = w31; + + return in; +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h new file mode 100644 index 0000000000000000000000000000000000000000..7a7d8bf8c44777f4c9e053c6ee1b086d7d954bd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
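The scalar unpackN_64 kernels above all follow one pattern: read packed little-endian 64-bit words with an alignment-safe load, convert them from little-endian, and reassemble each output either from a single word (shift, then mask) or from two adjacent words when a value straddles a word boundary (shift the first word right, OR in the low bits of the next word shifted left, then mask). The loop below is a reference sketch of that pattern, not part of the vendored headers; UnpackReference64 is a hypothetical name, the sketch assumes a little-endian host where the real kernels call FromLittleEndian, and, like the unrolled code, it reads whole 64-bit words, so the buffer must remain readable through the word containing the last packed bit.

#include <cstdint>
#include <cstring>

// Computes the same 32 outputs as unpackN_64 for any bit_width in [1, 64] and
// returns the input pointer advanced by exactly 32 * bit_width / 8 bytes.
inline const uint8_t* UnpackReference64(const uint8_t* in, uint64_t* out, int bit_width) {
  const uint64_t mask =
      bit_width == 64 ? ~uint64_t{0} : (uint64_t{1} << bit_width) - 1;
  int bit_offset = 0;  // bit position of the next value inside the current word
  for (int i = 0; i < 32; ++i) {
    uint64_t word;
    std::memcpy(&word, in, sizeof(word));  // alignment-safe, like util::SafeLoadAs
    uint64_t v = word >> bit_offset;
    if (bit_offset + bit_width > 64) {
      // Value straddles a word boundary: OR in the low bits of the next word.
      uint64_t next;
      std::memcpy(&next, in + 8, sizeof(next));
      v |= next << (64 - bit_offset);
    }
    out[i] = v & mask;
    bit_offset += bit_width;
    in += (bit_offset / 64) * 8;  // step to the next word once it is consumed
    bit_offset %= 64;
  }
  // Consume a half-used trailing word (the "in += 4" endings in the kernels).
  in += (bit_offset + 7) / 8;
  return in;
}

For example, UnpackReference64(buf, values, 50) consumes exactly 200 bytes and produces the same 32 values as one unpack50_64 call. The bpacking_avx2.h header introduced above and the bpacking_avx512.h header that follows declare batched 32-bit variants (unpack32_avx2, unpack32_avx512) of the same unpack contract; the scalar kernels remain the portable fallback.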
+ +#pragma once + +#include <stdint.h> + +namespace arrow { +namespace internal { + +int unpack32_avx2(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h new file mode 100644 index 0000000000000000000000000000000000000000..96723f803e0c1a64ef753ab6a51d8f2bd8c173d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <stdint.h> + +namespace arrow { +namespace internal { + +int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits); + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h new file mode 100644 index 0000000000000000000000000000000000000000..4c661dcce3798c737c1d20bce525dcaa88c83078 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h @@ -0,0 +1,4251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This file was modified from its original version for inclusion in parquet-cpp. +// Original source: +// https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +// The original copyright notice follows. + +// This code is released under the +// Apache License Version 2.0 http://www.apache.org/licenses/.
+// (c) Daniel Lemire 2013 + +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/ubsan.h" + +namespace arrow { +namespace internal { + +inline const uint32_t* unpack1_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) & 1; + out++; + *out = (inl >> 1) & 1; + out++; + *out = (inl >> 2) & 1; + out++; + *out = (inl >> 3) & 1; + out++; + *out = (inl >> 4) & 1; + out++; + *out = (inl >> 5) & 1; + out++; + *out = (inl >> 6) & 1; + out++; + *out = (inl >> 7) & 1; + out++; + *out = (inl >> 8) & 1; + out++; + *out = (inl >> 9) & 1; + out++; + *out = (inl >> 10) & 1; + out++; + *out = (inl >> 11) & 1; + out++; + *out = (inl >> 12) & 1; + out++; + *out = (inl >> 13) & 1; + out++; + *out = (inl >> 14) & 1; + out++; + *out = (inl >> 15) & 1; + out++; + *out = (inl >> 16) & 1; + out++; + *out = (inl >> 17) & 1; + out++; + *out = (inl >> 18) & 1; + out++; + *out = (inl >> 19) & 1; + out++; + *out = (inl >> 20) & 1; + out++; + *out = (inl >> 21) & 1; + out++; + *out = (inl >> 22) & 1; + out++; + *out = (inl >> 23) & 1; + out++; + *out = (inl >> 24) & 1; + out++; + *out = (inl >> 25) & 1; + out++; + *out = (inl >> 26) & 1; + out++; + *out = (inl >> 27) & 1; + out++; + *out = (inl >> 28) & 1; + out++; + *out = (inl >> 29) & 1; + out++; + *out = (inl >> 30) & 1; + out++; + *out = (inl >> 31); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack2_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 2); + out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl >> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 2); + out++; + *out = (inl >> 2) % (1U << 2); + out++; + *out = (inl >> 4) % (1U << 2); + out++; + *out = (inl >> 6) % (1U << 2); + out++; + *out = (inl >> 8) % (1U << 2); + out++; + *out = (inl >> 10) % (1U << 2); + out++; + *out = (inl >> 12) % (1U << 2); + out++; + *out = (inl >> 14) % (1U << 2); + out++; + *out = (inl >> 16) % (1U << 2); + out++; + *out = (inl >> 18) % (1U << 2); + out++; + *out = (inl >> 20) % (1U << 2); + out++; + *out = (inl >> 22) % (1U << 2); + out++; + *out = (inl >> 24) % (1U << 2); + out++; + *out = (inl >> 26) % (1U << 2); + out++; + *out = (inl >> 28) % (1U << 2); + out++; + *out = (inl >> 30); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack3_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 3); + out++; + *out = (inl >> 3) % (1U << 3); + out++; + *out = (inl >> 6) % (1U << 3); + out++; + *out = (inl >> 9) % (1U << 3); + out++; + *out = (inl >> 12) % (1U << 3); + out++; + *out = (inl >> 15) % (1U << 3); + out++; + *out = (inl >> 18) % (1U << 3); + out++; + *out = (inl >> 21) % (1U << 3); + out++; 
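+ // Added annotation: the value that begins at bit 30 of this word straddles
+ // the word boundary. "(inl >> 30)" below takes its low two bits, and once
+ // ++in reloads inl, "*out |= (inl % (1U << 1)) << (3 - 1)" merges in its
+ // high bit from bit 0 of the next word before out is advanced.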
+ *out = (inl >> 24) % (1U << 3); + out++; + *out = (inl >> 27) % (1U << 3); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (3 - 1); + out++; + *out = (inl >> 1) % (1U << 3); + out++; + *out = (inl >> 4) % (1U << 3); + out++; + *out = (inl >> 7) % (1U << 3); + out++; + *out = (inl >> 10) % (1U << 3); + out++; + *out = (inl >> 13) % (1U << 3); + out++; + *out = (inl >> 16) % (1U << 3); + out++; + *out = (inl >> 19) % (1U << 3); + out++; + *out = (inl >> 22) % (1U << 3); + out++; + *out = (inl >> 25) % (1U << 3); + out++; + *out = (inl >> 28) % (1U << 3); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (3 - 2); + out++; + *out = (inl >> 2) % (1U << 3); + out++; + *out = (inl >> 5) % (1U << 3); + out++; + *out = (inl >> 8) % (1U << 3); + out++; + *out = (inl >> 11) % (1U << 3); + out++; + *out = (inl >> 14) % (1U << 3); + out++; + *out = (inl >> 17) % (1U << 3); + out++; + *out = (inl >> 20) % (1U << 3); + out++; + *out = (inl >> 23) % (1U << 3); + out++; + *out = (inl >> 26) % (1U << 3); + out++; + *out = (inl >> 29); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack4_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 4); + out++; + *out = (inl >> 4) % (1U << 4); + out++; + *out = (inl >> 8) % (1U << 4); + out++; + *out = (inl >> 12) % (1U << 4); + out++; + *out = (inl >> 16) % (1U << 4); + out++; + *out = (inl >> 20) % (1U << 4); + out++; + *out = (inl >> 24) % (1U << 4); + out++; + *out = (inl >> 28); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack5_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 5); + out++; + *out = (inl >> 5) % (1U << 5); + out++; + *out = (inl >> 10) % (1U << 5); + out++; + *out = (inl >> 15) % (1U << 5); + out++; + *out = (inl >> 20) % (1U << 5); + out++; + *out = (inl >> 25) % (1U << 5); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % 
(1U << 3)) << (5 - 3); + out++; + *out = (inl >> 3) % (1U << 5); + out++; + *out = (inl >> 8) % (1U << 5); + out++; + *out = (inl >> 13) % (1U << 5); + out++; + *out = (inl >> 18) % (1U << 5); + out++; + *out = (inl >> 23) % (1U << 5); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (5 - 1); + out++; + *out = (inl >> 1) % (1U << 5); + out++; + *out = (inl >> 6) % (1U << 5); + out++; + *out = (inl >> 11) % (1U << 5); + out++; + *out = (inl >> 16) % (1U << 5); + out++; + *out = (inl >> 21) % (1U << 5); + out++; + *out = (inl >> 26) % (1U << 5); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (5 - 4); + out++; + *out = (inl >> 4) % (1U << 5); + out++; + *out = (inl >> 9) % (1U << 5); + out++; + *out = (inl >> 14) % (1U << 5); + out++; + *out = (inl >> 19) % (1U << 5); + out++; + *out = (inl >> 24) % (1U << 5); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (5 - 2); + out++; + *out = (inl >> 2) % (1U << 5); + out++; + *out = (inl >> 7) % (1U << 5); + out++; + *out = (inl >> 12) % (1U << 5); + out++; + *out = (inl >> 17) % (1U << 5); + out++; + *out = (inl >> 22) % (1U << 5); + out++; + *out = (inl >> 27); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack6_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 6); + out++; + *out = (inl >> 6) % (1U << 6); + out++; + *out = (inl >> 12) % (1U << 6); + out++; + *out = (inl >> 18) % (1U << 6); + out++; + *out = (inl >> 24) % (1U << 6); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (6 - 4); + out++; + *out = (inl >> 4) % (1U << 6); + out++; + *out = (inl >> 10) % (1U << 6); + out++; + *out = (inl >> 16) % (1U << 6); + out++; + *out = (inl >> 22) % (1U << 6); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (6 - 2); + out++; + *out = (inl >> 2) % (1U << 6); + out++; + *out = (inl >> 8) % (1U << 6); + out++; + *out = (inl >> 14) % (1U << 6); + out++; + *out = (inl >> 20) % (1U << 6); + out++; + *out = (inl >> 26); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack7_32(const 
uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 7); + out++; + *out = (inl >> 7) % (1U << 7); + out++; + *out = (inl >> 14) % (1U << 7); + out++; + *out = (inl >> 21) % (1U << 7); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (7 - 3); + out++; + *out = (inl >> 3) % (1U << 7); + out++; + *out = (inl >> 10) % (1U << 7); + out++; + *out = (inl >> 17) % (1U << 7); + out++; + *out = (inl >> 24) % (1U << 7); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (7 - 6); + out++; + *out = (inl >> 6) % (1U << 7); + out++; + *out = (inl >> 13) % (1U << 7); + out++; + *out = (inl >> 20) % (1U << 7); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (7 - 2); + out++; + *out = (inl >> 2) % (1U << 7); + out++; + *out = (inl >> 9) % (1U << 7); + out++; + *out = (inl >> 16) % (1U << 7); + out++; + *out = (inl >> 23) % (1U << 7); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (7 - 5); + out++; + *out = (inl >> 5) % (1U << 7); + out++; + *out = (inl >> 12) % (1U << 7); + out++; + *out = (inl >> 19) % (1U << 7); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (7 - 1); + out++; + *out = (inl >> 1) % (1U << 7); + out++; + *out = (inl >> 8) % (1U << 7); + out++; + *out = (inl >> 15) % (1U << 7); + out++; + *out = (inl >> 22) % (1U << 7); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (7 - 4); + out++; + *out = (inl >> 4) % (1U << 7); + out++; + *out = (inl >> 11) % (1U << 7); + out++; + *out = (inl >> 18) % (1U << 7); + out++; + *out = (inl >> 25); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack8_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 
0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 8); + out++; + *out = (inl >> 8) % (1U << 8); + out++; + *out = (inl >> 16) % (1U << 8); + out++; + *out = (inl >> 24); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack9_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 9); + out++; + *out = (inl >> 9) % (1U << 9); + out++; + *out = (inl >> 18) % (1U << 9); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (9 - 4); + out++; + *out = (inl >> 4) % (1U << 9); + out++; + *out = (inl >> 13) % (1U << 9); + out++; + *out = (inl >> 22) % (1U << 9); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (9 - 8); + out++; + *out = (inl >> 8) % (1U << 9); + out++; + *out = (inl >> 17) % (1U << 9); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (9 - 3); + out++; + *out = (inl >> 3) % (1U << 9); + out++; + *out = (inl >> 12) % (1U << 9); + out++; + *out = (inl >> 21) % (1U << 9); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (9 - 7); + out++; + *out = (inl >> 7) % (1U << 9); + out++; + *out = (inl >> 16) % (1U << 9); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (9 - 2); + out++; + *out = (inl >> 2) % (1U << 9); + out++; + *out = (inl >> 11) % (1U << 9); + out++; + *out = (inl >> 20) % (1U << 9); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (9 - 6); + out++; + *out = (inl >> 6) % (1U << 9); + out++; + *out = (inl >> 15) % (1U << 9); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (9 - 1); + out++; + *out = (inl >> 1) % (1U << 9); + out++; + *out = (inl >> 10) % (1U << 9); + out++; + *out = (inl >> 19) % (1U << 9); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (9 - 5); + out++; + *out = (inl >> 5) % (1U << 9); + out++; + *out = (inl >> 14) % (1U << 9); + out++; + *out = (inl >> 23); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack10_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % 
(1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 10); + out++; + *out = (inl >> 10) % (1U << 10); + out++; + *out = (inl >> 20) % (1U << 10); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (10 - 8); + out++; + *out = (inl >> 8) % (1U << 10); + out++; + *out = (inl >> 18) % (1U << 10); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (10 - 6); + out++; + *out = (inl >> 6) % (1U << 10); + out++; + *out = (inl >> 16) % (1U << 10); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (10 - 4); + out++; + *out = (inl >> 4) % (1U << 10); + out++; + *out = (inl >> 14) % (1U << 10); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (10 - 2); + out++; + *out = (inl >> 2) % (1U << 10); + out++; + *out = (inl >> 12) % (1U << 10); + out++; + *out = (inl >> 22); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack11_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 11); + out++; + *out = (inl >> 11) % (1U << 11); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (11 - 1); + out++; + *out = (inl >> 1) % (1U << 11); + out++; + *out = (inl >> 12) % (1U << 11); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (11 - 2); + out++; + *out = (inl >> 2) % (1U << 11); + out++; + *out = (inl >> 13) % (1U << 11); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (11 - 3); + out++; + *out = (inl >> 3) % (1U << 11); + out++; + *out = (inl >> 14) % (1U << 11); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (11 - 4); + out++; + *out = (inl >> 4) % (1U << 11); + out++; + *out = (inl >> 15) % (1U << 11); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (11 - 5); + out++; + *out = (inl >> 5) % (1U << 11); + out++; + *out = (inl >> 16) % (1U << 11); + out++; + *out = (inl >> 27); + 
++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (11 - 6); + out++; + *out = (inl >> 6) % (1U << 11); + out++; + *out = (inl >> 17) % (1U << 11); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (11 - 7); + out++; + *out = (inl >> 7) % (1U << 11); + out++; + *out = (inl >> 18) % (1U << 11); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (11 - 8); + out++; + *out = (inl >> 8) % (1U << 11); + out++; + *out = (inl >> 19) % (1U << 11); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (11 - 9); + out++; + *out = (inl >> 9) % (1U << 11); + out++; + *out = (inl >> 20) % (1U << 11); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (11 - 10); + out++; + *out = (inl >> 10) % (1U << 11); + out++; + *out = (inl >> 21); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack12_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = (inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 12); + out++; + *out = (inl >> 12) % (1U << 12); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (12 - 4); + out++; + *out = 
(inl >> 4) % (1U << 12); + out++; + *out = (inl >> 16) % (1U << 12); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (12 - 8); + out++; + *out = (inl >> 8) % (1U << 12); + out++; + *out = (inl >> 20); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack13_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 13); + out++; + *out = (inl >> 13) % (1U << 13); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (13 - 7); + out++; + *out = (inl >> 7) % (1U << 13); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (13 - 1); + out++; + *out = (inl >> 1) % (1U << 13); + out++; + *out = (inl >> 14) % (1U << 13); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (13 - 8); + out++; + *out = (inl >> 8) % (1U << 13); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (13 - 2); + out++; + *out = (inl >> 2) % (1U << 13); + out++; + *out = (inl >> 15) % (1U << 13); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (13 - 9); + out++; + *out = (inl >> 9) % (1U << 13); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (13 - 3); + out++; + *out = (inl >> 3) % (1U << 13); + out++; + *out = (inl >> 16) % (1U << 13); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (13 - 10); + out++; + *out = (inl >> 10) % (1U << 13); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (13 - 4); + out++; + *out = (inl >> 4) % (1U << 13); + out++; + *out = (inl >> 17) % (1U << 13); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (13 - 11); + out++; + *out = (inl >> 11) % (1U << 13); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (13 - 5); + out++; + *out = (inl >> 5) % (1U << 13); + out++; + *out = (inl >> 18) % (1U << 13); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (13 - 12); + out++; + *out = (inl >> 12) % (1U << 13); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (13 - 6); + out++; + *out = (inl >> 6) % (1U << 13); + out++; + *out = (inl >> 19); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack14_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) % (1U << 14); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 14); + out++; + *out = (inl >> 14) % (1U << 14); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (14 - 10); + out++; + *out = (inl >> 10) % (1U << 14); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (14 - 6); + out++; + *out = (inl >> 6) % (1U << 14); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (14 - 2); + out++; + *out = (inl >> 2) % (1U << 14); + out++; + *out = (inl >> 16) % (1U << 14); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (14 - 12); + out++; + *out = (inl >> 12) % (1U << 14); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (14 - 8); + out++; + *out = (inl >> 8) % (1U << 14); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (14 - 4); + out++; + *out = (inl >> 4) % (1U << 14); + out++; + *out = (inl >> 18); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack15_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 15); + out++; + *out = (inl >> 15) % (1U << 15); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (15 - 13); + out++; + *out = (inl >> 13) % (1U << 15); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (15 - 11); + out++; + *out = (inl >> 11) % (1U << 15); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (15 - 9); + out++; + *out = (inl >> 9) % (1U << 15); + 
out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (15 - 7); + out++; + *out = (inl >> 7) % (1U << 15); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (15 - 5); + out++; + *out = (inl >> 5) % (1U << 15); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (15 - 3); + out++; + *out = (inl >> 3) % (1U << 15); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (15 - 1); + out++; + *out = (inl >> 1) % (1U << 15); + out++; + *out = (inl >> 16) % (1U << 15); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (15 - 14); + out++; + *out = (inl >> 14) % (1U << 15); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (15 - 12); + out++; + *out = (inl >> 12) % (1U << 15); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (15 - 10); + out++; + *out = (inl >> 10) % (1U << 15); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (15 - 8); + out++; + *out = (inl >> 8) % (1U << 15); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (15 - 6); + out++; + *out = (inl >> 6) % (1U << 15); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (15 - 4); + out++; + *out = (inl >> 4) % (1U << 15); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (15 - 2); + out++; + *out = (inl >> 2) % (1U << 15); + out++; + *out = (inl >> 17); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack16_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 16); + out++; + *out = (inl >> 16); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack17_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (17 - 2); + out++; + *out = (inl >> 2) % (1U << 17); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (17 - 4); + out++; + *out = (inl >> 4) % (1U << 17); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (17 - 6); + out++; + *out = (inl >> 6) % (1U << 17); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (17 - 8); + out++; + *out = (inl >> 8) % (1U << 17); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (17 - 10); + out++; + *out = (inl >> 10) % (1U << 17); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (17 - 12); + out++; + *out = (inl >> 12) % (1U << 17); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (17 - 14); + out++; + *out = (inl >> 14) % (1U << 17); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (17 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (17 - 1); + out++; + *out = (inl >> 1) % (1U << 17); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (17 - 3); + out++; + *out = (inl >> 3) % 
(1U << 17); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (17 - 5); + out++; + *out = (inl >> 5) % (1U << 17); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (17 - 7); + out++; + *out = (inl >> 7) % (1U << 17); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (17 - 9); + out++; + *out = (inl >> 9) % (1U << 17); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (17 - 11); + out++; + *out = (inl >> 11) % (1U << 17); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (17 - 13); + out++; + *out = (inl >> 13) % (1U << 17); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (17 - 15); + out++; + *out = (inl >> 15); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack18_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (18 - 4); + out++; + *out = (inl >> 4) % (1U << 18); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (18 - 8); + out++; + *out = (inl >> 8) % (1U << 18); + out++; + *out = (inl >> 26); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (18 - 12); + out++; + *out = (inl >> 12) % (1U << 18); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (18 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (18 - 2); + out++; + *out = (inl >> 2) % (1U << 18); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (18 - 6); + out++; + *out = (inl >> 6) % (1U << 18); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (18 - 10); + out++; + *out = (inl >> 10) % (1U << 18); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (18 - 14); + out++; + *out = (inl >> 14); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack19_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (19 - 6); + out++; + *out = (inl >> 6) % (1U << 19); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (19 - 12); + out++; + *out = (inl >> 12) % (1U << 19); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (19 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (19 - 5); + out++; + *out = (inl >> 5) % (1U << 19); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (19 - 11); + out++; + *out = (inl >> 11) % (1U << 19); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (19 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (19 - 4); + out++; + *out = (inl >> 4) % (1U << 19); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (19 - 10); + out++; + *out = (inl >> 10) % (1U << 19); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (19 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (19 - 3); + out++; + *out = (inl >> 3) % (1U << 19); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (19 - 9); + out++; + *out = (inl >> 9) % (1U << 19); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << 
(19 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (19 - 2); + out++; + *out = (inl >> 2) % (1U << 19); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (19 - 8); + out++; + *out = (inl >> 8) % (1U << 19); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (19 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (19 - 1); + out++; + *out = (inl >> 1) % (1U << 19); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (19 - 7); + out++; + *out = (inl >> 7) % (1U << 19); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (19 - 13); + out++; + *out = (inl >> 13); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack20_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (20 - 8); + out++; + *out = (inl >> 8) % (1U << 20); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (20 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (20 - 4); + out++; + *out = (inl >> 4) % (1U << 20); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (20 - 12); + out++; + *out = (inl >> 12); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack21_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (21 - 10); + out++; + *out = (inl >> 10) % (1U << 21); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (21 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (21 - 9); + out++; + *out = (inl >> 9) % (1U << 21); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (21 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (21 - 8); + out++; + *out = (inl >> 8) % (1U << 21); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (21 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (21 - 7); + out++; + *out = (inl >> 7) % (1U << 21); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (21 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (21 - 6); + out++; + *out = (inl >> 6) % (1U << 21); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (21 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (21 - 5); + out++; + *out = (inl >> 5) % (1U << 21); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << 
(21 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (21 - 4); + out++; + *out = (inl >> 4) % (1U << 21); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (21 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (21 - 3); + out++; + *out = (inl >> 3) % (1U << 21); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (21 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (21 - 2); + out++; + *out = (inl >> 2) % (1U << 21); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (21 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (21 - 1); + out++; + *out = (inl >> 1) % (1U << 21); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (21 - 11); + out++; + *out = (inl >> 11); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack22_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (22 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (22 - 2); + out++; + *out = (inl >> 2) % (1U << 22); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (22 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (22 - 4); + out++; + *out = (inl >> 4) % (1U << 22); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (22 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (22 - 6); + out++; + *out = (inl >> 6) % (1U << 22); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (22 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (22 - 8); + out++; + *out = (inl >> 8) % (1U << 22); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (22 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (22 - 10); + out++; + *out = (inl >> 10); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack23_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (23 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (23 - 5); + out++; + *out = (inl >> 5) % (1U << 23); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (23 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (23 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (23 - 1); + out++; + *out = (inl >> 1) % (1U << 23); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (23 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (23 - 6); + out++; + *out = (inl >> 6) % (1U << 23); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (23 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (23 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (23 - 2); + out++; + *out = (inl >> 2) % (1U << 23); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (23 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (23 - 7); + out++; + *out = (inl >> 7) % (1U << 23); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (23 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (23 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (23 - 3); + out++; + *out = (inl >> 3) % (1U << 23); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (23 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (23 - 8); + out++; + *out = (inl >> 8) % (1U << 23); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (23 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (23 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (23 - 4); + out++; + *out = (inl >> 4) % (1U << 23); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (23 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (23 - 9); + out++; + *out = (inl >> 9); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack24_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + 
*out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (24 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (24 - 8); + out++; + *out = (inl >> 8); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack25_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (25 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (25 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (25 - 4); + out++; + *out = (inl >> 4) % (1U << 25); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out 
|= (inl % (1U << 22)) << (25 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (25 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (25 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (25 - 1); + out++; + *out = (inl >> 1) % (1U << 25); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (25 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (25 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (25 - 5); + out++; + *out = (inl >> 5) % (1U << 25); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (25 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (25 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (25 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (25 - 2); + out++; + *out = (inl >> 2) % (1U << 25); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (25 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (25 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (25 - 6); + out++; + *out = (inl >> 6) % (1U << 25); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (25 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (25 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (25 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (25 - 3); + out++; + *out = (inl >> 3) % (1U << 25); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (25 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (25 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (25 - 7); + out++; + *out = (inl >> 7); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack26_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl >> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (26 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (26 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (26 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (26 - 2); + out++; + *out = (inl >> 2) % (1U << 26); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (26 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (26 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (26 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (26 - 4); + out++; + *out = (inl 
>> 4) % (1U << 26); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (26 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (26 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (26 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (26 - 6); + out++; + *out = (inl >> 6); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack27_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (27 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (27 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (27 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (27 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (27 - 2); + out++; + *out = (inl >> 2) % (1U << 27); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (27 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (27 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (27 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (27 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (27 - 4); + out++; + *out = (inl >> 4) % (1U << 27); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (27 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (27 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (27 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (27 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (27 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (27 - 1); + out++; + *out = (inl >> 1) % (1U << 27); + 
out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (27 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (27 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (27 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (27 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (27 - 3); + out++; + *out = (inl >> 3) % (1U << 27); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (27 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (27 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (27 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (27 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (27 - 5); + out++; + *out = (inl >> 5); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack28_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (28 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (28 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (28 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (28 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (28 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (28 - 4); + out++; + *out = (inl >> 4); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack29_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (29 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (29 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (29 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (29 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (29 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (29 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (29 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (29 - 5); + out++; + *out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (29 - 2); + out++; + *out = (inl >> 2) % (1U << 29); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (29 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (29 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (29 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (29 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (29 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (29 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (29 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (29 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (29 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (29 - 1); + out++; + *out = (inl >> 1) % (1U << 29); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (29 - 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (29 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (29 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (29 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (29 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (29 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (29 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = 
arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (29 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (29 - 3); + out++; + *out = (inl >> 3); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack30_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (30 - 2); + out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0) % (1U << 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (30 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (30 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (30 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (30 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (30 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (30 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (30 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (30 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (30 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (30 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (30 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (30 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (30 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (30 - 2); + out++; + *out = (inl >> 2); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack31_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0) % (1U << 31); + out++; + *out = (inl >> 31); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 30)) << (31 - 30); + out++; + *out = (inl >> 30); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 29)) << (31 - 29); + out++; + *out = (inl >> 29); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 28)) << (31 - 28); + out++; + *out = (inl >> 28); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 27)) << (31 - 27); + out++; + *out = (inl >> 27); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 26)) << (31 - 26); + out++; + *out = (inl >> 26); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 25)) << (31 - 25); + out++; + *out = (inl >> 25); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 24)) << (31 - 24); + out++; + *out = (inl >> 24); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 23)) << (31 - 23); + out++; + *out = (inl >> 23); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 22)) << (31 - 22); + out++; + *out = (inl >> 22); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 21)) << (31 - 21); + out++; + *out = (inl >> 21); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 20)) << (31 - 20); + out++; + *out = (inl >> 20); + ++in; + inl = 
util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 19)) << (31 - 19); + out++; + *out = (inl >> 19); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 18)) << (31 - 18); + out++; + *out = (inl >> 18); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 17)) << (31 - 17); + out++; + *out = (inl >> 17); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 16)) << (31 - 16); + out++; + *out = (inl >> 16); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 15)) << (31 - 15); + out++; + *out = (inl >> 15); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 14)) << (31 - 14); + out++; + *out = (inl >> 14); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 13)) << (31 - 13); + out++; + *out = (inl >> 13); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 12)) << (31 - 12); + out++; + *out = (inl >> 12); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 11)) << (31 - 11); + out++; + *out = (inl >> 11); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 10)) << (31 - 10); + out++; + *out = (inl >> 10); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 9)) << (31 - 9); + out++; + *out = (inl >> 9); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 8)) << (31 - 8); + out++; + *out = (inl >> 8); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 7)) << (31 - 7); + out++; + *out = (inl >> 7); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 6)) << (31 - 6); + out++; + *out = (inl >> 6); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 5)) << (31 - 5); + out++; + *out = (inl >> 5); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 4)) << (31 - 4); + out++; + *out = (inl >> 4); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 3)) << (31 - 3); + out++; + *out = (inl >> 3); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 2)) << (31 - 2); + out++; + *out = (inl >> 2); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out |= (inl % (1U << 1)) << (31 - 1); + out++; + *out = (inl >> 1); + ++in; + out++; + + return in; +} + +inline const uint32_t* unpack32_32(const uint32_t* in, uint32_t* out) { + uint32_t inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + 
inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + inl = util::SafeLoad(in); + inl = arrow::bit_util::FromLittleEndian(inl); + out++; + *out = (inl >> 0); + ++in; + out++; + + return in; +} + +inline const uint32_t* nullunpacker32(const uint32_t* in, uint32_t* out) { + for (int k = 0; k < 32; ++k) { + out[k] = 0; + } + return in; +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h new file 
mode 100644
index 0000000000000000000000000000000000000000..214c7551b6c76bc95a7d71eb8b8c31bd96d4b838
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+
+namespace util {
+
+/// \brief The sum of bytes in each buffer referenced by the array
+///
+/// Note: An array may only reference a portion of a buffer.
+/// This method will overestimate in this case and return the
+/// byte size of the entire buffer.
+/// Note: If a buffer is referenced multiple times then it will
+/// only be counted once.
+ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data);
+/// \brief The sum of bytes in each buffer referenced by the array
+/// \see TotalBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT int64_t TotalBufferSize(const Array& array);
+/// \brief The sum of bytes in each buffer referenced by the array
+/// \see TotalBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array);
+/// \brief The sum of bytes in each buffer referenced by the batch
+/// \see TotalBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch);
+/// \brief The sum of bytes in each buffer referenced by the table
+/// \see TotalBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT int64_t TotalBufferSize(const Table& table);
+
+/// \brief Calculate the buffer ranges referenced by the array
+///
+/// These ranges will take into account array offsets
+///
+/// The ranges may contain duplicates
+///
+/// Dictionary arrays will ignore the offset of their containing array
+///
+/// The return value will be a struct array corresponding to the schema:
+/// schema({field("start", uint64()), field("offset", uint64()), field("length",
+/// uint64())})
+ARROW_EXPORT Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData& array_data);
+
+/// \brief Returns the sum of bytes from all buffer ranges referenced
+///
+/// Unlike TotalBufferSize this method will account for array
+/// offsets.
+///
+/// If buffers are shared between arrays then the shared
+/// portion will be counted multiple times.
+///
+/// Dictionary arrays will always be counted in their entirety
+/// even if the array only references a portion of the dictionary.
+ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ArrayData& array_data);
+/// \brief Returns the sum of bytes from all buffer ranges referenced
+/// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Array& array_data);
+/// \brief Returns the sum of bytes from all buffer ranges referenced
+/// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ChunkedArray& array_data);
+/// \brief Returns the sum of bytes from all buffer ranges referenced
+/// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const RecordBatch& array_data);
+/// \brief Returns the sum of bytes from all buffer ranges referenced
+/// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Table& array_data);
+
+} // namespace util
+
+} // namespace arrow
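For orientation: the contrast between TotalBufferSize and ReferencedBufferSize is easiest to see on a sliced array. A minimal sketch using only the declarations above; the function and variable names are illustrative, not part of the header:

    #include <memory>

    #include "arrow/api.h"
    #include "arrow/util/byte_size.h"

    // A slice still points at its parent's buffers, so TotalBufferSize()
    // reports the full buffers while ReferencedBufferSize() honors the
    // slice's offset and length.
    arrow::Status CompareSizes(const std::shared_ptr<arrow::Array>& array) {
      std::shared_ptr<arrow::Array> half = array->Slice(0, array->length() / 2);
      int64_t total = arrow::util::TotalBufferSize(*half);
      ARROW_ASSIGN_OR_RAISE(int64_t referenced,
                            arrow::util::ReferencedBufferSize(*half));
      // For a simple sliced view we expect referenced <= total.
      return referenced <= total ? arrow::Status::OK()
                                 : arrow::Status::Invalid("unexpected sizes");
    }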
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7bf4d5e12d02d349c3a0e0fce43f6be5ef4d585
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h
@@ -0,0 +1,241 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <optional>
+#include <string>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min();
+
+/// \brief Streaming compressor interface
+///
+class ARROW_EXPORT Compressor {
+ public:
+  virtual ~Compressor() = default;
+
+  struct CompressResult {
+    int64_t bytes_read;
+    int64_t bytes_written;
+  };
+  struct FlushResult {
+    int64_t bytes_written;
+    bool should_retry;
+  };
+  struct EndResult {
+    int64_t bytes_written;
+    bool should_retry;
+  };
+
+  /// \brief Compress some input.
+  ///
+  /// If bytes_read is 0 on return, then a larger output buffer should be supplied.
+  virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input,
+                                          int64_t output_len, uint8_t* output) = 0;
+
+  /// \brief Flush part of the compressed output.
+  ///
+  /// If should_retry is true on return, Flush() should be called again
+  /// with a larger buffer.
+  virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0;
+
+  /// \brief End compressing, doing whatever is necessary to end the stream.
+  ///
+  /// If should_retry is true on return, End() should be called again
+  /// with a larger buffer. Otherwise, the Compressor should not be used anymore.
+  ///
+  /// End() implies Flush().
+  virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0;
+
+  // XXX add methods for buffer size heuristics?
+};
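A sketch of the grow-and-retry protocol the comments above describe, assuming a Compressor obtained from Codec::MakeCompressor() further below; names are illustrative:

    #include <cstdint>
    #include <vector>

    #include "arrow/status.h"
    #include "arrow/util/compression.h"

    // Feed the whole input through a streaming Compressor, growing the
    // output buffer whenever the compressor cannot make progress.
    arrow::Status CompressAll(arrow::util::Compressor* compressor,
                              const uint8_t* input, int64_t input_len,
                              std::vector<uint8_t>* sink) {
      std::vector<uint8_t> buf(4096);
      while (input_len > 0) {
        ARROW_ASSIGN_OR_RAISE(
            auto res, compressor->Compress(input_len, input,
                                           static_cast<int64_t>(buf.size()),
                                           buf.data()));
        sink->insert(sink->end(), buf.data(), buf.data() + res.bytes_written);
        input += res.bytes_read;
        input_len -= res.bytes_read;
        if (res.bytes_read == 0) buf.resize(buf.size() * 2);  // need a larger buffer
      }
      // End() finalizes the stream (and implies Flush()); retry with a
      // larger buffer while should_retry is set.
      while (true) {
        ARROW_ASSIGN_OR_RAISE(
            auto end, compressor->End(static_cast<int64_t>(buf.size()), buf.data()));
        sink->insert(sink->end(), buf.data(), buf.data() + end.bytes_written);
        if (!end.should_retry) return arrow::Status::OK();
        buf.resize(buf.size() * 2);
      }
    }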
+/// \brief Streaming decompressor interface
+///
+class ARROW_EXPORT Decompressor {
+ public:
+  virtual ~Decompressor() = default;
+
+  struct DecompressResult {
+    // XXX is need_more_output necessary? (Brotli?)
+    int64_t bytes_read;
+    int64_t bytes_written;
+    bool need_more_output;
+  };
+
+  /// \brief Decompress some input.
+  ///
+  /// If need_more_output is true on return, a larger output buffer needs
+  /// to be supplied.
+  virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input,
+                                              int64_t output_len, uint8_t* output) = 0;
+
+  /// \brief Return whether the compressed stream is finished.
+  ///
+  /// This is a heuristic. If true is returned, then it is guaranteed
+  /// that the stream is finished. If false is returned, however, it may
+  /// simply be that the underlying library isn't able to provide the information.
+  virtual bool IsFinished() = 0;
+
+  /// \brief Reinitialize decompressor, making it ready for a new compressed stream.
+  virtual Status Reset() = 0;
+
+  // XXX add methods for buffer size heuristics?
+};
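The matching consumer loop, under the same assumptions and with the same illustrative names as the compression sketch above; need_more_output signals that the output buffer must grow before retrying:

    // Drain a compressed stream through a streaming Decompressor.
    arrow::Status DecompressAll(arrow::util::Decompressor* decompressor,
                                const uint8_t* input, int64_t input_len,
                                std::vector<uint8_t>* sink) {
      std::vector<uint8_t> buf(4096);
      while (input_len > 0 && !decompressor->IsFinished()) {
        ARROW_ASSIGN_OR_RAISE(
            auto res, decompressor->Decompress(input_len, input,
                                               static_cast<int64_t>(buf.size()),
                                               buf.data()));
        sink->insert(sink->end(), buf.data(), buf.data() + res.bytes_written);
        input += res.bytes_read;
        input_len -= res.bytes_read;
        if (res.need_more_output) buf.resize(buf.size() * 2);  // grow, then retry
      }
      return arrow::Status::OK();
    }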
+/// \brief Compression codec options
+class ARROW_EXPORT CodecOptions {
+ public:
+  explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel)
+      : compression_level(compression_level) {}
+
+  virtual ~CodecOptions() = default;
+
+  int compression_level;
+};
+
+// ----------------------------------------------------------------------
+// GZip codec options implementation
+
+enum class GZipFormat {
+  ZLIB,
+  DEFLATE,
+  GZIP,
+};
+
+class ARROW_EXPORT GZipCodecOptions : public CodecOptions {
+ public:
+  GZipFormat gzip_format = GZipFormat::GZIP;
+  std::optional<int> window_bits;
+};
+
+// ----------------------------------------------------------------------
+// brotli codec options implementation
+
+class ARROW_EXPORT BrotliCodecOptions : public CodecOptions {
+ public:
+  std::optional<int> window_bits;
+};
+
+/// \brief Compression codec
+class ARROW_EXPORT Codec {
+ public:
+  virtual ~Codec() = default;
+
+  /// \brief Return special value to indicate that a codec implementation
+  /// should use its default compression level
+  static int UseDefaultCompressionLevel();
+
+  /// \brief Return a string name for compression type
+  static const std::string& GetCodecAsString(Compression::type t);
+
+  /// \brief Return compression type for name (all lower case)
+  static Result<Compression::type> GetCompressionType(const std::string& name);
+
+  /// \brief Create a codec for the given compression algorithm with CodecOptions
+  static Result<std::unique_ptr<Codec>> Create(
+      Compression::type codec, const CodecOptions& codec_options = CodecOptions{});
+
+  /// \brief Create a codec for the given compression algorithm
+  static Result<std::unique_ptr<Codec>> Create(Compression::type codec,
+                                               int compression_level);
+
+  /// \brief Return true if support for indicated codec has been enabled
+  static bool IsAvailable(Compression::type codec);
+
+  /// \brief Return true if indicated codec supports setting a compression level
+  static bool SupportsCompressionLevel(Compression::type codec);
+
+  /// \brief Return the smallest supported compression level for the codec
+  /// Note: This function creates a temporary Codec instance
+  static Result<int> MinimumCompressionLevel(Compression::type codec);
+
+  /// \brief Return the largest supported compression level for the codec
+  /// Note: This function creates a temporary Codec instance
+  static Result<int> MaximumCompressionLevel(Compression::type codec);
+
+  /// \brief Return the default compression level
+  /// Note: This function creates a temporary Codec instance
+  static Result<int> DefaultCompressionLevel(Compression::type codec);
+
+  /// \brief Return the smallest supported compression level
+  virtual int minimum_compression_level() const = 0;
+
+  /// \brief Return the largest supported compression level
+  virtual int maximum_compression_level() const = 0;
+
+  /// \brief Return the default compression level
+  virtual int default_compression_level() const = 0;
+
+  /// \brief One-shot decompression function
+  ///
+  /// output_buffer_len must be correct and therefore be obtained in advance.
+  /// The actual decompressed length is returned.
+  ///
+  /// \note One-shot decompression is not always compatible with streaming
+  /// compression. Depending on the codec (e.g. LZ4), different formats may
+  /// be used.
+  virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input,
+                                     int64_t output_buffer_len,
+                                     uint8_t* output_buffer) = 0;
+
+  /// \brief One-shot compression function
+  ///
+  /// output_buffer_len must first have been computed using MaxCompressedLen().
+  /// The actual compressed length is returned.
+  ///
+  /// \note One-shot compression is not always compatible with streaming
+  /// decompression. Depending on the codec (e.g. LZ4), different formats may
+  /// be used.
+  virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input,
+                                   int64_t output_buffer_len,
+                                   uint8_t* output_buffer) = 0;
+
+  virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0;
+
+  /// \brief Create a streaming compressor instance
+  virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0;
+
+  /// \brief Create a streaming decompressor instance
+  virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0;
+
+  /// \brief This Codec's compression type
+  virtual Compression::type compression_type() const = 0;
+
+  /// \brief The name of this Codec's compression type
+  const std::string& name() const { return GetCodecAsString(compression_type()); }
+
+  /// \brief This Codec's compression level, if applicable
+  virtual int compression_level() const { return UseDefaultCompressionLevel(); }
+
+ private:
+  /// \brief Initializes the codec's resources.
+  virtual Status Init();
+};
+
+} // namespace util
+} // namespace arrow
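A one-shot round trip through the Codec factory above. This is a sketch: it assumes ZSTD support was enabled at build time (Codec::IsAvailable can verify), and the function name is invented for illustration:

    #include <memory>
    #include <vector>

    #include "arrow/util/compression.h"

    arrow::Status RoundTrip(const uint8_t* data, int64_t len) {
      ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::util::Codec> codec,
                            arrow::util::Codec::Create(arrow::Compression::ZSTD));
      // Size the output with MaxCompressedLen(), as the Compress() contract requires.
      std::vector<uint8_t> compressed(codec->MaxCompressedLen(len, data));
      ARROW_ASSIGN_OR_RAISE(
          int64_t compressed_len,
          codec->Compress(len, data, static_cast<int64_t>(compressed.size()),
                          compressed.data()));
      // One-shot decompression needs the exact decompressed length up front.
      std::vector<uint8_t> restored(len);
      ARROW_ASSIGN_OR_RAISE(int64_t restored_len,
                            codec->Decompress(compressed_len, compressed.data(),
                                              len, restored.data()));
      return restored_len == len ? arrow::Status::OK()
                                 : arrow::Status::Invalid("length mismatch");
    }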
+
+} // namespace util
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h
new file mode 100644
index 0000000000000000000000000000000000000000..c23d6ccd9886e4539d52d537abb85da1dcc93385
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h
@@ -0,0 +1,411 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/array.h"
+#include "arrow/chunked_array.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/checked_cast.h"
+#include "arrow/visit_type_inline.h"
+
+namespace arrow {
+namespace internal {
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+    std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+    MemoryPool* pool);
+
+template <typename Input, typename Options>
+class Converter {
+ public:
+  using Self = Converter<Input, Options>;
+  using InputType = Input;
+  using OptionsType = Options;
+
+  virtual ~Converter() = default;
+
+  Status Construct(std::shared_ptr<DataType> type, OptionsType options,
+                   MemoryPool* pool) {
+    type_ = std::move(type);
+    options_ = std::move(options);
+    return Init(pool);
+  }
+
+  virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
+
+  virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+    return Status::NotImplemented("Extend");
+  }
+
+  virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
+                              int64_t offset = 0) {
+    return Status::NotImplemented("ExtendMasked");
+  }
+
+  const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
+
+  const std::shared_ptr<DataType>& type() const { return type_; }
+
+  OptionsType options() const { return options_; }
+
+  bool may_overflow() const { return may_overflow_; }
+
+  bool rewind_on_overflow() const { return rewind_on_overflow_; }
+
+  virtual Status Reserve(int64_t additional_capacity) {
+    return builder_->Reserve(additional_capacity);
+  }
+
+  Status AppendNull() { return builder_->AppendNull(); }
+
+  virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
+
+  virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
+    ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
+    return arr->Slice(0, length);
+  }
+
+  virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+    ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
+    std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
+    return std::make_shared<ChunkedArray>(chunks);
+  }
+
+ protected:
+  virtual Status Init(MemoryPool* pool) { return Status::OK(); }
+
+  std::shared_ptr<DataType> type_;
+  std::shared_ptr<ArrayBuilder> builder_;
+  OptionsType options_;
+  bool may_overflow_ = false;
+  bool rewind_on_overflow_ = false;
+};
+
+template <typename ArrowType, typename BaseConverter>
+class PrimitiveConverter : public BaseConverter {
+ public:
+  using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+
+ protected:
+  Status Init(MemoryPool* pool) override {
+    this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
+    // Narrow variable-sized binary types may overflow
+    this->may_overflow_ = is_binary_like(this->type_->id());
+    primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
+    primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+    return Status::OK();
+  }
+
+  const ArrowType* primitive_type_;
+  BuilderType* primitive_builder_;
+};
+
+template <typename ArrowType, typename BaseConverter,
+          template <typename...> class ConverterTrait>
+class ListConverter : public BaseConverter {
+ public:
+  using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+  using ConverterType = typename ConverterTrait<ArrowType>::type;
+
+ protected:
+  Status Init(MemoryPool* pool) override {
+    list_type_ = checked_cast<const ArrowType*>(this->type_.get());
+    ARROW_ASSIGN_OR_RAISE(value_converter_,
+                          (MakeConverter<BaseConverter, ConverterTrait>(
+                              list_type_->value_type(), this->options_, pool)));
+    this->builder_ =
+        std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
+    list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+    // Narrow list types may overflow
+    this->may_overflow_ = this->rewind_on_overflow_ =
+        sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
+    return Status::OK();
+  }
+
+  const ArrowType* list_type_;
+  BuilderType* list_builder_;
+  std::unique_ptr<BaseConverter> value_converter_;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+class StructConverter : public BaseConverter {
+ public:
+  using ConverterType = typename ConverterTrait<StructType>::type;
+
+  Status Reserve(int64_t additional_capacity) override {
+    ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
+    for (const auto& child : children_) {
+      ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
+    }
+    return Status::OK();
+  }
+
+ protected:
+  Status Init(MemoryPool* pool) override {
+    std::unique_ptr<BaseConverter> child_converter;
+    std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
+
+    struct_type_ = checked_cast<const StructType*>(this->type_.get());
+    for (const auto& field : struct_type_->fields()) {
+      ARROW_ASSIGN_OR_RAISE(child_converter,
+                            (MakeConverter<BaseConverter, ConverterTrait>(
+                                field->type(), this->options_, pool)));
+      this->may_overflow_ |= child_converter->may_overflow();
+      this->rewind_on_overflow_ = this->may_overflow_;
+      child_builders.push_back(child_converter->builder());
+      children_.push_back(std::move(child_converter));
+    }
+
+    this->builder_ =
+        std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
+    struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
+
+    return Status::OK();
+  }
+
+  const StructType* struct_type_;
+  StructBuilder* struct_builder_;
+  std::vector<std::unique_ptr<BaseConverter>> children_;
+};
+
+template <typename ValueType, typename BaseConverter>
+class DictionaryConverter : public BaseConverter {
+ public:
+  using BuilderType = DictionaryBuilder<ValueType>;
+
+ protected:
+  Status Init(MemoryPool* pool) override {
+    std::unique_ptr<ArrayBuilder> builder;
+    ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
+    this->builder_ = std::move(builder);
+    this->may_overflow_ = false;
+    dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
+    value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
+    value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+    return Status::OK();
+  }
+
+  const DictionaryType* dict_type_;
+  const ValueType* value_type_;
+  BuilderType* value_builder_;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+struct MakeConverterImpl {
+  template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
+  Status Visit(const T&) {
+    out.reset(new ConverterType());
+    return out->Construct(std::move(type), std::move(options), pool);
+  }
+
+  Status Visit(const DictionaryType& t) {
+    switch (t.value_type()->id()) {
+#define DICTIONARY_CASE(TYPE)                                                       \
+  case TYPE::type_id:                                                               \
+    out = std::make_unique<                                                         \
+        typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
+    break;
+      DICTIONARY_CASE(BooleanType);
+      DICTIONARY_CASE(Int8Type);
+      DICTIONARY_CASE(Int16Type);
+      DICTIONARY_CASE(Int32Type);
+      DICTIONARY_CASE(Int64Type);
+      DICTIONARY_CASE(UInt8Type);
+      DICTIONARY_CASE(UInt16Type);
+      DICTIONARY_CASE(UInt32Type);
+      DICTIONARY_CASE(UInt64Type);
+      DICTIONARY_CASE(FloatType);
+      DICTIONARY_CASE(DoubleType);
+      DICTIONARY_CASE(BinaryType);
+      DICTIONARY_CASE(StringType);
+      DICTIONARY_CASE(FixedSizeBinaryType);
+#undef DICTIONARY_CASE
+      default:
+        return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(),
+                                      " not implemented");
+    }
+    return out->Construct(std::move(type), std::move(options), pool);
+  }
+
+  Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
+
+  std::shared_ptr<DataType> type;
+  typename BaseConverter::OptionsType options;
+  MemoryPool* pool;
+  std::unique_ptr<BaseConverter> out;
+};
+
+template <typename BaseConverter, template <typename...> class ConverterTrait>
+static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+    std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+    MemoryPool* pool) {
+  MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
+      std::move(type), std::move(options), pool, NULLPTR};
+  ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
+  return std::move(visitor.out);
+}
+
+template <typename Converter>
+class Chunker {
+ public:
+  using InputType = typename Converter::InputType;
+
+  explicit Chunker(std::unique_ptr<Converter> converter)
+      : converter_(std::move(converter)) {}
+
+  Status Reserve(int64_t additional_capacity) {
+    ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
+    reserved_ += additional_capacity;
+    return Status::OK();
+  }
+
+  Status AppendNull() {
+    auto status = converter_->AppendNull();
+    if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+      if (converter_->builder()->length() == 0) {
+        // Builder length == 0 means the individual element is too large to append.
+        // In this case, no need to try again.
+        return status;
+      }
+      ARROW_RETURN_NOT_OK(FinishChunk());
+      return converter_->AppendNull();
+    }
+    ++length_;
+    return status;
+  }
+
+  Status Append(InputType value) {
+    auto status = converter_->Append(value);
+    if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+      if (converter_->builder()->length() == 0) {
+        return status;
+      }
+      ARROW_RETURN_NOT_OK(FinishChunk());
+      return Append(value);
+    }
+    ++length_;
+    return status;
+  }
+
+  Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+    while (offset < size) {
+      auto length_before = converter_->builder()->length();
+      auto status = converter_->Extend(values, size, offset);
+      auto length_after = converter_->builder()->length();
+      auto num_converted = length_after - length_before;
+
+      offset += num_converted;
+      length_ += num_converted;
+
+      if (status.IsCapacityError()) {
+        if (converter_->builder()->length() == 0) {
+          // Builder length == 0 means the individual element is too large to append.
+          // In this case, no need to try again.
+          return status;
+        } else if (converter_->rewind_on_overflow()) {
+          // The list-like and binary-like conversion paths may raise a capacity error,
+          // we need to handle them differently. While the binary-like converters check
+          // the capacity before append/extend the list-like converters just check after
+          // append/extend. Thus depending on the implementation semantics we may need
+          // to rewind (slice) the output chunk by one.
+          length_ -= 1;
+          offset -= 1;
+        }
+        ARROW_RETURN_NOT_OK(FinishChunk());
+      } else if (!status.ok()) {
+        return status;
+      }
+    }
+    return Status::OK();
+  }
+
+  Status ExtendMasked(InputType values, InputType mask, int64_t size,
+                      int64_t offset = 0) {
+    while (offset < size) {
+      auto length_before = converter_->builder()->length();
+      auto status = converter_->ExtendMasked(values, mask, size, offset);
+      auto length_after = converter_->builder()->length();
+      auto num_converted = length_after - length_before;
+
+      offset += num_converted;
+      length_ += num_converted;
+
+      if (status.IsCapacityError()) {
+        if (converter_->builder()->length() == 0) {
+          // Builder length == 0 means the individual element is too large to append.
+          // In this case, no need to try again.
+          return status;
+        } else if (converter_->rewind_on_overflow()) {
+          // The list-like and binary-like conversion paths may raise a capacity error,
+          // we need to handle them differently. While the binary-like converters check
+          // the capacity before append/extend the list-like converters just check after
+          // append/extend. Thus depending on the implementation semantics we may need
+          // to rewind (slice) the output chunk by one.
+          length_ -= 1;
+          offset -= 1;
+        }
+        ARROW_RETURN_NOT_OK(FinishChunk());
+      } else if (!status.ok()) {
+        return status;
+      }
+    }
+    return Status::OK();
+  }
+
+  Status FinishChunk() {
+    ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
+    chunks_.push_back(chunk);
+    // Reserve space for the remaining items.
+    // Besides being an optimization, it is also required if the converter's
+    // implementation relies on unsafe builder methods in converter->Append().
+    auto remaining = reserved_ - length_;
+    Reset();
+    return Reserve(remaining);
+  }
+
+  Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+    ARROW_RETURN_NOT_OK(FinishChunk());
+    return std::make_shared<ChunkedArray>(chunks_);
+  }
+
+ protected:
+  void Reset() {
+    converter_->builder()->Reset();
+    length_ = 0;
+    reserved_ = 0;
+  }
+
+  int64_t length_ = 0;
+  int64_t reserved_ = 0;
+  std::unique_ptr<Converter> converter_;
+  std::vector<std::shared_ptr<Array>> chunks_;
+};
+
+template <typename T>
+static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
+  return std::make_unique<Chunker<T>>(std::move(converter));
+}
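+
+// Usage sketch (editorial addition, not part of the upstream header): the
+// typical flow pairs MakeConverter with a Chunker so that builder capacity
+// overflow transparently starts a new chunk. "MyConverter" and "MyTrait" are
+// hypothetical names for a concrete Converter subclass and its trait.
+//
+//   ARROW_ASSIGN_OR_RAISE(auto converter,
+//                         (MakeConverter<MyConverter, MyTrait>(type, options, pool)));
+//   ARROW_ASSIGN_OR_RAISE(auto chunker, MakeChunker(std::move(converter)));
+//   ARROW_RETURN_NOT_OK(chunker->Reserve(num_values));
+//   ARROW_RETURN_NOT_OK(chunker->Extend(values, num_values));
+//   ARROW_ASSIGN_OR_RAISE(auto chunked_array, chunker->ToChunkedArray());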
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..949719b97ed84da6277139a70e22203706ed6055
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
+// set of functions needed for Apache Arrow / Apache parquet-cpp
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// CpuInfo is an interface to query for cpu information at runtime. The caller can
+/// ask for the sizes of the caches and what hardware features are supported.
+/// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
+/// /sys/devices)
+class ARROW_EXPORT CpuInfo {
+ public:
+  ~CpuInfo();
+
+  /// x86 features
+  static constexpr int64_t SSSE3 = (1LL << 0);
+  static constexpr int64_t SSE4_1 = (1LL << 1);
+  static constexpr int64_t SSE4_2 = (1LL << 2);
+  static constexpr int64_t POPCNT = (1LL << 3);
+  static constexpr int64_t AVX = (1LL << 4);
+  static constexpr int64_t AVX2 = (1LL << 5);
+  static constexpr int64_t AVX512F = (1LL << 6);
+  static constexpr int64_t AVX512CD = (1LL << 7);
+  static constexpr int64_t AVX512VL = (1LL << 8);
+  static constexpr int64_t AVX512DQ = (1LL << 9);
+  static constexpr int64_t AVX512BW = (1LL << 10);
+  static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
+  static constexpr int64_t BMI1 = (1LL << 11);
+  static constexpr int64_t BMI2 = (1LL << 12);
+
+  /// Arm features
+  static constexpr int64_t ASIMD = (1LL << 32);
+
+  /// Cache enums for L1 (data), L2 and L3
+  enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
+
+  /// CPU vendors
+  enum class Vendor { Unknown, Intel, AMD };
+
+  static const CpuInfo* GetInstance();
+
+  /// Returns all the flags for this cpu
+  int64_t hardware_flags() const;
+
+  /// Returns the number of cores (including hyper-threaded) on this machine.
+  int num_cores() const;
+
+  /// Returns the vendor of the cpu.
+  Vendor vendor() const;
+
+  /// Returns the model name of the cpu (e.g. Intel i7-2600)
+  const std::string& model_name() const;
+
+  /// Returns the size of the cache in KB at this cache level
+  int64_t CacheSize(CacheLevel level) const;
+
+  /// \brief Returns whether or not the given feature is enabled.
+  ///
+  /// IsSupported() is true iff IsDetected() is also true and the feature
+  /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
+  /// environment variable).
+  bool IsSupported(int64_t flags) const;
+
+  /// Returns whether or not the given feature is available on the CPU.
+  bool IsDetected(int64_t flags) const;
+
+  /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
+  /// and terminate.
+  void VerifyCpuRequirements() const;
+
+  /// Toggle a hardware feature on and off. It is not valid to turn on a feature
+  /// that the underlying hardware cannot support. This is useful for testing.
+  void EnableFeature(int64_t flag, bool enable);
+
+  bool HasEfficientBmi2() const {
+    // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
+    return vendor() == Vendor::Intel && IsSupported(BMI2);
+  }
+
+ private:
+  CpuInfo();
+
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h
new file mode 100644
index 0000000000000000000000000000000000000000..155cf7cfae1061feda9ae436a5f966b90cbabc6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// \brief Compute the CRC32 checksum of the given data
+///
+/// This function computes CRC32 with the polynomial 0x04C11DB7,
+/// as used in zlib and others (note this is different from CRC32C).
+/// To compute a running CRC32, pass the previous value in `prev`,
+/// otherwise `prev` should be 0.
+ARROW_EXPORT
+uint32_t crc32(uint32_t prev, const void* data, size_t length);
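+
+// Usage sketch (editorial addition, not part of the upstream header): chaining
+// calls to checksum a stream incrementally; buf1/buf2 are hypothetical buffers.
+//
+//   uint32_t checksum = crc32(0, buf1, len1);   // first chunk, prev == 0
+//   checksum = crc32(checksum, buf2, len2);     // continue from the previous value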
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed38a4dcf7ab87aad4db906dd8b6abc058387f8e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+ARROW_EXPORT
+void DebugTrap();
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fae9293f9e79891dcd85b536d697291289804ce5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <utility>
+#include <vector>
+
+#include "arrow/status.h"
+#include "arrow/util/cpu_info.h"
+
+namespace arrow {
+namespace internal {
+
+enum class DispatchLevel : int {
+  // These dispatch levels, corresponding to instruction set features,
+  // are sorted in increasing order of preference.
+  NONE = 0,
+  SSE4_2,
+  AVX2,
+  AVX512,
+  NEON,
+  MAX
+};
+
+/*
+  A facility for dynamic dispatch according to available DispatchLevel.
+
+  Typical use:
+
+    static void my_function_default(...);
+    static void my_function_avx2(...);
+
+    struct MyDynamicFunction {
+      using FunctionType = decltype(&my_function_default);
+
+      static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
+        return {
+          { DispatchLevel::NONE, my_function_default }
+#if defined(ARROW_HAVE_RUNTIME_AVX2)
+          , { DispatchLevel::AVX2, my_function_avx2 }
+#endif
+        };
+      }
+    };
+
+    void my_function(...) {
+      static DynamicDispatch<MyDynamicFunction> dispatch;
+      return dispatch.func(...);
+    }
+*/
+template <typename DynamicFunction>
+class DynamicDispatch {
+ protected:
+  using FunctionType = typename DynamicFunction::FunctionType;
+  using Implementation = std::pair<DispatchLevel, FunctionType>;
+
+ public:
+  DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
+
+  FunctionType func = {};
+
+ protected:
+  // Use the Implementation with the highest DispatchLevel
+  void Resolve(const std::vector<Implementation>& implementations) {
+    Implementation cur{DispatchLevel::NONE, {}};
+
+    for (const auto& impl : implementations) {
+      if (impl.first >= cur.first && IsSupported(impl.first)) {
+        // Higher (or same) level than current
+        cur = impl;
+      }
+    }
+
+    if (!cur.second) {
+      Status::Invalid("No appropriate implementation found").Abort();
+    }
+    func = cur.second;
+  }
+
+ private:
+  bool IsSupported(DispatchLevel level) const {
+    static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
+
+    switch (level) {
+      case DispatchLevel::NONE:
+        return true;
+      case DispatchLevel::SSE4_2:
+        return cpu_info->IsSupported(CpuInfo::SSE4_2);
+      case DispatchLevel::AVX2:
+        return cpu_info->IsSupported(CpuInfo::AVX2);
+      case DispatchLevel::AVX512:
+        return cpu_info->IsSupported(CpuInfo::AVX512);
+      default:
+        return false;
+    }
+  }
+};
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/double_conversion.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/double_conversion.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b07b1a2b9f295cbe01d02af5eb02775183f059d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/double_conversion.h
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
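+
+// Usage sketch (editorial addition, not part of the upstream header): the
+// aliases below re-export the vendored double-conversion API, e.g.:
+//
+//   char buf[128];
+//   arrow::util::double_conversion::StringBuilder sb(buf, sizeof(buf));
+//   const auto& conv =
+//       arrow::util::double_conversion::DoubleToStringConverter::EcmaScriptConverter();
+//   conv.ToShortest(0.1, &sb);  // sb.Finalize() then yields "0.1"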
+
+#pragma once
+
+#include "arrow/vendored/double-conversion/double-conversion.h"  // IWYU pragma: export
+
+namespace arrow {
+namespace util {
+namespace double_conversion {
+
+using ::arrow_vendored::double_conversion::DoubleToStringConverter;
+using ::arrow_vendored::double_conversion::StringBuilder;
+using ::arrow_vendored::double_conversion::StringToDoubleConverter;
+
+} // namespace double_conversion
+} // namespace util
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d394ba8b78017b8e06457510fc7748fc3c45f45
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h
@@ -0,0 +1,245 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#ifdef _WIN32
+#define ARROW_LITTLE_ENDIAN 1
+#else
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <machine/endian.h>  // IWYU pragma: keep
+#elif defined(sun) || defined(__sun)
+#include <sys/byteorder.h>  // IWYU pragma: keep
+#else
+#include <endian.h>  // IWYU pragma: keep
+#endif
+#
+#ifndef __BYTE_ORDER__
+#error "__BYTE_ORDER__ not defined"
+#endif
+#
+#ifndef __ORDER_LITTLE_ENDIAN__
+#error "__ORDER_LITTLE_ENDIAN__ not defined"
+#endif
+#
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define ARROW_LITTLE_ENDIAN 1
+#else
+#define ARROW_LITTLE_ENDIAN 0
+#endif
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h>  // IWYU pragma: keep
+#define ARROW_BYTE_SWAP64 _byteswap_uint64
+#define ARROW_BYTE_SWAP32 _byteswap_ulong
+#else
+#define ARROW_BYTE_SWAP64 __builtin_bswap64
+#define ARROW_BYTE_SWAP32 __builtin_bswap32
+#endif
+
+#include <algorithm>
+#include <array>
+
+#include "arrow/util/type_traits.h"
+#include "arrow/util/ubsan.h"
+
+namespace arrow {
+namespace bit_util {
+
+//
+// Byte-swap 16-bit, 32-bit and 64-bit values
+//
+
+// Swap the byte order (i.e. endianness)
+static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); }
+static inline uint64_t ByteSwap(uint64_t value) {
+  return static_cast<uint64_t>(ARROW_BYTE_SWAP64(value));
+}
+static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); }
+static inline uint32_t ByteSwap(uint32_t value) {
+  return static_cast<uint32_t>(ARROW_BYTE_SWAP32(value));
+}
+static inline int16_t ByteSwap(int16_t value) {
+  constexpr auto m = static_cast<int16_t>(0xff);
+  return static_cast<int16_t>(((value >> 8) & m) | ((value & m) << 8));
+}
+static inline uint16_t ByteSwap(uint16_t value) {
+  return static_cast<uint16_t>(ByteSwap(static_cast<int16_t>(value)));
+}
+static inline uint8_t ByteSwap(uint8_t value) { return value; }
+static inline int8_t ByteSwap(int8_t value) { return value; }
+static inline double ByteSwap(double value) {
+  const uint64_t swapped = ARROW_BYTE_SWAP64(util::SafeCopy<uint64_t>(value));
+  return util::SafeCopy<double>(swapped);
+}
+static inline float ByteSwap(float value) {
+  const uint32_t swapped = ARROW_BYTE_SWAP32(util::SafeCopy<uint32_t>(value));
+  return util::SafeCopy<float>(swapped);
+}
+
+// Write the swapped bytes into dst. Src and dst cannot overlap.
+static inline void ByteSwap(void* dst, const void* src, int len) {
+  switch (len) {
+    case 1:
+      *reinterpret_cast<int8_t*>(dst) = *reinterpret_cast<const int8_t*>(src);
+      return;
+    case 2:
+      *reinterpret_cast<int16_t*>(dst) = ByteSwap(*reinterpret_cast<const int16_t*>(src));
+      return;
+    case 4:
+      *reinterpret_cast<int32_t*>(dst) = ByteSwap(*reinterpret_cast<const int32_t*>(src));
+      return;
+    case 8:
+      *reinterpret_cast<int64_t*>(dst) = ByteSwap(*reinterpret_cast<const int64_t*>(src));
+      return;
+    default:
+      break;
+  }
+
+  auto d = reinterpret_cast<uint8_t*>(dst);
+  auto s = reinterpret_cast<const uint8_t*>(src);
+  for (int i = 0; i < len; ++i) {
+    d[i] = s[len - i - 1];
+  }
+}
+
+// Convert to little/big endian format from the machine's native endian format.
+#if ARROW_LITTLE_ENDIAN
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T ToBigEndian(T value) {
+  return ByteSwap(value);
+}
+
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T ToLittleEndian(T value) {
+  return value;
+}
+#else
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T ToBigEndian(T value) {
+  return value;
+}
+
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T ToLittleEndian(T value) {
+  return ByteSwap(value);
+}
+#endif
+
+// Convert from big/little endian format to the machine's native endian format.
+#if ARROW_LITTLE_ENDIAN
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T FromBigEndian(T value) {
+  return ByteSwap(value);
+}
+
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T FromLittleEndian(T value) {
+  return value;
+}
+#else
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T FromBigEndian(T value) {
+  return value;
+}
+
+template <typename T, typename = internal::EnableIfIsOneOf<
+                          T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
+                          uint8_t, int8_t, float, double>>
+static inline T FromLittleEndian(T value) {
+  return ByteSwap(value);
+}
+#endif
+
+// Handle endianness in *word* granularity (keep individual array element untouched)
+namespace little_endian {
+
+namespace detail {
+
+// Read a native endian array as little endian
+template <typename T, size_t N>
+struct Reader {
+  const std::array<T, N>& native_array;
+
+  explicit Reader(const std::array<T, N>& native_array) : native_array(native_array) {}
+
+  const T& operator[](size_t i) const {
+    return native_array[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
+  }
+};
+
+// Read/write a native endian array as little endian
+template <typename T, size_t N>
+struct Writer {
+  std::array<T, N>* native_array;
+
+  explicit Writer(std::array<T, N>* native_array) : native_array(native_array) {}
+
+  const T& operator[](size_t i) const {
+    return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
+  }
+  T& operator[](size_t i) { return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; }
+};
+
+} // namespace detail
+
+// Construct array reader and try to deduce template arguments
+template <typename T, size_t N>
+static inline detail::Reader<T, N> Make(const std::array<T, N>& native_array) {
+  return detail::Reader<T, N>(native_array);
+}
+
+// Construct array writer and try to deduce template arguments
+template <typename T, size_t N>
+static inline detail::Writer<T, N> Make(std::array<T, N>* native_array) {
+  return detail::Writer<T, N>(native_array);
+}
+
+// Convert little endian array to native endian
+template <typename T, size_t N>
+static inline std::array<T, N> ToNative(std::array<T, N> array) {
+  if (!ARROW_LITTLE_ENDIAN) {
+    std::reverse(array.begin(), array.end());
+  }
+  return array;
+}
+
+// Convert native endian array to little endian
+template <typename T, size_t N>
+static inline std::array<T, N> FromNative(std::array<T, N> array) {
+  return ToNative(array);
+}
+
+} // namespace little_endian
+
+} // namespace bit_util
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd9af907ecc374e94138e0fec20e87739a271658
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h
@@ -0,0 +1,656 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This is a private header for number-to-string formatting utilities
+
+#pragma once
+
+#include <array>
+#include <cassert>
+#include <chrono>
+#include <limits>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <type_traits>
+#include <utility>
+
+#include "arrow/status.h"
+#include "arrow/type.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/double_conversion.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/string.h"
+#include "arrow/util/time.h"
+#include "arrow/util/visibility.h"
+#include "arrow/vendored/datetime.h"
+
+namespace arrow {
+namespace internal {
+
+/// \brief The entry point for conversion to strings.
+template <typename ARROW_TYPE, typename Enable = void>
+class StringFormatter;
+
+template <typename T>
+struct is_formattable {
+  template <typename U, typename = typename StringFormatter<U>::value_type>
+  static std::true_type Test(U*);
+
+  template <typename U>
+  static std::false_type Test(...);
+
+  static constexpr bool value = decltype(Test<T>(NULLPTR))::value;
+};
+
+template <typename T, typename R = void>
+using enable_if_formattable = enable_if_t<is_formattable<T>::value, R>;
+
+template <typename Appender>
+using Return = decltype(std::declval<Appender>()(std::string_view{}));
+
+/////////////////////////////////////////////////////////////////////////
+// Boolean formatting
+
+template <>
+class StringFormatter<BooleanType> {
+ public:
+  explicit StringFormatter(const DataType* = NULLPTR) {}
+
+  using value_type = bool;
+
+  template <typename Appender>
+  Return<Appender> operator()(bool value, Appender&& append) {
+    if (value) {
+      const char string[] = "true";
+      return append(std::string_view(string));
+    } else {
+      const char string[] = "false";
+      return append(std::string_view(string));
+    }
+  }
+};
+
+/////////////////////////////////////////////////////////////////////////
+// Decimals formatting
+
+template <typename ARROW_TYPE>
+class DecimalToStringFormatterMixin {
+ public:
+  explicit DecimalToStringFormatterMixin(const DataType* type)
+      : scale_(static_cast<const ARROW_TYPE*>(type)->scale()) {}
+
+  using value_type = typename TypeTraits<ARROW_TYPE>::CType;
+
+  template <typename Appender>
+  Return<Appender> operator()(const value_type& value, Appender&& append) {
+    return append(value.ToString(scale_));
+  }
+
+ private:
+  int32_t scale_;
+};
+
+template <>
+class StringFormatter<Decimal128Type>
+    : public DecimalToStringFormatterMixin<Decimal128Type> {
+  using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<Decimal256Type>
+    : public DecimalToStringFormatterMixin<Decimal256Type> {
+  using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
+};
+
+/////////////////////////////////////////////////////////////////////////
+// Integer formatting
+
+namespace detail {
+
+// A 2x100 direct table mapping integers in [0..99] to their decimal representations.
+ARROW_EXPORT extern const char digit_pairs[];
+
+// Based on fmtlib's format_int class:
+// Write digits from right to left into a stack allocated buffer.
+// \pre *cursor points to the byte after the one that will be written.
+// \post *cursor points to the byte that was written.
+inline void FormatOneChar(char c, char** cursor) { *(--(*cursor)) = c; }
+
+template <typename Int>
+void FormatOneDigit(Int value, char** cursor) {
+  assert(value >= 0 && value <= 9);
+  FormatOneChar(static_cast<char>('0' + value), cursor);
+}
+
+// GH-35662: I don't know why but the following combination causes SEGV:
+// * template implementation without inline
+// * MinGW
+// * Release build
+template <typename Int>
+inline void FormatTwoDigits(Int value, char** cursor) {
+  assert(value >= 0 && value <= 99);
+  auto digit_pair = &digit_pairs[value * 2];
+  FormatOneChar(digit_pair[1], cursor);
+  FormatOneChar(digit_pair[0], cursor);
+}
+
+template <typename Int>
+void FormatAllDigits(Int value, char** cursor) {
+  assert(value >= 0);
+  while (value >= 100) {
+    FormatTwoDigits(value % 100, cursor);
+    value /= 100;
+  }
+
+  if (value >= 10) {
+    FormatTwoDigits(value, cursor);
+  } else {
+    FormatOneDigit(value, cursor);
+  }
+}
+
+template <typename Int>
+void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) {
+  auto end = *cursor - pad;
+  FormatAllDigits(value, cursor);
+  while (*cursor > end) {
+    FormatOneChar(pad_char, cursor);
+  }
+}
+
+template <size_t BUFFER_SIZE>
+std::string_view ViewDigitBuffer(const std::array<char, BUFFER_SIZE>& buffer,
+                                 char* cursor) {
+  auto buffer_end = buffer.data() + BUFFER_SIZE;
+  return {cursor, static_cast<size_t>(buffer_end - cursor)};
+}
+
+template <typename Int, typename UInt = typename std::make_unsigned<Int>::type>
+constexpr UInt Abs(Int value) {
+  return value < 0 ? ~static_cast<UInt>(value) + 1 : static_cast<UInt>(value);
+}
+
+template <typename Int>
+constexpr size_t Digits10(Int value) {
+  return value <= 9 ? 1 : Digits10(value / 10) + 1;
+}
+
+} // namespace detail
+
+template <typename ARROW_TYPE>
+class IntToStringFormatterMixin {
+ public:
+  explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {}
+
+  using value_type = typename ARROW_TYPE::c_type;
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    constexpr size_t buffer_size =
+        detail::Digits10(std::numeric_limits<value_type>::max()) + 1;
+
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+    detail::FormatAllDigits(detail::Abs(value), &cursor);
+    if (value < 0) {
+      detail::FormatOneChar('-', &cursor);
+    }
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+};
+
+template <>
+class StringFormatter<Int8Type> : public IntToStringFormatterMixin<Int8Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<Int16Type> : public IntToStringFormatterMixin<Int16Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<Int32Type> : public IntToStringFormatterMixin<Int32Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<Int64Type> : public IntToStringFormatterMixin<Int64Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<UInt8Type> : public IntToStringFormatterMixin<UInt8Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<UInt16Type> : public IntToStringFormatterMixin<UInt16Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<UInt32Type> : public IntToStringFormatterMixin<UInt32Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<UInt64Type> : public IntToStringFormatterMixin<UInt64Type> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+/////////////////////////////////////////////////////////////////////////
+// Floating-point formatting
+
+class ARROW_EXPORT FloatToStringFormatter {
+ public:
+  FloatToStringFormatter();
+  FloatToStringFormatter(int flags, const char* inf_symbol, const char* nan_symbol,
+                         char exp_character, int decimal_in_shortest_low,
+                         int decimal_in_shortest_high,
+                         int max_leading_padding_zeroes_in_precision_mode,
+                         int max_trailing_padding_zeroes_in_precision_mode);
+  ~FloatToStringFormatter();
+
+  // Returns the number of characters written
+  int FormatFloat(float v, char* out_buffer, int out_size);
+  int FormatFloat(double v, char* out_buffer, int out_size);
+  int FormatFloat(uint16_t v, char* out_buffer, int out_size);
+
+ protected:
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+template <typename ARROW_TYPE>
+class FloatToStringFormatterMixin : public FloatToStringFormatter {
+ public:
+  using value_type = typename ARROW_TYPE::c_type;
+
+  static constexpr int buffer_size = 50;
+
+  explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {}
+
+  FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol,
+                              char exp_character, int decimal_in_shortest_low,
+                              int decimal_in_shortest_high,
+                              int max_leading_padding_zeroes_in_precision_mode,
+                              int max_trailing_padding_zeroes_in_precision_mode)
+      : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character,
+                               decimal_in_shortest_low, decimal_in_shortest_high,
+                               max_leading_padding_zeroes_in_precision_mode,
+                               max_trailing_padding_zeroes_in_precision_mode) {}
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    char buffer[buffer_size];
+    int size = FormatFloat(value, buffer, buffer_size);
+    return append(std::string_view(buffer, size));
+  }
+};
+
+template <>
+class StringFormatter<FloatType> : public FloatToStringFormatterMixin<FloatType> {
+ public:
+  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<DoubleType> : public FloatToStringFormatterMixin<DoubleType> {
+ public:
+  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
+};
+
+template <>
+class StringFormatter<HalfFloatType> : public FloatToStringFormatterMixin<HalfFloatType> {
+ public:
+  using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
+};
+
+/////////////////////////////////////////////////////////////////////////
+// Temporal formatting
+
+namespace detail {
+
+constexpr size_t BufferSizeYYYY_MM_DD() {
+  // "-"? "99999-12-31"
+  return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 +
+         detail::Digits10(31);
+}
+
+inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) {
+  FormatTwoDigits(static_cast<uint32_t>(ymd.day()), cursor);
+  FormatOneChar('-', cursor);
+  FormatTwoDigits(static_cast<uint32_t>(ymd.month()), cursor);
+  FormatOneChar('-', cursor);
+  auto year = static_cast<int>(ymd.year());
+  const auto is_neg_year = year < 0;
+  year = std::abs(year);
+  assert(year <= 99999);
+  FormatTwoDigits(year % 100, cursor);
+  year /= 100;
+  FormatTwoDigits(year % 100, cursor);
+  if (year >= 100) {
+    FormatOneDigit(year / 100, cursor);
+  }
+  if (is_neg_year) {
+    FormatOneChar('-', cursor);
+  }
+}
+
+template <typename Duration>
+constexpr size_t BufferSizeHH_MM_SS() {
+  // "23:59:59" ("." "9"+)?
+  return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 +
+         detail::Digits10(Duration::period::den) - 1;
+}
+
+template <typename Duration>
+void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss<Duration> hms, char** cursor) {
+  constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1;
+  if (subsecond_digits != 0) {
+    FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor);
+    FormatOneChar('.', cursor);
+  }
+  FormatTwoDigits(hms.seconds().count(), cursor);
+  FormatOneChar(':', cursor);
+  FormatTwoDigits(hms.minutes().count(), cursor);
+  FormatOneChar(':', cursor);
+  FormatTwoDigits(hms.hours().count(), cursor);
+}
+
+// Some out-of-bound datetime values would result in erroneous printing
+// because of silent integer wraparound in the `arrow_vendored::date` library.
+//
+// To avoid such misprinting, we must therefore check the bounds explicitly.
+// The bounds correspond to start of year -32767 and end of year 32767,
+// respectively (-32768 is an invalid year value in `arrow_vendored::date`).
+//
+// Note these values are the same as documented for C++20:
+// https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days
+template <typename Unit>
+bool IsDateTimeInRange(Unit duration) {
+  constexpr Unit kMinIncl =
+      std::chrono::duration_cast<Unit>(arrow_vendored::date::days{-12687428});
+  constexpr Unit kMaxExcl =
+      std::chrono::duration_cast<Unit>(arrow_vendored::date::days{11248738});
+  return duration >= kMinIncl && duration < kMaxExcl;
+}
+
+// IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of
+// nanoseconds cannot represent years outside of the [-32767, 32767]
+// range, and the {kMinIncl, kMaxExcl} constants above would overflow.
+constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; }
+
+template <typename Unit>
+bool IsTimeInRange(Unit duration) {
+  constexpr Unit kMinIncl = std::chrono::duration_cast<Unit>(std::chrono::seconds{0});
+  constexpr Unit kMaxExcl = std::chrono::duration_cast<Unit>(std::chrono::seconds{86400});
+  return duration >= kMinIncl && duration < kMaxExcl;
+}
+
+template <typename RawValue, typename Appender>
+Return<Appender> FormatOutOfRange(RawValue&& raw_value, Appender&& append) {
+  // XXX locale-sensitive but good enough for now
+  std::string formatted = "<value out of range: " + std::to_string(raw_value) + ">";
+  return append(std::move(formatted));
+}
+
+const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970};
+
+} // namespace detail
+
+template <>
+class StringFormatter<DurationType> : public IntToStringFormatterMixin<DurationType> {
+  using IntToStringFormatterMixin::IntToStringFormatterMixin;
+};
+
+class DateToStringFormatterMixin {
+ public:
+  explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {}
+
+ protected:
+  template <typename Appender>
+  Return<Appender> FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) {
+    arrow_vendored::date::sys_days timepoint_days{since_epoch};
+
+    constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD();
+
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days},
+                             &cursor);
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+};
+
+template <>
+class StringFormatter<Date32Type> : public DateToStringFormatterMixin {
+ public:
+  using value_type = typename Date32Type::c_type;
+
+  using DateToStringFormatterMixin::DateToStringFormatterMixin;
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    const auto since_epoch = arrow_vendored::date::days{value};
+    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
+      return detail::FormatOutOfRange(value, append);
+    }
+    return FormatDays(since_epoch, std::forward<Appender>(append));
+  }
+};
+
+template <>
+class StringFormatter<Date64Type> : public DateToStringFormatterMixin {
+ public:
+  using value_type = typename Date64Type::c_type;
+
+  using DateToStringFormatterMixin::DateToStringFormatterMixin;
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    const auto since_epoch = std::chrono::milliseconds{value};
+    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
+      return detail::FormatOutOfRange(value, append);
+    }
+    return FormatDays(std::chrono::duration_cast<arrow_vendored::date::days>(since_epoch),
+                      std::forward<Appender>(append));
+  }
+};
+
+template <>
+class StringFormatter<TimestampType> {
+ public:
+  using value_type = int64_t;
+
+  explicit StringFormatter(const DataType* type)
+      : unit_(checked_cast<const TimestampType&>(*type).unit()),
+        timezone_(checked_cast<const TimestampType&>(*type).timezone()) {}
+
+  template <typename Duration, typename Appender>
+  Return<Appender> operator()(Duration, value_type value, Appender&& append) {
+    using arrow_vendored::date::days;
+
+    const Duration since_epoch{value};
+    if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
+      return detail::FormatOutOfRange(value, append);
+    }
+
+    const auto timepoint = detail::kEpoch + since_epoch;
+    // Round days towards zero
+    // (the naive approach of using arrow_vendored::date::floor() would
+    // result in UB for very large negative timestamps, similarly as
+    // https://github.com/HowardHinnant/date/issues/696)
+    auto timepoint_days = std::chrono::time_point_cast<days>(timepoint);
+    Duration since_midnight;
+    if (timepoint_days <= timepoint) {
+      // Year >= 1970
+      since_midnight = timepoint - timepoint_days;
+    } else {
+      // Year < 1970
+      since_midnight = days(1) - (timepoint_days - timepoint);
+      timepoint_days -= days(1);
+    }
+
+    // YYYY_MM_DD " " HH_MM_SS "Z"?
+    constexpr size_t buffer_size =
+        detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS<Duration>() + 1;
+
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    if (timezone_.size() > 0) {
+      detail::FormatOneChar('Z', &cursor);
+    }
+    detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
+    detail::FormatOneChar(' ', &cursor);
+    detail::FormatYYYY_MM_DD(timepoint_days, &cursor);
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
+  }
+
+ private:
+  TimeUnit::type unit_;
+  std::string timezone_;
+};
+
+template <typename T>
+class StringFormatter<T, enable_if_time<T>> {
+ public:
+  using value_type = typename T::c_type;
+
+  explicit StringFormatter(const DataType* type)
+      : unit_(checked_cast<const T&>(*type).unit()) {}
+
+  template <typename Duration, typename Appender>
+  Return<Appender> operator()(Duration, value_type count, Appender&& append) {
+    const Duration since_midnight{count};
+    if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) {
+      return detail::FormatOutOfRange(count, append);
+    }
+
+    constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS<Duration>();
+
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type value, Appender&& append) {
+    return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
+  }
+
+ private:
+  TimeUnit::type unit_;
+};
+
+template <>
+class StringFormatter<MonthIntervalType> {
+ public:
+  using value_type = MonthIntervalType::c_type;
+
+  explicit StringFormatter(const DataType*) {}
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type interval, Appender&& append) {
+    constexpr size_t buffer_size =
+        /*'m'*/ 3 + /*negative signs*/ 1 +
+        /*months*/ detail::Digits10(std::numeric_limits<value_type>::max());
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    detail::FormatOneChar('M', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval), &cursor);
+    if (interval < 0) detail::FormatOneChar('-', &cursor);
+
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+};
+
+template <>
+class StringFormatter<DayTimeIntervalType> {
+ public:
+  using value_type = DayTimeIntervalType::DayMilliseconds;
+
+  explicit StringFormatter(const DataType*) {}
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type interval, Appender&& append) {
+    constexpr size_t buffer_size =
+        /*d, ms*/ 3 + /*negative signs*/ 2 +
+        /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max());
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    detail::FormatOneChar('s', &cursor);
+    detail::FormatOneChar('m', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor);
+    if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor);
+
+    detail::FormatOneChar('d', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
+    if (interval.days < 0) detail::FormatOneChar('-', &cursor);
+
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+};
+
+template <>
+class StringFormatter<MonthDayNanoIntervalType> {
+ public:
+  using value_type = MonthDayNanoIntervalType::MonthDayNanos;
+
+  explicit StringFormatter(const DataType*) {}
+
+  template <typename Appender>
+  Return<Appender> operator()(value_type interval, Appender&& append) {
+    constexpr size_t buffer_size =
+        /*m, d, ns*/ 4 + /*negative signs*/ 3 +
+        /*months/days*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max()) +
+        /*nanoseconds*/ detail::Digits10(std::numeric_limits<int64_t>::max());
+    std::array<char, buffer_size> buffer;
+    char* cursor = buffer.data() + buffer_size;
+
+    detail::FormatOneChar('s', &cursor);
+    detail::FormatOneChar('n', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor);
+    if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor);
+
+    detail::FormatOneChar('d', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
+    if (interval.days < 0) detail::FormatOneChar('-', &cursor);
+
+    detail::FormatOneChar('M', &cursor);
+    detail::FormatAllDigits(detail::Abs(interval.months), &cursor);
+    if (interval.months < 0) detail::FormatOneChar('-', &cursor);
+
+    return append(detail::ViewDigitBuffer(buffer, cursor));
+  }
+};
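+
+// Usage sketch (editorial addition, not part of the upstream header): formatters
+// hand the formatted std::string_view to a caller-supplied appender, avoiding an
+// intermediate std::string allocation.
+//
+//   StringFormatter<Int32Type> format;
+//   std::string out;
+//   format(-1234, [&](std::string_view v) { out.append(v.data(), v.size()); });
+//   // out == "-1234"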
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h
new file mode 100644
index 0000000000000000000000000000000000000000..41e268852fa6ea76ce195240498bb11277a7228c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h
@@ -0,0 +1,160 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <tuple>
+#include <type_traits>
+
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace internal {
+
+struct Empty {
+  static Result<Empty> ToResult(Status s) {
+    if (ARROW_PREDICT_TRUE(s.ok())) {
+      return Empty{};
+    }
+    return s;
+  }
+};
+
+/// Helper struct for examining lambdas and other callables.
+/// TODO(ARROW-12655) support function pointers
+struct call_traits {
+ public:
+  template <typename R, typename... A>
+  static std::false_type is_overloaded_impl(R(A...));
+
+  template <typename F>
+  static std::false_type is_overloaded_impl(decltype(&F::operator())*);
+
+  template <typename F>
+  static std::true_type is_overloaded_impl(...);
+
+  template <typename F, typename R, typename... A>
+  static R return_type_impl(R (F::*)(A...));
+
+  template <typename F, typename R, typename... A>
+  static R return_type_impl(R (F::*)(A...) const);
+
+  template <std::size_t I, typename F, typename R, typename... A>
+  static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
+      R (F::*)(A...));
+
+  template <std::size_t I, typename F, typename R, typename... A>
+  static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
+      R (F::*)(A...) const);
+
+  template <std::size_t I, typename F, typename R, typename... A>
+  static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
+      R (F::*)(A...) &&);
+
+  template <typename F, typename R, typename... A>
+  static std::integral_constant<size_t, sizeof...(A)> argument_count_impl(R (F::*)(A...));
+
+  template <typename F, typename R, typename... A>
+  static std::integral_constant<size_t, sizeof...(A)> argument_count_impl(R (F::*)(A...)
+                                                                              const);
+
+  template <typename F, typename R, typename... A>
+  static std::integral_constant<size_t, sizeof...(A)> argument_count_impl(R (F::*)(A...) &&);
+
+  /// bool constant indicating whether F is a callable with more than one possible
+  /// signature. Will be true_type for objects which define multiple operator() or which
+  /// define a template operator()
+  template <typename F>
+  using is_overloaded =
+      decltype(is_overloaded_impl<typename std::decay<F>::type>(NULLPTR));
+
+  template <typename F, typename T = void>
+  using enable_if_overloaded = typename std::enable_if<is_overloaded<F>::value, T>::type;
+
+  template <typename F, typename T = void>
+  using disable_if_overloaded =
+      typename std::enable_if<!is_overloaded<F>::value, T>::type;
+
+  /// If F is not overloaded, the argument types of its call operator can be
+  /// extracted via call_traits::argument_type
+  template <std::size_t I, typename F>
+  using argument_type = decltype(argument_type_impl<I>(&std::decay<F>::type::operator()));
+
+  template <typename F>
+  using argument_count = decltype(argument_count_impl(&std::decay<F>::type::operator()));
+
+  template <typename F>
+  using return_type = decltype(return_type_impl(&std::decay<F>::type::operator()));
+
+  template <typename F, typename T, typename RT = T>
+  using enable_if_return =
+      typename std::enable_if<std::is_same<return_type<F>, T>::value, RT>::type;
+
+  template <typename T, typename R = void>
+  using enable_if_empty = typename std::enable_if<std::is_same<T, Empty>::value, R>::type;
+
+  template <typename T, typename R = void>
+  using enable_if_not_empty =
+      typename std::enable_if<!std::is_same<T, Empty>::value, R>::type;
+};
+
+/// A type erased callable object which may only be invoked once.
+/// It can be constructed from any lambda which matches the provided call signature.
+/// Invoking it results in destruction of the lambda, freeing any state/references
+/// immediately. Invoking a default constructed FnOnce or one which has already been
+/// invoked will segfault.
+template <typename Signature>
+class FnOnce;
+
+template <typename R, typename... A>
+class FnOnce<R(A...)> {
+ public:
+  FnOnce() = default;
+
+  template <typename Fn,
+            typename = typename std::enable_if<std::is_convertible<
+                decltype(std::declval<Fn&&>()(std::declval<A>()...)), R>::value>::type>
+  FnOnce(Fn fn) : impl_(new FnImpl<Fn>(std::move(fn))) {  // NOLINT runtime/explicit
+  }
+
+  explicit operator bool() const { return impl_ != NULLPTR; }
+
+  R operator()(A... a) && {
+    auto bye = std::move(impl_);
+    return bye->invoke(std::forward<A>(a)...);
+  }
+
+ private:
+  struct Impl {
+    virtual ~Impl() = default;
+    virtual R invoke(A&&... a) = 0;
+  };
+
+  template <typename Fn>
+  struct FnImpl : Impl {
+    explicit FnImpl(Fn fn) : fn_(std::move(fn)) {}
+    R invoke(A&&... a) override { return std::move(fn_)(std::forward<A&&>(a)...); }
+    Fn fn_;
+  };
+
+  std::unique_ptr<Impl> impl_;
+};
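+
+// Usage sketch (editorial addition, not part of the upstream header): a FnOnce
+// must be invoked as an rvalue, which makes the single-shot contract explicit.
+//
+//   FnOnce<int(int)> add_one = [](int x) { return x + 1; };
+//   int y = std::move(add_one)(41);  // y == 42; add_one must not be invoked again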
+
+} // namespace internal
+} // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h
new file mode 100644
index 0000000000000000000000000000000000000000..2de9f4153248f0acebf4589fc492eed912a847a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h
@@ -0,0 +1,944 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Private header, not to be exported
+
+#pragma once
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "arrow/array/builder_binary.h"
+#include "arrow/buffer_builder.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/bitmap_builders.h"
+#include "arrow/util/endian.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/ubsan.h"
+
+#define XXH_INLINE_ALL
+
+#include "arrow/vendored/xxhash.h"  // IWYU pragma: keep
+
+namespace arrow {
+namespace internal {
+
+// XXX would it help to have a 32-bit hash value on large datasets?
+typedef uint64_t hash_t;
+
+// Notes about the choice of a hash function.
+// - XXH3 is extremely fast on most data sizes, from small to huge;
+//   faster even than HW CRC-based hashing schemes
+// - our custom hash function for tiny values (< 16 bytes) is still
+//   significantly faster (~30%), at least on this machine and compiler
+
+template <uint64_t AlgNum = 0>
+inline hash_t ComputeStringHash(const void* data, int64_t length);
+
+/// \brief A hash function for bitmaps that can handle offsets and lengths in
+/// terms of number of bits. The hash only depends on the bits actually hashed.
+///
+/// It's the caller's responsibility to ensure that bits_offset + num_bits are
+/// readable from the bitmap.
+///
+/// \pre bits_offset >= 0
+/// \pre num_bits >= 0
+/// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap
+///
+/// \param bitmap The pointer to the bitmap.
+/// \param seed The seed for the hash function (useful when chaining hash functions).
+/// \param bits_offset The offset in bits relative to the start of the bitmap.
+/// \param num_bits The number of bits after the offset to be hashed.
+ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed,
+                                      int64_t bits_offset, int64_t num_bits);
+
+template <typename Scalar, uint64_t AlgNum>
+struct ScalarHelperBase {
+  static bool CompareScalars(Scalar u, Scalar v) { return u == v; }
+
+  static hash_t ComputeHash(const Scalar& value) {
+    // Generic hash computation for scalars. Simply apply the string hash
+    // to the bit representation of the value.
+
+    // XXX in the case of FP values, we'd like equal values to have the same hash,
+    // even if they have different bit representations...
+    return ComputeStringHash<AlgNum>(&value, sizeof(value));
+  }
+};
+
+template <typename Scalar, uint64_t AlgNum = 0, typename Enable = void>
+struct ScalarHelper : public ScalarHelperBase<Scalar, AlgNum> {};
+
+template <typename Scalar, uint64_t AlgNum>
+struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_integral<Scalar>::value>>
+    : public ScalarHelperBase<Scalar, AlgNum> {
+  // ScalarHelper specialization for integers
+
+  static hash_t ComputeHash(const Scalar& value) {
+    // Faster hash computation for integers.
+
+    // Two of xxhash's prime multipliers (which are chosen for their
+    // bit dispersion properties)
+    static constexpr uint64_t multipliers[] = {11400714785074694791ULL,
+                                               14029467366897019727ULL};
+
+    // Multiplying by the prime number mixes the low bits into the high bits,
+    // then byte-swapping (which is a single CPU instruction) allows the
+    // combined high and low bits to participate in the initial hash table index.
+ auto h = static_cast(value); + return bit_util::ByteSwap(multipliers[AlgNum] * h); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for std::string_view + + static hash_t ComputeHash(std::string_view value) { + return ComputeStringHash(value.data(), static_cast(value.size())); + } +}; + +template +struct ScalarHelper::value>> + : public ScalarHelperBase { + // ScalarHelper specialization for reals + + static bool CompareScalars(Scalar u, Scalar v) { + if (std::isnan(u)) { + // XXX should we do a bit-precise comparison? + return std::isnan(v); + } + return u == v; + } +}; + +template +hash_t ComputeStringHash(const void* data, int64_t length) { + if (ARROW_PREDICT_TRUE(length <= 16)) { + // Specialize for small hash strings, as they are quite common as + // hash table keys. Even XXH3 isn't quite as fast. + auto p = reinterpret_cast(data); + auto n = static_cast(length); + if (n <= 8) { + if (n <= 3) { + if (n == 0) { + return 1U; + } + uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1]; + return ScalarHelper::ComputeHash(x); + } + // 4 <= length <= 8 + // We can read the string as two overlapping 32-bit ints, apply + // different hash functions to each of them in parallel, then XOR + // the results + uint32_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 4); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + // 8 <= length <= 16 + // Apply the same principle as above + uint64_t x, y; + hash_t hx, hy; + x = util::SafeLoadAs(p + n - 8); + y = util::SafeLoadAs(p); + hx = ScalarHelper::ComputeHash(x); + hy = ScalarHelper::ComputeHash(y); + return n ^ hx ^ hy; + } + +#if XXH3_SECRET_SIZE_MIN != 136 +#error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets +#endif + + // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow. + // Instead, we use hard-coded random secrets. To maximize cache efficiency, + // they reuse the same memory area. + static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = { + 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f, + 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24, + 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26, + 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75, + 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce, + 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3, + 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42, + 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1, + 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5, + 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87}; + + static_assert(AlgNum < 2, "AlgNum too large"); + static constexpr auto secret = kXxh3Secrets + AlgNum; + return XXH3_64bits_withSecret(data, static_cast(length), secret, + XXH3_SECRET_SIZE_MIN); +} + +// XXX add a HashEq struct with both hash and compare functions? 
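+// Illustrative sketch of the small-string fast path above (hypothetical usage,
+// not part of the vendored header): a 4..16 byte key costs two word loads, two
+// multiply/byte-swap hashes and a couple of XORs.
+//
+//   #include <cstdint>
+//   #include <string>
+//   #include "arrow/util/hashing.h"
+//
+//   void Demo() {
+//     std::string key = "dictionary";  // 10 bytes: the 8 < n <= 16 branch
+//                                      // loads bytes [0, 8) and the
+//                                      // overlapping bytes [2, 10)
+//     arrow::internal::hash_t h = arrow::internal::ComputeStringHash<0>(
+//         key.data(), static_cast<int64_t>(key.size()));
+//     (void)h;
+//   }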
+ +// ---------------------------------------------------------------------- +// An open-addressing insert-only hash table (no deletes) + +template +class HashTable { + public: + static constexpr hash_t kSentinel = 0ULL; + static constexpr int64_t kLoadFactor = 2UL; + + struct Entry { + hash_t h; + Payload payload; + + // An entry is valid if the hash is different from the sentinel value + operator bool() const { return h != kSentinel; } + }; + + HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) { + DCHECK_NE(pool, nullptr); + // Minimum of 32 elements + capacity = std::max(capacity, 32UL); + capacity_ = bit_util::NextPower2(capacity); + capacity_mask_ = capacity_ - 1; + size_ = 0; + + DCHECK_OK(UpsizeBuffer(capacity_)); + } + + // Lookup with non-linear probing + // cmp_func should have signature bool(const Payload*). + // Return a (Entry*, found) pair. + template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + template + std::pair Lookup(hash_t h, CmpFunc&& cmp_func) const { + auto p = Lookup(h, entries_, capacity_mask_, + std::forward(cmp_func)); + return {&entries_[p.first], p.second}; + } + + Status Insert(Entry* entry, hash_t h, const Payload& payload) { + // Ensure entry is empty before inserting + assert(!*entry); + entry->h = FixHash(h); + entry->payload = payload; + ++size_; + + if (ARROW_PREDICT_FALSE(NeedUpsizing())) { + // Resize less frequently since it is expensive + return Upsize(capacity_ * kLoadFactor * 2); + } + return Status::OK(); + } + + uint64_t size() const { return size_; } + + // Visit all non-empty entries in the table + // The visit_func should have signature void(const Entry*) + template + void VisitEntries(VisitFunc&& visit_func) const { + for (uint64_t i = 0; i < capacity_; i++) { + const auto& entry = entries_[i]; + if (entry) { + visit_func(&entry); + } + } + } + + protected: + // NoCompare is for when the value is known not to exist in the table + enum CompareKind { DoCompare, NoCompare }; + + // The workhorse lookup function + template + std::pair Lookup(hash_t h, const Entry* entries, uint64_t size_mask, + CmpFunc&& cmp_func) const { + static constexpr uint8_t perturb_shift = 5; + + uint64_t index, perturb; + const Entry* entry; + + h = FixHash(h); + index = h & size_mask; + perturb = (h >> perturb_shift) + 1U; + + while (true) { + entry = &entries[index]; + if (CompareEntry(h, entry, std::forward(cmp_func))) { + // Found + return {index, true}; + } + if (entry->h == kSentinel) { + // Empty slot + return {index, false}; + } + + // Perturbation logic inspired from CPython's set / dict object. + // The goal is that all 64 bits of the unmasked hash value eventually + // participate in the probing sequence, to minimize clustering. 
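+      // Note on termination: each probe adds `perturb`, then the recurrence
+      // below shifts `perturb` right by 5 bits and adds 1. After about
+      // ceil(64 / 5) = 13 steps all 64 hash bits have been consumed and
+      // `perturb` settles at 1, so the sequence degenerates into linear
+      // probing; since the load factor is kept <= 1/2, an empty slot is
+      // always reachable and the loop terminates.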
+ index = (index + perturb) & size_mask; + perturb = (perturb >> perturb_shift) + 1U; + } + } + + template + bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const { + if (CKind == NoCompare) { + return false; + } else { + return entry->h == h && cmp_func(&entry->payload); + } + } + + bool NeedUpsizing() const { + // Keep the load factor <= 1/2 + return size_ * kLoadFactor >= capacity_; + } + + Status UpsizeBuffer(uint64_t capacity) { + RETURN_NOT_OK(entries_builder_.Resize(capacity)); + entries_ = entries_builder_.mutable_data(); + memset(static_cast(entries_), 0, capacity * sizeof(Entry)); + + return Status::OK(); + } + + Status Upsize(uint64_t new_capacity) { + assert(new_capacity > capacity_); + uint64_t new_mask = new_capacity - 1; + assert((new_capacity & new_mask) == 0); // it's a power of two + + // Stash old entries and seal builder, effectively resetting the Buffer + const Entry* old_entries = entries_; + ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_)); + // Allocate new buffer + RETURN_NOT_OK(UpsizeBuffer(new_capacity)); + + for (uint64_t i = 0; i < capacity_; i++) { + const auto& entry = old_entries[i]; + if (entry) { + // Dummy compare function will not be called + auto p = Lookup(entry.h, entries_, new_mask, + [](const Payload*) { return false; }); + // Lookup (and CompareEntry) ensure that an + // empty slots is always returned + assert(!p.second); + entries_[p.first] = entry; + } + } + capacity_ = new_capacity; + capacity_mask_ = new_mask; + + return Status::OK(); + } + + hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; } + + // The number of slots available in the hash table array. + uint64_t capacity_; + uint64_t capacity_mask_; + // The number of used slots in the hash table array. + uint64_t size_; + + Entry* entries_; + TypedBufferBuilder entries_builder_; +}; + +// XXX typedef memo_index_t int32_t ? + +constexpr int32_t kKeyNotFound = -1; + +// ---------------------------------------------------------------------- +// A base class for memoization table. + +class MemoTable { + public: + virtual ~MemoTable() = default; + + virtual int32_t size() const = 0; +}; + +// ---------------------------------------------------------------------- +// A memoization table for memory-cheap scalar values. + +// The memoization table remembers and allows to look up the insertion +// index for each key. 
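+// Illustrative sketch of that contract (hypothetical usage of the
+// ScalarMemoTable defined just below, not part of the vendored header):
+// memoizing {7, 9, 7, 3} yields the insertion indices {0, 1, 0, 2}, which is
+// exactly the index/dictionary split used by dictionary encoding.
+//
+//   #include <cstdint>
+//   #include "arrow/util/hashing.h"
+//
+//   arrow::Status Demo(arrow::MemoryPool* pool) {
+//     arrow::internal::ScalarMemoTable<int64_t> table(pool);
+//     for (int64_t v : {7, 9, 7, 3}) {
+//       int32_t index;
+//       ARROW_RETURN_NOT_OK(table.GetOrInsert(v, &index));  // 0, 1, 0, 2
+//     }
+//     return arrow::Status::OK();
+//   }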
+ +template class HashTableTemplateType = HashTable> +class ScalarMemoTable : public MemoTable { + public: + explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0) + : hash_table_(pool, static_cast(entries)) {} + + int32_t Get(const Scalar& value) const { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(payload->value, value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + template + Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto cmp_func = [value](const Payload* payload) -> bool { + return ScalarHelper::CompareScalars(value, payload->value); + }; + hash_t h = ComputeHash(value); + auto p = hash_table_.Lookup(h, cmp_func); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index})); + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if (memo_index != kKeyNotFound) { + on_found(memo_index); + } else { + null_index_ = memo_index = size(); + on_not_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table +1 if null was added. + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size()) + (GetNull() != kKeyNotFound); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + hash_table_.VisitEntries([=](const HashTableEntry* entry) { + int32_t index = entry->payload.memo_index - start; + if (index >= 0) { + out_data[index] = entry->payload.value; + } + }); + // Zero-initialize the null entry + if (null_index_ != kKeyNotFound) { + int32_t index = null_index_ - start; + if (index >= 0) { + out_data[index] = Scalar{}; + } + } + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + protected: + struct Payload { + Scalar value; + int32_t memo_index; + }; + + using HashTableType = HashTableTemplateType; + using HashTableEntry = typename HashTableType::Entry; + HashTableType hash_table_; + int32_t null_index_ = kKeyNotFound; + + hash_t ComputeHash(const Scalar& value) const { + return ScalarHelper::ComputeHash(value); + } + + public: + // defined here so that `HashTableType` is visible + // Merge entries from `other_table` into `this->hash_table_`. 
+ Status MergeTable(const ScalarMemoTable& other_table) { + const HashTableType& other_hashtable = other_table.hash_table_; + + other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused)); + }); + // TODO: ARROW-17074 - implement proper error handling + return Status::OK(); + } +}; + +// ---------------------------------------------------------------------- +// A memoization table for small scalar values, using direct indexing + +template +struct SmallScalarTraits {}; + +template <> +struct SmallScalarTraits { + static constexpr int32_t cardinality = 2; + + static uint32_t AsIndex(bool value) { return value ? 1 : 0; } +}; + +template +struct SmallScalarTraits::value>> { + using Unsigned = typename std::make_unsigned::type; + + static constexpr int32_t cardinality = 1U + std::numeric_limits::max(); + + static uint32_t AsIndex(Scalar value) { return static_cast(value); } +}; + +template class HashTableTemplateType = HashTable> +class SmallScalarMemoTable : public MemoTable { + public: + explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) { + std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound); + index_to_value_.reserve(cardinality); + } + + int32_t Get(const Scalar value) const { + auto value_index = AsIndex(value); + return value_to_index_[value_index]; + } + + template + Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + auto value_index = AsIndex(value); + auto memo_index = value_to_index_[value_index]; + if (memo_index == kKeyNotFound) { + memo_index = static_cast(index_to_value_.size()); + index_to_value_.push_back(value); + value_to_index_[value_index] = memo_index; + DCHECK_LT(memo_index, cardinality + 1); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + Status GetOrInsert(const Scalar value, int32_t* out_memo_index) { + return GetOrInsert( + value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + int32_t GetNull() const { return value_to_index_[cardinality]; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + auto memo_index = GetNull(); + if (memo_index == kKeyNotFound) { + memo_index = value_to_index_[cardinality] = size(); + index_to_value_.push_back(0); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { return static_cast(index_to_value_.size()); } + + // Merge entries from `other_table` into `this`. 
+ Status MergeTable(const SmallScalarMemoTable& other_table) { + for (const Scalar& other_val : other_table.index_to_value_) { + int32_t unused; + RETURN_NOT_OK(this->GetOrInsert(other_val, &unused)); + } + return Status::OK(); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, Scalar* out_data) const { + DCHECK_GE(start, 0); + DCHECK_LE(static_cast(start), index_to_value_.size()); + int64_t offset = start * static_cast(sizeof(Scalar)); + memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar)); + } + + void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); } + + const std::vector& values() const { return index_to_value_; } + + protected: + static constexpr auto cardinality = SmallScalarTraits::cardinality; + static_assert(cardinality <= 256, "cardinality too large for direct-addressed table"); + + uint32_t AsIndex(Scalar value) const { + return SmallScalarTraits::AsIndex(value); + } + + // The last index is reserved for the null element. + int32_t value_to_index_[cardinality + 1]; + std::vector index_to_value_; +}; + +// ---------------------------------------------------------------------- +// A memoization table for variable-sized binary data. + +template +class BinaryMemoTable : public MemoTable { + public: + using builder_offset_type = typename BinaryBuilderT::offset_type; + explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0, + int64_t values_size = -1) + : hash_table_(pool, static_cast(entries)), binary_builder_(pool) { + const int64_t data_size = (values_size < 0) ? entries * 4 : values_size; + DCHECK_OK(binary_builder_.Resize(entries)); + DCHECK_OK(binary_builder_.ReserveData(data_size)); + } + + int32_t Get(const void* data, builder_offset_type length) const { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + if (p.second) { + return p.first->payload.memo_index; + } else { + return kKeyNotFound; + } + } + + int32_t Get(std::string_view value) const { + return Get(value.data(), static_cast(value.length())); + } + + template + Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found, + Func2&& on_not_found, int32_t* out_memo_index) { + hash_t h = ComputeStringHash<0>(data, length); + auto p = Lookup(h, data, length); + int32_t memo_index; + if (p.second) { + memo_index = p.first->payload.memo_index; + on_found(memo_index); + } else { + memo_index = size(); + // Insert string value + RETURN_NOT_OK(binary_builder_.Append(static_cast(data), length)); + // Insert hash entry + RETURN_NOT_OK( + hash_table_.Insert(const_cast(p.first), h, {memo_index})); + + on_not_found(memo_index); + } + *out_memo_index = memo_index; + return Status::OK(); + } + + template + Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found, + int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + std::forward(on_found), std::forward(on_not_found), + out_memo_index); + } + + Status GetOrInsert(const void* data, builder_offset_type length, + int32_t* out_memo_index) { + return GetOrInsert( + data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index); + } + + Status GetOrInsert(std::string_view value, int32_t* out_memo_index) { + return GetOrInsert(value.data(), static_cast(value.length()), + out_memo_index); + } + + int32_t GetNull() const { return null_index_; } + + template + int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) { + int32_t memo_index = GetNull(); + if 
(memo_index == kKeyNotFound) { + memo_index = null_index_ = size(); + DCHECK_OK(binary_builder_.AppendNull()); + on_not_found(memo_index); + } else { + on_found(memo_index); + } + return memo_index; + } + + int32_t GetOrInsertNull() { + return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {}); + } + + // The number of entries in the memo table + // (which is also 1 + the largest memo index) + int32_t size() const override { + return static_cast(hash_table_.size() + (GetNull() != kKeyNotFound)); + } + + int64_t values_size() const { return binary_builder_.value_data_length(); } + + // Copy (n + 1) offsets starting from index `start` into `out_data` + template + void CopyOffsets(int32_t start, Offset* out_data) const { + DCHECK_LE(start, size()); + + const builder_offset_type* offsets = binary_builder_.offsets_data(); + const builder_offset_type delta = + start < binary_builder_.length() ? offsets[start] : 0; + for (int32_t i = start; i < size(); ++i) { + const builder_offset_type adjusted_offset = offsets[i] - delta; + Offset cast_offset = static_cast(adjusted_offset); + assert(static_cast(cast_offset) == + adjusted_offset); // avoid truncation + *out_data++ = cast_offset; + } + + // Copy last value since BinaryBuilder only materializes it on in Finish() + *out_data = static_cast(binary_builder_.value_data_length() - delta); + } + + template + void CopyOffsets(Offset* out_data) const { + CopyOffsets(0, out_data); + } + + // Copy values starting from index `start` into `out_data` + void CopyValues(int32_t start, uint8_t* out_data) const { + CopyValues(start, -1, out_data); + } + + // Same as above, but check output size in debug mode + void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const { + DCHECK_LE(start, size()); + + // The absolute byte offset of `start` value in the binary buffer. + const builder_offset_type offset = binary_builder_.offset(start); + const auto length = binary_builder_.value_data_length() - static_cast(offset); + + if (out_size != -1) { + assert(static_cast(length) <= out_size); + } + + auto view = binary_builder_.GetView(start); + memcpy(out_data, view.data(), length); + } + + void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); } + + void CopyValues(int64_t out_size, uint8_t* out_data) const { + CopyValues(0, out_size, out_data); + } + + void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size, + uint8_t* out_data) const { + // This method exists to cope with the fact that the BinaryMemoTable does + // not know the fixed width when inserting the null value. The data + // buffer hold a zero length string for the null value (if found). + // + // Thus, the method will properly inject an empty value of the proper width + // in the output buffer. + // + if (start >= size()) { + return; + } + + int32_t null_index = GetNull(); + if (null_index < start) { + // Nothing to skip, proceed as usual. + CopyValues(start, out_size, out_data); + return; + } + + builder_offset_type left_offset = binary_builder_.offset(start); + + // Ensure that the data length is exactly missing width_size bytes to fit + // in the expected output (n_values * width_size). +#ifndef NDEBUG + int64_t data_length = values_size() - static_cast(left_offset); + assert(data_length + width_size == out_size); + ARROW_UNUSED(data_length); +#endif + + auto in_data = binary_builder_.value_data() + left_offset; + // The null use 0-length in the data, slice the data in 2 and skip by + // width_size in out_data. 
[part_1][width_size][part_2] + auto null_data_offset = binary_builder_.offset(null_index); + auto left_size = null_data_offset - left_offset; + if (left_size > 0) { + memcpy(out_data, in_data + left_offset, left_size); + } + // Zero-initialize the null entry + memset(out_data + left_size, 0, width_size); + + auto right_size = values_size() - static_cast(null_data_offset); + if (right_size > 0) { + // skip the null fixed size value. + auto out_offset = left_size + width_size; + assert(out_data + out_offset + right_size == out_data + out_size); + memcpy(out_data + out_offset, in_data + null_data_offset, right_size); + } + } + + // Visit the stored values in insertion order. + // The visitor function should have the signature `void(std::string_view)` + // or `void(const std::string_view&)`. + template + void VisitValues(int32_t start, VisitFunc&& visit) const { + for (int32_t i = start; i < size(); ++i) { + visit(binary_builder_.GetView(i)); + } + } + + protected: + struct Payload { + int32_t memo_index; + }; + + using HashTableType = HashTable; + using HashTableEntry = typename HashTable::Entry; + HashTableType hash_table_; + BinaryBuilderT binary_builder_; + + int32_t null_index_ = kKeyNotFound; + + std::pair Lookup(hash_t h, const void* data, + builder_offset_type length) const { + auto cmp_func = [&](const Payload* payload) { + std::string_view lhs = binary_builder_.GetView(payload->memo_index); + std::string_view rhs(static_cast(data), length); + return lhs == rhs; + }; + return hash_table_.Lookup(h, cmp_func); + } + + public: + Status MergeTable(const BinaryMemoTable& other_table) { + other_table.VisitValues(0, [this](std::string_view other_value) { + int32_t unused; + DCHECK_OK(this->GetOrInsert(other_value, &unused)); + }); + return Status::OK(); + } +}; + +template +struct HashTraits {}; + +template <> +struct HashTraits { + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits> { + using c_type = typename T::c_type; + using MemoTableType = SmallScalarMemoTable; +}; + +template +struct HashTraits::value && !is_8bit_int::value>> { + using c_type = typename T::c_type; + using MemoTableType = ScalarMemoTable; +}; + +template +struct HashTraits::value && + !std::is_base_of::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits> { + using MemoTableType = BinaryMemoTable; +}; + +template +struct HashTraits::value>> { + using MemoTableType = BinaryMemoTable; +}; + +template +static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table, + int64_t start_offset, int64_t* null_count, + std::shared_ptr* null_bitmap) { + int64_t dict_length = static_cast(memo_table.size()) - start_offset; + int64_t null_index = memo_table.GetNull(); + + *null_count = 0; + *null_bitmap = nullptr; + + if (null_index != kKeyNotFound && null_index >= start_offset) { + null_index -= start_offset; + *null_count = 1; + ARROW_ASSIGN_OR_RAISE(*null_bitmap, + internal::BitmapAllButOne(pool, dict_length, null_index)); + } + + return Status::OK(); +} + +struct StringViewHash { + // std::hash compatible hasher for use with std::unordered_* + // (the std::hash specialization provided by nonstd constructs std::string + // temporaries then invokes std::hash against those) + hash_t operator()(std::string_view value) const { + return ComputeStringHash<0>(value.data(), static_cast(value.size())); + } +}; + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h 
b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe78be2470ddb846b5816be632e9921c041a23e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +// "safe-math.h" includes from the Windows headers. +#include "arrow/util/windows_compatibility.h" +#include "arrow/vendored/portable-snippets/safe-math.h" +// clang-format off (avoid include reordering) +#include "arrow/util/windows_fixup.h" +// clang-format on + +namespace arrow { +namespace internal { + +// Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow +// with the signature `bool(T u, T v, T* out)` where T is an integer type. +// On overflow, these functions return true. Otherwise, false is returned +// and `out` is updated with the result of the operation. + +#define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \ + [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \ + return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v); \ + } + +#define OPS_WITH_OVERFLOW(_func_name, _psnip_op) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \ + OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64) + +OPS_WITH_OVERFLOW(AddWithOverflow, add) +OPS_WITH_OVERFLOW(SubtractWithOverflow, sub) +OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul) +OPS_WITH_OVERFLOW(DivideWithOverflow, div) + +#undef OP_WITH_OVERFLOW +#undef OPS_WITH_OVERFLOW + +// Define function NegateWithOverflow with the signature `bool(T u, T* out)` +// where T is a signed integer type. On overflow, these functions return true. +// Otherwise, false is returned and `out` is updated with the result of the +// operation. 
+
+#define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \
+  [[nodiscard]] static inline bool _func_name(_type u, _type* out) {      \
+    return !psnip_safe_##_psnip_type##_##_psnip_op(out, u);               \
+  }
+
+#define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op)   \
+  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8)   \
+  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \
+  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \
+  UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64)
+
+SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg)
+
+#undef UNARY_OP_WITH_OVERFLOW
+#undef SIGNED_UNARY_OPS_WITH_OVERFLOW
+
+/// Signed addition with well-defined behaviour on overflow (as unsigned)
+template <typename SignedInt>
+SignedInt SafeSignedAdd(SignedInt u, SignedInt v) {
+  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
+  return static_cast<SignedInt>(static_cast<UnsignedInt>(u) +
+                                static_cast<UnsignedInt>(v));
+}
+
+/// Signed subtraction with well-defined behaviour on overflow (as unsigned)
+template <typename SignedInt>
+SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) {
+  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
+  return static_cast<SignedInt>(static_cast<UnsignedInt>(u) -
+                                static_cast<UnsignedInt>(v));
+}
+
+/// Signed negation with well-defined behaviour on overflow (as unsigned)
+template <typename SignedInt>
+SignedInt SafeSignedNegate(SignedInt u) {
+  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
+  return static_cast<SignedInt>(~static_cast<UnsignedInt>(u) + 1);
+}
+
+/// Signed left shift with well-defined behaviour on negative numbers or overflow
+template <typename SignedInt, typename Shift>
+SignedInt SafeLeftShift(SignedInt u, Shift shift) {
+  using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
+  return static_cast<SignedInt>(static_cast<UnsignedInt>(u) << shift);
+}
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f5bbd169e2eb60e97958d7375f63c15ae5d9fe4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h
@@ -0,0 +1,452 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#ifndef _WIN32
+#define ARROW_HAVE_SIGACTION 1
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if ARROW_HAVE_SIGACTION
+#include <signal.h>  // Needed for struct sigaction
+#endif
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/windows_fixup.h"
+
+namespace arrow::internal {
+
+// NOTE: 8-bit path strings on Windows are encoded using UTF-8.
+// Using MBCS would fail encoding some paths.
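+// Illustrative sketch for the overflow helpers above (hypothetical usage, not
+// part of the vendored header): the *WithOverflow functions report overflow
+// through their return value, while the SafeSigned* helpers wrap around like
+// unsigned arithmetic instead of invoking undefined behaviour.
+//
+//   #include <cassert>
+//   #include <cstdint>
+//   #include "arrow/util/int_util_overflow.h"
+//
+//   void Demo() {
+//     int8_t out;
+//     assert(arrow::internal::AddWithOverflow(int8_t{127}, int8_t{1}, &out));
+//     // returned true: 127 + 1 overflows int8_t; `out` is not meaningful here
+//     assert(!arrow::internal::AddWithOverflow(int8_t{2}, int8_t{3}, &out));
+//     assert(out == 5);  // no overflow: the result was stored
+//     // Wraps around on two's-complement targets:
+//     assert(arrow::internal::SafeSignedAdd(INT32_MAX, 1) == INT32_MIN);
+//   }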
+ +#if defined(_WIN32) +using NativePathString = std::wstring; +#else +using NativePathString = std::string; +#endif + +class ARROW_EXPORT PlatformFilename { + public: + struct Impl; + + ~PlatformFilename(); + PlatformFilename(); + PlatformFilename(const PlatformFilename&); + PlatformFilename(PlatformFilename&&); + PlatformFilename& operator=(const PlatformFilename&); + PlatformFilename& operator=(PlatformFilename&&); + explicit PlatformFilename(NativePathString path); + explicit PlatformFilename(const NativePathString::value_type* path); + + const NativePathString& ToNative() const; + std::string ToString() const; + + PlatformFilename Parent() const; + Result Real() const; + + // These functions can fail for character encoding reasons. + static Result FromString(std::string_view file_name); + Result Join(std::string_view child_name) const; + + PlatformFilename Join(const PlatformFilename& child_name) const; + + bool operator==(const PlatformFilename& other) const; + bool operator!=(const PlatformFilename& other) const; + + // Made public to avoid the proliferation of friend declarations. + const Impl* impl() const { return impl_.get(); } + + private: + std::unique_ptr impl_; + + explicit PlatformFilename(Impl impl); +}; + +/// Create a directory if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDir(const PlatformFilename& dir_path); + +/// Create a directory and its parents if it doesn't exist. +/// +/// Return whether the directory was created. +ARROW_EXPORT +Result CreateDirTree(const PlatformFilename& dir_path); + +/// Delete a directory's contents (but not the directory itself) if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirContents(const PlatformFilename& dir_path, + bool allow_not_found = true); + +/// Delete a directory tree if it exists. +/// +/// Return whether the directory existed. +ARROW_EXPORT +Result DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true); + +// Non-recursively list the contents of the given directory. +// The returned names are the children's base names, not including dir_path. +ARROW_EXPORT +Result> ListDir(const PlatformFilename& dir_path); + +/// Delete a file if it exists. +/// +/// Return whether the file existed. +ARROW_EXPORT +Result DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true); + +/// Return whether a file exists. +ARROW_EXPORT +Result FileExists(const PlatformFilename& path); + +// TODO expose this more publicly to make it available from io/file.h? +/// A RAII wrapper for a file descriptor. +/// +/// The underlying file descriptor is automatically closed on destruction. +/// Moving is supported with well-defined semantics. +/// Furthermore, closing is idempotent. +class ARROW_EXPORT FileDescriptor { + public: + FileDescriptor() = default; + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(FileDescriptor&&); + FileDescriptor& operator=(FileDescriptor&&); + + ~FileDescriptor(); + + Status Close(); + + /// May return -1 if closed or default-initialized + int fd() const { return fd_.load(); } + + /// Detach and return the underlying file descriptor + int Detach(); + + bool closed() const { return fd_.load() == -1; } + + protected: + static void CloseFromDestructor(int fd); + + std::atomic fd_{-1}; +}; + +/// Open a file for reading and return a file descriptor. +ARROW_EXPORT +Result FileOpenReadable(const PlatformFilename& file_name); + +/// Open a file for writing and return a file descriptor. 
+ARROW_EXPORT +Result FileOpenWritable(const PlatformFilename& file_name, + bool write_only = true, bool truncate = true, + bool append = false); + +/// Read from current file position. Return number of bytes read. +ARROW_EXPORT +Result FileRead(int fd, uint8_t* buffer, int64_t nbytes); +/// Read from given file position. Return number of bytes read. +ARROW_EXPORT +Result FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes); + +ARROW_EXPORT +Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes); +ARROW_EXPORT +Status FileTruncate(int fd, const int64_t size); + +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos); +ARROW_EXPORT +Status FileSeek(int fd, int64_t pos, int whence); +ARROW_EXPORT +Result FileTell(int fd); +ARROW_EXPORT +Result FileGetSize(int fd); + +ARROW_EXPORT +Status FileClose(int fd); + +struct Pipe { + FileDescriptor rfd; + FileDescriptor wfd; + + Status Close() { return rfd.Close() & wfd.Close(); } +}; + +ARROW_EXPORT +Result CreatePipe(); + +ARROW_EXPORT +Status SetPipeFileDescriptorNonBlocking(int fd); + +class ARROW_EXPORT SelfPipe { + public: + static Result> Make(bool signal_safe); + virtual ~SelfPipe(); + + /// \brief Wait for a wakeup. + /// + /// Status::Invalid is returned if the pipe has been shutdown. + /// Otherwise the next sent payload is returned. + virtual Result Wait() = 0; + + /// \brief Wake up the pipe by sending a payload. + /// + /// This method is async-signal-safe if `signal_safe` was set to true. + virtual void Send(uint64_t payload) = 0; + + /// \brief Wake up the pipe and shut it down. + virtual Status Shutdown() = 0; +}; + +ARROW_EXPORT +int64_t GetPageSize(); + +struct MemoryRegion { + void* addr; + size_t size; +}; + +ARROW_EXPORT +Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes, + void** new_addr); +ARROW_EXPORT +Status MemoryAdviseWillNeed(const std::vector& regions); + +ARROW_EXPORT +Result GetEnvVar(const char* name); +ARROW_EXPORT +Result GetEnvVar(const std::string& name); +ARROW_EXPORT +Result GetEnvVarNative(const char* name); +ARROW_EXPORT +Result GetEnvVarNative(const std::string& name); + +ARROW_EXPORT +Status SetEnvVar(const char* name, const char* value); +ARROW_EXPORT +Status SetEnvVar(const std::string& name, const std::string& value); +ARROW_EXPORT +Status DelEnvVar(const char* name); +ARROW_EXPORT +Status DelEnvVar(const std::string& name); + +ARROW_EXPORT +std::string ErrnoMessage(int errnum); +#if _WIN32 +ARROW_EXPORT +std::string WinErrorMessage(int errnum); +#endif + +ARROW_EXPORT +std::shared_ptr StatusDetailFromErrno(int errnum); +ARROW_EXPORT +std::optional ErrnoFromStatusDetail(const StatusDetail& detail); +#if _WIN32 +ARROW_EXPORT +std::shared_ptr StatusDetailFromWinError(int errnum); +#endif +ARROW_EXPORT +std::shared_ptr StatusDetailFromSignal(int signum); + +template +Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromErrno(int errnum, Args&&... args) { + return StatusFromErrno(errnum, StatusCode::IOError, std::forward(args)...); +} + +#if _WIN32 +template +Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum), + std::forward(args)...); +} + +template +Status IOErrorFromWinError(int errnum, Args&&... 
args) { + return StatusFromWinError(errnum, StatusCode::IOError, std::forward(args)...); +} +#endif + +template +Status StatusFromSignal(int signum, StatusCode code, Args&&... args) { + return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum), + std::forward(args)...); +} + +template +Status CancelledFromSignal(int signum, Args&&... args) { + return StatusFromSignal(signum, StatusCode::Cancelled, std::forward(args)...); +} + +ARROW_EXPORT +int ErrnoFromStatus(const Status&); + +// Always returns 0 on non-Windows platforms (for Python). +ARROW_EXPORT +int WinErrorFromStatus(const Status&); + +ARROW_EXPORT +int SignalFromStatus(const Status&); + +class ARROW_EXPORT TemporaryDir { + public: + ~TemporaryDir(); + + /// '/'-terminated path to the temporary dir + const PlatformFilename& path() { return path_; } + + /// Create a temporary subdirectory in the system temporary dir, + /// named starting with `prefix`. + static Result> Make(const std::string& prefix); + + private: + PlatformFilename path_; + + explicit TemporaryDir(PlatformFilename&&); +}; + +class ARROW_EXPORT SignalHandler { + public: + using Callback = void (*)(int); + + SignalHandler(); + explicit SignalHandler(Callback cb); +#if ARROW_HAVE_SIGACTION + explicit SignalHandler(const struct sigaction& sa); +#endif + + Callback callback() const; +#if ARROW_HAVE_SIGACTION + const struct sigaction& action() const; +#endif + + protected: +#if ARROW_HAVE_SIGACTION + // Storing the full sigaction allows to restore the entire signal handling + // configuration. + struct sigaction sa_; +#else + Callback cb_; +#endif +}; + +/// \brief Return the current handler for the given signal number. +ARROW_EXPORT +Result GetSignalHandler(int signum); + +/// \brief Set a new handler for the given signal number. +/// +/// The old signal handler is returned. +ARROW_EXPORT +Result SetSignalHandler(int signum, const SignalHandler& handler); + +/// \brief Reinstate the signal handler +/// +/// For use in signal handlers. This is needed on platforms without sigaction() +/// such as Windows, as the default signal handler is restored there as +/// soon as a signal is raised. +ARROW_EXPORT +void ReinstateSignalHandler(int signum, SignalHandler::Callback handler); + +/// \brief Send a signal to the current process +/// +/// The thread which will receive the signal is unspecified. +ARROW_EXPORT +Status SendSignal(int signum); + +/// \brief Send a signal to the given thread +/// +/// This function isn't supported on Windows. +ARROW_EXPORT +Status SendSignalToThread(int signum, uint64_t thread_id); + +/// \brief Get an unpredictable random seed +/// +/// This function may be slightly costly, so should only be used to initialize +/// a PRNG, not to generate a large amount of random numbers. +/// It is better to use this function rather than std::random_device, unless +/// absolutely necessary (e.g. to generate a cryptographic secret). +ARROW_EXPORT +int64_t GetRandomSeed(); + +/// \brief Get the current thread id +/// +/// In addition to having the same properties as std::thread, the returned value +/// is a regular integer value, which is more convenient than an opaque type. 
+ARROW_EXPORT +uint64_t GetThreadId(); + +/// \brief Get the current memory used by the current process in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetCurrentRSS(); + +/// \brief Get the total memory available to the system in bytes +/// +/// This function supports Windows, Linux, and Mac and will return 0 otherwise +ARROW_EXPORT +int64_t GetTotalMemoryBytes(); + +/// \brief Load a dynamic library +/// +/// This wraps dlopen() except on Windows, where LoadLibrary() is called. +/// These two platforms handle absolute paths consistently; relative paths +/// or the library's bare name may be handled but inconsistently. +/// +/// \return An opaque handle for the dynamic library, which can be used for +/// subsequent symbol lookup. Nullptr will never be returned; instead +/// an error will be raised. +ARROW_EXPORT Result LoadDynamicLibrary(const PlatformFilename& path); + +/// \brief Load a dynamic library +/// +/// An overload taking null terminated string. +ARROW_EXPORT Result LoadDynamicLibrary(const char* path); + +/// \brief Retrieve a symbol by name from a library handle. +/// +/// This wraps dlsym() except on Windows, where GetProcAddress() is called. +/// +/// \return The address associated with the named symbol. Nullptr will never be +/// returned; instead an error will be raised. +ARROW_EXPORT Result GetSymbol(void* handle, const char* name); + +template +Result GetSymbolAs(void* handle, const char* name) { + ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name)); + return reinterpret_cast(sym); +} + +} // namespace arrow::internal diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5e716d0fd113d339a34b16e6f7353a169829e3e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h @@ -0,0 +1,568 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/compare.h" +#include "arrow/util/functional.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +template +class Iterator; + +template +struct IterationTraits { + /// \brief a reserved value which indicates the end of iteration. By + /// default this is NULLPTR since most iterators yield pointer types. + /// Specialize IterationTraits if different end semantics are required. + /// + /// Note: This should not be used to determine if a given value is a + /// terminal value. 
Use IsIterationEnd (which uses IsEnd) instead. This + /// is only for returning terminal values. + static T End() { return T(NULLPTR); } + + /// \brief Checks to see if the value is a terminal value. + /// A method is used here since T is not necessarily comparable in many + /// cases even though it has a distinct final value + static bool IsEnd(const T& val) { return val == End(); } +}; + +template +T IterationEnd() { + return IterationTraits::End(); +} + +template +bool IsIterationEnd(const T& val) { + return IterationTraits::IsEnd(val); +} + +template +struct IterationTraits> { + /// \brief by default when iterating through a sequence of optional, + /// nullopt indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. + static std::optional End() { return std::nullopt; } + + /// \brief by default when iterating through a sequence of optional, + /// nullopt (!has_value()) indicates the end of iteration. + /// Specialize IterationTraits if different end semantics are required. + static bool IsEnd(const std::optional& val) { return !val.has_value(); } + + // TODO(bkietz) The range-for loop over Iterator> yields + // Result> which is unnecessary (since only the unyielded end optional + // is nullopt. Add IterationTraits::GetRangeElement() to handle this case +}; + +/// \brief A generic Iterator that can return errors +template +class Iterator : public util::EqualityComparable> { + public: + /// \brief Iterator may be constructed from any type which has a member function + /// with signature Result Next(); + /// End of iterator is signalled by returning IteratorTraits::End(); + /// + /// The argument is moved or copied to the heap and kept in a unique_ptr. Only + /// its destructor and its Next method (which are stored in function pointers) are + /// referenced after construction. + /// + /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using + /// an abstract template base class: instead of being inlined as usual for a template + /// function the base's virtual destructor will be exported, leading to multiple + /// definition errors when linking to any other TU where the base is instantiated. + template + explicit Iterator(Wrapped has_next) + : ptr_(new Wrapped(std::move(has_next)), Delete), next_(Next) {} + + Iterator() : ptr_(NULLPTR, [](void*) {}) {} + + /// \brief Return the next element of the sequence, IterationTraits::End() when the + /// iteration is completed. Calling this on a default constructed Iterator + /// will result in undefined behavior. + Result Next() { return next_(ptr_.get()); } + + /// Pass each element of the sequence to a visitor. Will return any error status + /// returned by the visitor, terminating iteration. + template + Status Visit(Visitor&& visitor) { + for (;;) { + ARROW_ASSIGN_OR_RAISE(auto value, Next()); + + if (IsIterationEnd(value)) break; + + ARROW_RETURN_NOT_OK(visitor(std::move(value))); + } + + return Status::OK(); + } + + /// Iterators will only compare equal if they are both null. + /// Equality comparability is required to make an Iterator of Iterators + /// (to check for the end condition). 
+ bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; } + + explicit operator bool() const { return ptr_ != NULLPTR; } + + class RangeIterator { + public: + RangeIterator() : value_(IterationTraits::End()) {} + + explicit RangeIterator(Iterator i) + : value_(IterationTraits::End()), + iterator_(std::make_shared(std::move(i))) { + Next(); + } + + bool operator!=(const RangeIterator& other) const { return value_ != other.value_; } + + RangeIterator& operator++() { + Next(); + return *this; + } + + Result operator*() { + ARROW_RETURN_NOT_OK(value_.status()); + + auto value = std::move(value_); + value_ = IterationTraits::End(); + return value; + } + + private: + void Next() { + if (!value_.ok()) { + value_ = IterationTraits::End(); + return; + } + value_ = iterator_->Next(); + } + + Result value_; + std::shared_ptr iterator_; + }; + + RangeIterator begin() { return RangeIterator(std::move(*this)); } + + RangeIterator end() { return RangeIterator(); } + + /// \brief Move every element of this iterator into a vector. + Result> ToVector() { + std::vector out; + for (auto maybe_element : *this) { + ARROW_ASSIGN_OR_RAISE(auto element, maybe_element); + out.push_back(std::move(element)); + } + // ARROW-8193: On gcc-4.8 without the explicit move it tries to use the + // copy constructor, which may be deleted on the elements of type T + return std::move(out); + } + + private: + /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and + /// deletes that. + template + static void Delete(void* ptr) { + delete static_cast(ptr); + } + + /// Implementation of Next: Casts from void* to the wrapped type and invokes that + /// type's Next member function. + template + static Result Next(void* ptr) { + return static_cast(ptr)->Next(); + } + + /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first + /// casts from void* to a pointer to the wrapped type then deletes that. + std::unique_ptr ptr_; + + /// next_ is a function pointer which first casts from void* to a pointer to the wrapped + /// type then invokes its Next member function. 
+ Result (*next_)(void*) = NULLPTR; +}; + +template +struct TransformFlow { + using YieldValueType = T; + + TransformFlow(YieldValueType value, bool ready_for_next) + : finished_(false), + ready_for_next_(ready_for_next), + yield_value_(std::move(value)) {} + TransformFlow(bool finished, bool ready_for_next) + : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {} + + bool HasValue() const { return yield_value_.has_value(); } + bool Finished() const { return finished_; } + bool ReadyForNext() const { return ready_for_next_; } + T Value() const { return *yield_value_; } + + bool finished_ = false; + bool ready_for_next_ = false; + std::optional yield_value_; +}; + +struct TransformFinish { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(true, true); + } +}; + +struct TransformSkip { + template + operator TransformFlow() && { // NOLINT explicit + return TransformFlow(false, true); + } +}; + +template +TransformFlow TransformYield(T value = {}, bool ready_for_next = true) { + return TransformFlow(std::move(value), ready_for_next); +} + +template +using Transformer = std::function>(T)>; + +template +class TransformIterator { + public: + explicit TransformIterator(Iterator it, Transformer transformer) + : it_(std::move(it)), + transformer_(std::move(transformer)), + last_value_(), + finished_() {} + + Result Next() { + while (!finished_) { + ARROW_ASSIGN_OR_RAISE(std::optional next, Pump()); + if (next.has_value()) { + return std::move(*next); + } + ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next()); + } + return IterationTraits::End(); + } + + private: + // Calls the transform function on the current value. Can return in several ways + // * If the next value is requested (e.g. skip) it will return an empty optional + // * If an invalid status is encountered that will be returned + // * If finished it will return IterationTraits::End() + // * If a value is returned by the transformer that will be returned + Result> Pump() { + if (!finished_ && last_value_.has_value()) { + auto next_res = transformer_(*last_value_); + if (!next_res.ok()) { + finished_ = true; + return next_res.status(); + } + auto next = *next_res; + if (next.ReadyForNext()) { + if (IsIterationEnd(*last_value_)) { + finished_ = true; + } + last_value_.reset(); + } + if (next.Finished()) { + finished_ = true; + } + if (next.HasValue()) { + return next.Value(); + } + } + if (finished_) { + return IterationTraits::End(); + } + return std::nullopt; + } + + Iterator it_; + Transformer transformer_; + std::optional last_value_; + bool finished_ = false; +}; + +/// \brief Transforms an iterator according to a transformer, returning a new Iterator. +/// +/// The transformer will be called on each element of the source iterator and for each +/// call it can yield a value, skip, or finish the iteration. When yielding a value the +/// transformer can choose to consume the source item (the default, ready_for_next = true) +/// or to keep it and it will be called again on the same value. +/// +/// This is essentially a more generic form of the map operation that can return 0, 1, or +/// many values for each of the source items. +/// +/// The transformer will be exposed to the end of the source sequence +/// (IterationTraits::End) in case it needs to return some penultimate item(s). +/// +/// Any invalid status returned by the transformer will be returned immediately. 
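+// Illustrative sketch (hypothetical, not from the Arrow sources): a transformer
+// that keeps even numbers and finishes at the source's end marker; the factory
+// declared just below stitches it onto a source iterator.
+//
+//   #include <optional>
+//   #include "arrow/util/iterator.h"
+//
+//   using Elem = std::optional<int>;
+//   arrow::Transformer<Elem, Elem> keep_even =
+//       [](Elem v) -> arrow::Result<arrow::TransformFlow<Elem>> {
+//     if (!v.has_value()) return arrow::TransformFinish();  // source exhausted
+//     if (*v % 2 != 0) return arrow::TransformSkip();       // drop odd values
+//     return arrow::TransformYield(v);                      // emit and consume
+//   };
+//
+//   // Given some arrow::Iterator<Elem> source:
+//   // auto evens = arrow::MakeTransformedIterator(std::move(source), keep_even);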
+template +Iterator MakeTransformedIterator(Iterator it, Transformer op) { + return Iterator(TransformIterator(std::move(it), std::move(op))); +} + +template +struct IterationTraits> { + // The end condition for an Iterator of Iterators is a default constructed (null) + // Iterator. + static Iterator End() { return Iterator(); } + static bool IsEnd(const Iterator& val) { return !val; } +}; + +template +class FunctionIterator { + public: + explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {} + + Result Next() { return fn_(); } + + private: + Fn fn_; +}; + +/// \brief Construct an Iterator which invokes a callable on Next() +template ::ValueType> +Iterator MakeFunctionIterator(Fn fn) { + return Iterator(FunctionIterator(std::move(fn))); +} + +template +Iterator MakeEmptyIterator() { + return MakeFunctionIterator([]() -> Result { return IterationTraits::End(); }); +} + +template +Iterator MakeErrorIterator(Status s) { + return MakeFunctionIterator([s]() -> Result { + ARROW_RETURN_NOT_OK(s); + return IterationTraits::End(); + }); +} + +/// \brief Simple iterator which yields the elements of a std::vector +template +class VectorIterator { + public: + explicit VectorIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return IterationTraits::End(); + } + return std::move(elements_[i_++]); + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorIterator(std::vector v) { + return Iterator(VectorIterator(std::move(v))); +} + +/// \brief Simple iterator which yields *pointers* to the elements of a std::vector. +/// This is provided to support T where IterationTraits::End is not specialized +template +class VectorPointingIterator { + public: + explicit VectorPointingIterator(std::vector v) : elements_(std::move(v)) {} + + Result Next() { + if (i_ == elements_.size()) { + return NULLPTR; + } + return &elements_[i_++]; + } + + private: + std::vector elements_; + size_t i_ = 0; +}; + +template +Iterator MakeVectorPointingIterator(std::vector v) { + return Iterator(VectorPointingIterator(std::move(v))); +} + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template +class MapIterator { + public: + explicit MapIterator(Fn map, Iterator it) + : map_(std::move(map)), it_(std::move(it)) {} + + Result Next() { + ARROW_ASSIGN_OR_RAISE(I i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + return map_(std::move(i)); + } + + private: + Fn map_; + Iterator it_; +}; + +/// \brief MapIterator takes ownership of an iterator and a function to apply +/// on every element. The mapped function is not allowed to fail. +template , + typename To = internal::call_traits::return_type> +Iterator MakeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +/// \brief Like MapIterator, but where the function can fail. 
+template , + typename To = typename internal::call_traits::return_type::ValueType> +Iterator MakeMaybeMapIterator(Fn map, Iterator it) { + return Iterator(MapIterator(std::move(map), std::move(it))); +} + +struct FilterIterator { + enum Action { ACCEPT, REJECT }; + + template + static Result> Reject() { + return std::make_pair(IterationTraits::End(), REJECT); + } + + template + static Result> Accept(To out) { + return std::make_pair(std::move(out), ACCEPT); + } + + template + static Result> MaybeAccept(Result maybe_out) { + return std::move(maybe_out).Map(Accept); + } + + template + static Result> Error(Status s) { + return s; + } + + template + class Impl { + public: + explicit Impl(Fn filter, Iterator it) : filter_(filter), it_(std::move(it)) {} + + Result Next() { + To out = IterationTraits::End(); + Action action; + + for (;;) { + ARROW_ASSIGN_OR_RAISE(From i, it_.Next()); + + if (IsIterationEnd(i)) { + return IterationTraits::End(); + } + + ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i))); + + if (action == ACCEPT) return out; + } + } + + private: + Fn filter_; + Iterator it_; + }; +}; + +/// \brief Like MapIterator, but where the function can fail or reject elements. +template < + typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>, + typename Ret = typename internal::call_traits::return_type::ValueType, + typename To = typename std::tuple_element<0, Ret>::type, + typename Enable = typename std::enable_if::type, FilterIterator::Action>::value>::type> +Iterator MakeFilterIterator(Fn filter, Iterator it) { + return Iterator( + FilterIterator::Impl(std::move(filter), std::move(it))); +} + +/// \brief FlattenIterator takes an iterator generating iterators and yields a +/// unified iterator that flattens/concatenates in a single stream. +template +class FlattenIterator { + public: + explicit FlattenIterator(Iterator> it) : parent_(std::move(it)) {} + + Result Next() { + if (IsIterationEnd(child_)) { + // Pop from parent's iterator. + ARROW_ASSIGN_OR_RAISE(child_, parent_.Next()); + + // Check if final iteration reached. + if (IsIterationEnd(child_)) { + return IterationTraits::End(); + } + + return Next(); + } + + // Pop from child_ and check for depletion. + ARROW_ASSIGN_OR_RAISE(T out, child_.Next()); + if (IsIterationEnd(out)) { + // Reset state such that we pop from parent on the recursive call + child_ = IterationTraits>::End(); + + return Next(); + } + + return out; + } + + private: + Iterator> parent_; + Iterator child_ = IterationTraits>::End(); +}; + +template +Iterator MakeFlattenIterator(Iterator> it) { + return Iterator(FlattenIterator(std::move(it))); +} + +template +Iterator MakeIteratorFromReader( + const std::shared_ptr& reader) { + return MakeFunctionIterator([reader] { return reader->Next(); }); +} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h new file mode 100644 index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
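An illustrative sketch of the filter helpers above (the data and lambda are examples, not part of the header): keep strictly positive values, rejecting the rest. With the default IterationTraits, 0 is the int end sentinel, so the inputs avoid it.

  auto positives = arrow::MakeFilterIterator(
      [](int v) {
        return v > 0 ? arrow::FilterIterator::Accept(v)
                     : arrow::FilterIterator::Reject<int>();
      },
      arrow::MakeVectorIterator(std::vector<int>{1, -2, 3}));
  // positives yields 1, then 3.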
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e4533c4b4760a416b0aca4b91c32ffd324d7f08
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <new>
+
+namespace arrow {
+namespace internal {
+
+#if __cpp_lib_launder
+using std::launder;
+#else
+template <typename T>
+constexpr T* launder(T* p) noexcept {
+  return p;
+}
+#endif
+
+}  // namespace internal
+}  // namespace arrow
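A standard-library refresher on why launder exists (illustrative sketch, not part of the header): after reusing storage that held an object with a const member, a pointer into that storage must be laundered before the new object is accessed through it. On pre-C++17 toolchains the fallback above is a no-op.

  struct Pinned { const int id; };
  alignas(Pinned) unsigned char storage[sizeof(Pinned)];
  Pinned* p = new (storage) Pinned{1};
  p->~Pinned();
  new (storage) Pinned{2};
  int id = arrow::internal::launder(reinterpret_cast<Pinned*>(storage))->id;  // 2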
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h
new file mode 100644
index 0000000000000000000000000000000000000000..2baa560563bb4e8ac798a9ce5e8f4d392b40ec5f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h
@@ -0,0 +1,259 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#ifdef GANDIVA_IR
+
+// The LLVM IR code doesn't have an NDEBUG mode. And, it shouldn't include references to
+// streams or stdc++. So, making the DCHECK calls void in that case.
+
+#define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+#define DCHECK(condition) ARROW_IGNORE_EXPR(condition)
+#define DCHECK_OK(status) ARROW_IGNORE_EXPR(status)
+#define DCHECK_EQ(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_NE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_LE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_LT(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_GE(val1, val2) ARROW_IGNORE_EXPR(val1)
+#define DCHECK_GT(val1, val2) ARROW_IGNORE_EXPR(val1)
+
+#else  // !GANDIVA_IR
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+enum class ArrowLogLevel : int {
+  ARROW_DEBUG = -1,
+  ARROW_INFO = 0,
+  ARROW_WARNING = 1,
+  ARROW_ERROR = 2,
+  ARROW_FATAL = 3
+};
+
+#define ARROW_LOG_INTERNAL(level) ::arrow::util::ArrowLog(__FILE__, __LINE__, level)
+#define ARROW_LOG(level) ARROW_LOG_INTERNAL(::arrow::util::ArrowLogLevel::ARROW_##level)
+
+#define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+#define ARROW_CHECK_OR_LOG(condition, level) \
+  ARROW_PREDICT_TRUE(condition)              \
+  ? ARROW_IGNORE_EXPR(0)                     \
+  : ::arrow::util::Voidify() & ARROW_LOG(level) << " Check failed: " #condition " "
+
+#define ARROW_CHECK(condition) ARROW_CHECK_OR_LOG(condition, FATAL)
+
+// If 'to_call' returns a bad status, CHECK immediately with a logged message
+// of 'msg' followed by the status.
+#define ARROW_CHECK_OK_PREPEND(to_call, msg, level)                  \
+  do {                                                               \
+    ::arrow::Status _s = (to_call);                                  \
+    ARROW_CHECK_OR_LOG(_s.ok(), level)                               \
+        << "Operation failed: " << ARROW_STRINGIFY(to_call) << "\n"  \
+        << (msg) << ": " << _s.ToString();                           \
+  } while (false)
+
+// If the status is bad, CHECK immediately, appending the status to the
+// logged message.
+#define ARROW_CHECK_OK(s) ARROW_CHECK_OK_PREPEND(s, "Bad status", FATAL)
+
+#define ARROW_CHECK_EQ(val1, val2) ARROW_CHECK((val1) == (val2))
+#define ARROW_CHECK_NE(val1, val2) ARROW_CHECK((val1) != (val2))
+#define ARROW_CHECK_LE(val1, val2) ARROW_CHECK((val1) <= (val2))
+#define ARROW_CHECK_LT(val1, val2) ARROW_CHECK((val1) < (val2))
+#define ARROW_CHECK_GE(val1, val2) ARROW_CHECK((val1) >= (val2))
+#define ARROW_CHECK_GT(val1, val2) ARROW_CHECK((val1) > (val2))
+
+#ifdef NDEBUG
+#define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_WARNING
+
+// CAUTION: DCHECK_OK() always evaluates its argument, but other DCHECK*() macros
+// only do so in debug mode.
+
+#define ARROW_DCHECK(condition)            \
+  while (false) ARROW_IGNORE_EXPR(condition); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_OK(s) \
+  ARROW_IGNORE_EXPR(s);    \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_EQ(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_NE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_LE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_LT(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_GE(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+#define ARROW_DCHECK_GT(val1, val2)      \
+  while (false) ARROW_IGNORE_EXPR(val1); \
+  while (false) ARROW_IGNORE_EXPR(val2); \
+  while (false) ::arrow::util::detail::NullLog()
+
+#else
+#define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_FATAL
+
+#define ARROW_DCHECK ARROW_CHECK
+#define ARROW_DCHECK_OK ARROW_CHECK_OK
+#define ARROW_DCHECK_EQ ARROW_CHECK_EQ
+#define ARROW_DCHECK_NE ARROW_CHECK_NE
+#define ARROW_DCHECK_LE ARROW_CHECK_LE
+#define ARROW_DCHECK_LT ARROW_CHECK_LT
+#define ARROW_DCHECK_GE ARROW_CHECK_GE
+#define ARROW_DCHECK_GT ARROW_CHECK_GT
+
+#endif  // NDEBUG
+
+#define DCHECK ARROW_DCHECK
+#define DCHECK_OK ARROW_DCHECK_OK
+#define DCHECK_EQ ARROW_DCHECK_EQ
+#define DCHECK_NE ARROW_DCHECK_NE
+#define DCHECK_LE ARROW_DCHECK_LE
+#define DCHECK_LT ARROW_DCHECK_LT
+#define DCHECK_GE ARROW_DCHECK_GE
+#define DCHECK_GT ARROW_DCHECK_GT
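Usage sketch for the check macros above (DoWork is a hypothetical Status-returning helper):

  arrow::Status DoWork();
  void Example(int n) {
    DCHECK_GE(n, 0);      // compiled out under NDEBUG
    DCHECK_OK(DoWork());  // note: DoWork() is still evaluated under NDEBUG
    ARROW_CHECK(n < 1024) << "n too large: " << n;  // enforced in all builds
  }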
+
+// This code is adapted from
+// https://github.com/ray-project/ray/blob/master/src/ray/util/logging.h.
+
+// To make the logging lib pluggable with other logging libs and keep
+// the implementation hidden from the user, ArrowLog is only a declaration;
+// the implementation lives in the logging.cc file.
+// In logging.cc, we can choose different log libs using different macros.
+
+// This is also a null log which does not output anything.
+class ARROW_EXPORT ArrowLogBase {
+ public:
+  virtual ~ArrowLogBase() {}
+
+  virtual bool IsEnabled() const { return false; }
+
+  template <typename T>
+  ArrowLogBase& operator<<(const T& t) {
+    if (IsEnabled()) {
+      Stream() << t;
+    }
+    return *this;
+  }
+
+ protected:
+  virtual std::ostream& Stream() = 0;
+};
+
+class ARROW_EXPORT ArrowLog : public ArrowLogBase {
+ public:
+  ArrowLog(const char* file_name, int line_number, ArrowLogLevel severity);
+  ~ArrowLog() override;
+
+  /// Return whether or not the current logging instance is enabled.
+  ///
+  /// \return True if logging is enabled and false otherwise.
+  bool IsEnabled() const override;
+
+  /// The init function of arrow log for a program, which should be called only once.
+  ///
+  /// \param appName The app name which starts the log.
+  /// \param severity_threshold Logging threshold for the program.
+  /// \param logDir Logging output file name. If empty, the log won't output to a file.
+  static void StartArrowLog(const std::string& appName,
+                            ArrowLogLevel severity_threshold = ArrowLogLevel::ARROW_INFO,
+                            const std::string& logDir = "");
+
+  /// The shutdown function of arrow log; it should be used with StartArrowLog as a pair.
+  static void ShutDownArrowLog();
+
+  /// Install the failure signal handler to output the call stack on crash.
+  /// If glog is not installed, this function won't do anything.
+  static void InstallFailureSignalHandler();
+
+  /// Uninstall the signal actions installed by InstallFailureSignalHandler.
+  static void UninstallSignalAction();
+
+  /// Return whether or not a log level is enabled in the current setting.
+  ///
+  /// \param log_level The input log level to test.
+  /// \return True if the input log level is not lower than the threshold.
+  static bool IsLevelEnabled(ArrowLogLevel log_level);
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(ArrowLog);
+
+  // Hide the implementation of the log provider behind a void*.
+  // Otherwise, a lib user may define the same macro and pull in the wrong header file.
+  void* logging_provider_;
+  /// True if log messages should be logged and false if they should be ignored.
+  bool is_enabled_;
+
+  static ArrowLogLevel severity_threshold_;
+
+ protected:
+  std::ostream& Stream() override;
+};
+
+// This class makes ARROW_CHECK compile by changing the << operator to void.
+// This class is copied from glog.
+class ARROW_EXPORT Voidify {
+ public:
+  Voidify() {}
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(ArrowLogBase&) {}
+};
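A minimal lifecycle sketch for ArrowLog (the app name is illustrative):

  arrow::util::ArrowLog::StartArrowLog(
      "my_app", arrow::util::ArrowLogLevel::ARROW_WARNING);
  ARROW_LOG(WARNING) << "emitted: meets the threshold";
  ARROW_LOG(INFO) << "dropped: below the threshold";
  arrow::util::ArrowLog::ShutDownArrowLog();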
+
+namespace detail {
+
+/// @brief A helper for the nil log sink.
+///
+/// Using this helper is analogous to sending log messages to /dev/null:
+/// nothing gets logged.
+class NullLog {
+ public:
+  /// The no-op output operator.
+  ///
+  /// @param [in] t
+  ///   The object to send into the nil sink.
+  /// @return Reference to the updated object.
+  template <class T>
+  NullLog& operator<<(const T& t) {
+    return *this;
+  }
+};
+
+}  // namespace detail
+}  // namespace util
+}  // namespace arrow
+
+#endif  // GANDIVA_IR
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d23e829d74a93f603b60f00dc56319de66149ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/macros.h
@@ -0,0 +1,195 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#define ARROW_EXPAND(x) x
+#define ARROW_STRINGIFY(x) #x
+#define ARROW_CONCAT(x, y) x##y
+
+// From Google gutil
+#ifndef ARROW_DISALLOW_COPY_AND_ASSIGN
+#define ARROW_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete;            \
+  void operator=(const TypeName&) = delete
+#endif
+
+#ifndef ARROW_DEFAULT_MOVE_AND_ASSIGN
+#define ARROW_DEFAULT_MOVE_AND_ASSIGN(TypeName) \
+  TypeName(TypeName&&) = default;               \
+  TypeName& operator=(TypeName&&) = default
+#endif
+
+#define ARROW_UNUSED(x) (void)(x)
+#define ARROW_ARG_UNUSED(x)
+//
+// GCC can be told that a certain branch is not likely to be taken (for
+// instance, a CHECK failure), and use that information in static analysis.
+// Giving it this information can help it optimize for the common case in
+// the absence of better information (ie. -fprofile-arcs).
+//
+#if defined(__GNUC__)
+#define ARROW_PREDICT_FALSE(x) (__builtin_expect(!!(x), 0))
+#define ARROW_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#define ARROW_NORETURN __attribute__((noreturn))
+#define ARROW_NOINLINE __attribute__((noinline))
+#define ARROW_FORCE_INLINE __attribute__((always_inline))
+#define ARROW_PREFETCH(addr) __builtin_prefetch(addr)
+#elif defined(_MSC_VER)
+#define ARROW_NORETURN __declspec(noreturn)
+#define ARROW_NOINLINE __declspec(noinline)
+#define ARROW_FORCE_INLINE __declspec(forceinline)
+#define ARROW_PREDICT_FALSE(x) (x)
+#define ARROW_PREDICT_TRUE(x) (x)
+#define ARROW_PREFETCH(addr)
+#else
+#define ARROW_NORETURN
+#define ARROW_NOINLINE
+#define ARROW_FORCE_INLINE
+#define ARROW_PREDICT_FALSE(x) (x)
+#define ARROW_PREDICT_TRUE(x) (x)
+#define ARROW_PREFETCH(addr)
+#endif
+
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
+#define ARROW_RESTRICT __restrict
+#else
+#define ARROW_RESTRICT
+#endif
+
+// ----------------------------------------------------------------------
+// C++/CLI support macros (see ARROW-1134)
+
+#ifndef NULLPTR
+
+#ifdef __cplusplus_cli
+#define NULLPTR __nullptr
+#else
+#define NULLPTR nullptr
+#endif
+
+#endif  // ifndef NULLPTR
+
+// ----------------------------------------------------------------------
+
+// clang-format off
+// [[deprecated]] is only available in C++14, use this for the time being
+// This macro takes an optional deprecation message
+#ifdef __COVERITY__
+#  define ARROW_DEPRECATED(...)
+#else
+#  define ARROW_DEPRECATED(...) [[deprecated(__VA_ARGS__)]]
+#endif
+
+#ifdef __COVERITY__
+#  define ARROW_DEPRECATED_ENUM_VALUE(...)
+#else
+#  define ARROW_DEPRECATED_ENUM_VALUE(...) [[deprecated(__VA_ARGS__)]]
+#endif
+
+// clang-format on
+
+// Macros to disable deprecation warnings
+
+#ifdef __clang__
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  _Pragma("clang diagnostic push");        \
+  _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("clang diagnostic pop")
+#elif defined(__GNUC__)
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  _Pragma("GCC diagnostic push");          \
+  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define ARROW_SUPPRESS_DEPRECATION_WARNING \
+  __pragma(warning(push)) __pragma(warning(disable : 4996))
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING __pragma(warning(pop))
+#else
+#define ARROW_SUPPRESS_DEPRECATION_WARNING
+#define ARROW_UNSUPPRESS_DEPRECATION_WARNING
+#endif
+
+// ----------------------------------------------------------------------
+
+// macros to disable padding
+// these macros are portable across different compilers and platforms
+// [https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flatbuffers.h#L1355]
+#if !defined(MANUALLY_ALIGNED_STRUCT)
+#if defined(_MSC_VER)
+#define MANUALLY_ALIGNED_STRUCT(alignment) \
+  __pragma(pack(1));                       \
+  struct __declspec(align(alignment))
+#define STRUCT_END(name, size) \
+  __pragma(pack());            \
+  static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#elif defined(__GNUC__) || defined(__clang__)
+#define MANUALLY_ALIGNED_STRUCT(alignment) \
+  _Pragma("pack(1)") struct __attribute__((aligned(alignment)))
+#define STRUCT_END(name, size) \
+  _Pragma("pack()") static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#else
+#error Unknown compiler, please define structure alignment macros
+#endif
+#endif  // !defined(MANUALLY_ALIGNED_STRUCT)
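Sketch of the packing macros above (the struct is hypothetical): pack to 1 byte, align the whole struct, and statically assert the expected size.

  MANUALLY_ALIGNED_STRUCT(4) PackedPoint {
    int32_t x;
    int32_t y;
  };
  STRUCT_END(PackedPoint, 8);  // static_asserts sizeof(PackedPoint) == 8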
+
+// ----------------------------------------------------------------------
+// Convenience macro disabling a particular UBSan check in a function
+
+#if defined(__clang__)
+#define ARROW_DISABLE_UBSAN(feature) __attribute__((no_sanitize(feature)))
+#else
+#define ARROW_DISABLE_UBSAN(feature)
+#endif
+
+// ----------------------------------------------------------------------
+// Machine information
+
+#if INTPTR_MAX == INT64_MAX
+#define ARROW_BITNESS 64
+#elif INTPTR_MAX == INT32_MAX
+#define ARROW_BITNESS 32
+#else
+#error Unexpected INTPTR_MAX
+#endif
+
+// ----------------------------------------------------------------------
+// From googletest
+// (also in parquet-cpp)
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+//   // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+//   // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name) \
+  friend class test_case_name##_##test_name##_Test
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h
new file mode 100644
index 0000000000000000000000000000000000000000..5523909061d4c096b03c4853584ec9abc0f39a14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/map.h
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <utility>
+
+#include "arrow/result.h"
+
+namespace arrow {
+namespace internal {
+
+/// Helper providing single-lookup conditional insertion into std::map or
+/// std::unordered_map. If `key` exists in the container, an iterator to that pair
+/// will be returned. If `key` does not exist in the container, `gen(key)` will be
+/// invoked and its return value inserted.
+template <typename Map, typename Gen>
+auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen)
+    -> decltype(map->begin()->second = gen(map->begin()->first), map->begin()) {
+  decltype(gen(map->begin()->first)) placeholder{};
+
+  auto it_success = map->emplace(std::move(key), std::move(placeholder));
+  if (it_success.second) {
+    // insertion of placeholder succeeded, overwrite it with gen()
+    const auto& inserted_key = it_success.first->first;
+    auto* value = &it_success.first->second;
+    *value = gen(inserted_key);
+  }
+  return it_success.first;
+}
+
+template <typename Map, typename Gen>
+auto GetOrInsertGenerated(Map* map, typename Map::key_type key, Gen&& gen)
+    -> Result<decltype(map->begin()->second = gen(map->begin()->first).ValueOrDie(),
+                       map->begin())> {
+  decltype(gen(map->begin()->first).ValueOrDie()) placeholder{};
+
+  auto it_success = map->emplace(std::move(key), std::move(placeholder));
+  if (it_success.second) {
+    // insertion of placeholder succeeded, overwrite it with gen()
+    const auto& inserted_key = it_success.first->first;
+    auto* value = &it_success.first->second;
+    ARROW_ASSIGN_OR_RAISE(*value, gen(inserted_key));
+  }
+  return it_success.first;
+}
+
+}  // namespace internal
+}  // namespace arrow
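Usage sketch for the non-Result overload (the cache and generator are illustrative): compute-and-cache with a single map lookup.

  std::unordered_map<std::string, int> cache;
  auto it = arrow::internal::GetOrInsertGenerated(
      &cache, "four",
      [](const std::string& key) { return static_cast<int>(key.size()); });
  // it->second == 4; a later call with "four" performs no generation.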
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h
new file mode 100644
index 0000000000000000000000000000000000000000..4250d0694b7dd283aad6bbb159bd3e36328fe7ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/memory.h
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace internal {
+
+// A helper function for doing memcpy with multiple threads. This is required
+// to saturate the memory bandwidth of modern cpus.
+void parallel_memcopy(uint8_t* dst, const uint8_t* src, int64_t nbytes,
+                      uintptr_t block_size, int num_threads);
+
+// A helper function for checking if two wrapped objects implementing `Equals`
+// are equal.
+template <typename T>
+bool SharedPtrEquals(const std::shared_ptr<T>& left, const std::shared_ptr<T>& right) {
+  if (left == right) return true;
+  if (left == NULLPTR || right == NULLPTR) return false;
+  return left->Equals(*right);
+}
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h
new file mode 100644
index 0000000000000000000000000000000000000000..768f2328200fb2635213358226cfdb3f9273c808
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/pcg_random.h
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/vendored/pcg/pcg_random.hpp"  // IWYU pragma: export
+
+namespace arrow {
+namespace random {
+
+using pcg32 = ::arrow_vendored::pcg32;
+using pcg64 = ::arrow_vendored::pcg64;
+using pcg32_fast = ::arrow_vendored::pcg32_fast;
+using pcg64_fast = ::arrow_vendored::pcg64_fast;
+using pcg32_oneseq = ::arrow_vendored::pcg32_oneseq;
+using pcg64_oneseq = ::arrow_vendored::pcg64_oneseq;
+
+}  // namespace random
+}  // namespace arrow
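The vendored PCG engines satisfy the standard uniform random bit generator requirements, so they plug directly into <random> distributions. A minimal sketch (seed chosen arbitrarily):

  arrow::random::pcg64 engine(/*seed=*/42);
  std::uniform_int_distribution<int> dist(0, 9);
  int draw = dist(engine);  // deterministic given the seed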
+
+#pragma once
+
+#include "arrow/vendored/ProducerConsumerQueue.h"
+
+namespace arrow {
+namespace util {
+
+template <typename T>
+using SpscQueue = arrow_vendored::folly::ProducerConsumerQueue<T>;
+
+}  // namespace util
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h
new file mode 100644
index 0000000000000000000000000000000000000000..20553287985423970c228308742a7f85464a4a87
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/range.h
@@ -0,0 +1,258 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <iterator>
+#include <numeric>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace arrow::internal {
+
+/// Create a vector containing the values from start up to stop
+template <typename T>
+std::vector<T> Iota(T start, T stop) {
+  if (start > stop) {
+    return {};
+  }
+  std::vector<T> result(static_cast<size_t>(stop - start));
+  std::iota(result.begin(), result.end(), start);
+  return result;
+}
+
+/// Create a vector containing the values from 0 up to length
+template <typename T>
+std::vector<T> Iota(T length) {
+  return Iota(static_cast<T>(0), length);
+}
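For example, Iota(3, 7) produces {3, 4, 5, 6} and Iota(4) produces {0, 1, 2, 3}:

  std::vector<int> row_indices = arrow::internal::Iota(4);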
+
+/// Create a range from a callable which takes a single index parameter
+/// and returns the value for that index, plus a length.
+/// Only iterators obtained from the same range should be compared; the
+/// behaviour is generally similar to other STL containers.
+template <typename Generator>
+class LazyRange {
+ private:
+  // callable which generates the values
+  // has to be defined at the beginning of the class for type deduction
+  const Generator gen_;
+  // the length of the range
+  int64_t length_;
+#ifdef _MSC_VER
+  // workaround to VS2010 not supporting decltype properly
+  // see https://stackoverflow.com/questions/21782846/decltype-for-class-member-function
+  static Generator gen_static_;
+#endif
+
+ public:
+#ifdef _MSC_VER
+  using return_type = decltype(gen_static_(0));
+#else
+  using return_type = decltype(gen_(0));
+#endif
+
+  /// Construct a new range from a callable and length
+  LazyRange(Generator gen, int64_t length) : gen_(gen), length_(length) {}
+
+  // Class of the dependent iterator, created implicitly by begin and end
+  class RangeIter {
+   public:
+    using difference_type = int64_t;
+    using value_type = return_type;
+    using reference = const value_type&;
+    using pointer = const value_type*;
+    using iterator_category = std::forward_iterator_tag;
+
+#ifdef _MSC_VER
+    // msvc complains about unchecked iterators,
+    // see https://stackoverflow.com/questions/21655496/error-c4996-checked-iterators
+    using _Unchecked_type = typename LazyRange<Generator>::RangeIter;
+#endif
+
+    RangeIter() = delete;
+    RangeIter(const RangeIter& other) = default;
+    RangeIter& operator=(const RangeIter& other) = default;
+
+    RangeIter(const LazyRange<Generator>& range, int64_t index)
+        : range_(&range), index_(index) {}
+
+    const return_type operator*() const { return range_->gen_(index_); }
+
+    RangeIter operator+(difference_type length) const {
+      return RangeIter(*range_, index_ + length);
+    }
+
+    // pre-increment
+    RangeIter& operator++() {
+      ++index_;
+      return *this;
+    }
+
+    // post-increment
+    RangeIter operator++(int) {
+      auto copy = RangeIter(*this);
+      ++index_;
+      return copy;
+    }
+
+    bool operator==(const typename LazyRange<Generator>::RangeIter& other) const {
+      return this->index_ == other.index_ && this->range_ == other.range_;
+    }
+
+    bool operator!=(const typename LazyRange<Generator>::RangeIter& other) const {
+      return this->index_ != other.index_ || this->range_ != other.range_;
+    }
+
+    int64_t operator-(const typename LazyRange<Generator>::RangeIter& other) const {
+      return this->index_ - other.index_;
+    }
+
+    bool operator<(const typename LazyRange<Generator>::RangeIter& other) const {
+      return this->index_ < other.index_;
+    }
+
+   private:
+    // parent range reference
+    const LazyRange* range_;
+    // current index
+    int64_t index_;
+  };
+
+  friend class RangeIter;
+
+  // Create a new begin const iterator
+  RangeIter begin() { return RangeIter(*this, 0); }
+
+  // Create a new end const iterator
+  RangeIter end() { return RangeIter(*this, length_); }
+};
+
+/// Helper function to create a lazy range from a callable (e.g. lambda) and length
+template <typename Generator>
+LazyRange<Generator> MakeLazyRange(Generator&& gen, int64_t length) {
+  return LazyRange<Generator>(std::forward<Generator>(gen), length);
+}
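Usage sketch: iterate the first five squares without materializing a vector.

  for (int64_t sq : arrow::internal::MakeLazyRange(
           [](int64_t i) { return i * i; }, /*length=*/5)) {
    // sq takes the values 0, 1, 4, 9, 16
  }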
+
+/// \brief A helper for iterating multiple ranges simultaneously, similar to C++23's
+/// zip() view adapter modelled after python's built-in zip() function.
+///
+/// \code {.cpp}
+/// const std::vector<Table>& tables = ...
+/// std::function<std::vector<std::string>()> GetNames = ...
+/// for (auto [table, name] : Zip(tables, GetNames())) {
+///   static_assert(std::is_same_v<decltype(table), const Table&>);
+///   static_assert(std::is_same_v<decltype(name), std::string&>);
+///   // temporaries (like this vector of strings) are kept alive for the
+///   // duration of a loop and are safely movable
+///   RegisterTableWithName(std::move(name), &table);
+/// }
+/// \endcode
+///
+/// The zipped sequence ends as soon as any of its member ranges ends.
+///
+/// Always use `auto` for the loop's declaration; it will always be a tuple
+/// of references, so for example using `const auto&` will compile but will
+/// *look* like forcing const-ness even though the members of the tuple are
+/// still mutable references.
+///
+/// NOTE: we *could* make Zip a more full fledged range and enable things like
+/// - gtest recognizing it as a container; it currently doesn't since Zip is
+///   always mutable so this breaks:
+///       EXPECT_THAT(Zip(std::vector{0}, std::vector{1}),
+///                   ElementsAre(std::tuple{0, 1}));
+/// - letting it be random access when possible so we can do things like *sort*
+///   parallel ranges
+/// - ...
+///
+/// However doing this will increase the compile time overhead of using Zip as
+/// long as we're still using headers. Therefore until we can use c++20 modules:
+/// *don't* extend Zip.
+template <typename... Ranges>
+struct Zip;
+
+template <typename... Ranges>
+Zip(Ranges&&...) -> Zip<std::tuple<Ranges...>, std::index_sequence_for<Ranges...>>;
+
+template <typename... Ranges, size_t... I>
+struct Zip<std::tuple<Ranges...>, std::index_sequence<I...>> {
+  explicit Zip(Ranges... ranges) : ranges_(std::forward<Ranges>(ranges)...) {}
+
+  std::tuple<Ranges...> ranges_;
+
+  using sentinel = std::tuple<decltype(std::end(std::get<I>(ranges_)))...>;
+  constexpr sentinel end() { return {std::end(std::get<I>(ranges_))...}; }
+
+  struct iterator : std::tuple<decltype(std::begin(std::get<I>(ranges_)))...> {
+    using std::tuple<decltype(std::begin(std::get<I>(ranges_)))...>::tuple;
+
+    constexpr auto operator*() {
+      return std::tuple<decltype(*std::get<I>(*this))...>{*std::get<I>(*this)...};
+    }
+
+    constexpr iterator& operator++() {
+      (++std::get<I>(*this), ...);
+      return *this;
+    }
+
+    constexpr bool operator!=(const sentinel& s) const {
+      bool all_iterators_valid = (... && (std::get<I>(*this) != std::get<I>(s)));
+      return all_iterators_valid;
+    }
+  };
+  constexpr iterator begin() { return {std::begin(std::get<I>(ranges_))...}; }
+};
+
+/// \brief A lazy sequence of integers which starts from 0 and never stops.
+///
+/// This can be used in conjunction with Zip() to emulate python's built-in
+/// enumerate() function:
+///
+/// \code {.cpp}
+/// const std::vector<Table>& tables = ...
+/// for (auto [i, table] : Zip(Enumerate<>, tables)) {
+///   std::cout << "#" << i << ": " << table.name() << std::endl;
+/// }
+/// \endcode
+template <typename I = size_t>
+constexpr auto Enumerate = [] {
+  struct {
+    struct sentinel {};
+    constexpr sentinel end() const { return {}; }
+
+    struct iterator {
+      I value{0};
+
+      constexpr I operator*() { return value; }
+
+      constexpr iterator& operator++() {
+        ++value;
+        return *this;
+      }
+
+      constexpr std::true_type operator!=(sentinel) const { return {}; }
+    };
+    constexpr iterator begin() const { return {}; }
+  } out;
+
+  return out;
+}();
+
+}  // namespace arrow::internal
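Usage sketch combining Zip and Enumerate as described above (data is illustrative):

  std::vector<std::string> names{"a", "b"};
  std::vector<double> values{1.0, 2.0};
  for (auto [i, name, value] :
       arrow::internal::Zip(arrow::internal::Enumerate<>, names, values)) {
    // (i, name, value) visits (0, "a", 1.0) then (1, "b", 2.0);
    // iteration stops when the shortest range is exhausted
  }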
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h
new file mode 100644
index 0000000000000000000000000000000000000000..590fbac7153889129e7bca7652125980cb4457cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/regex.h
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <initializer_list>
+#include <regex>
+#include <string_view>
+#include <utility>
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// Match regex against target and produce string_views out of matches.
+inline bool RegexMatch(const std::regex& regex, std::string_view target,
+                       std::initializer_list<std::string_view*> out_matches) {
+  assert(regex.mark_count() == out_matches.size());
+
+  std::match_results<std::string_view::const_iterator> match;
+  if (!std::regex_match(target.begin(), target.end(), match, regex)) {
+    return false;
+  }
+
+  // Match #0 is the whole matched sequence
+  assert(regex.mark_count() + 1 == match.size());
+  auto out_it = out_matches.begin();
+  for (size_t i = 1; i < match.size(); ++i) {
+    **out_it++ = target.substr(match.position(i), match.length(i));
+  }
+  return true;
+}
+
+}  // namespace internal
+}  // namespace arrow
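Usage sketch for RegexMatch (the pattern and input are illustrative): one output string_view per capture group, each a view into the target.

  std::regex kv_regex("(\\w+)=(\\w+)");
  std::string_view key, value;
  if (arrow::internal::RegexMatch(kv_regex, "answer=42", {&key, &value})) {
    // key == "answer", value == "42"
  }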
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0f5690062a049dd2485fe68461237eb6d9e0265
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rle_encoding.h
@@ -0,0 +1,826 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Imported from Apache Impala (incubating) on 2016-01-29 and modified for use
+// in parquet-cpp, Arrow
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <vector>
+
+#include "arrow/util/bit_block_counter.h"
+#include "arrow/util/bit_run_reader.h"
+#include "arrow/util/bit_stream_utils.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace util {
+
+/// Utility classes to do run length encoding (RLE) for fixed bit width values. If runs
+/// are sufficiently long, RLE is used; otherwise, the values are just bit-packed
+/// (literal encoding).
+/// For both types of runs, there is a byte-aligned indicator which encodes the length
+/// of the run and the type of the run.
+/// This encoding has the benefit that when there aren't any long enough runs, values
+/// are always decoded at fixed (can be precomputed) bit offsets OR both the value and
+/// the run length are byte aligned. This allows for very efficient decoding
+/// implementations.
+/// The encoding is:
+///    encoded-block := run*
+///    run := literal-run | repeated-run
+///    literal-run := literal-indicator < literal bytes >
+///    repeated-run := repeated-indicator < repeated value. padded to byte boundary >
+///    literal-indicator := varint_encode( number_of_groups << 1 | 1)
+///    repeated-indicator := varint_encode( number_of_repetitions << 1 )
+//
+/// Each run is preceded by a varint. The varint's least significant bit is
+/// used to indicate whether the run is a literal run or a repeated run. The rest
+/// of the varint is used to determine the length of the run (eg how many times the
+/// value repeats).
+//
+/// In the case of literal runs, the run length is always a multiple of 8 (i.e. encode
+/// in groups of 8), so that no matter the bit-width of the value, the sequence will end
+/// on a byte boundary without padding.
+/// Given that we know it is a multiple of 8, we store the number of 8-groups rather than
+/// the actual number of encoded ints. (This means that the total number of encoded
+/// values cannot be determined from the encoded data, since the number of values in the
+/// last group may not be a multiple of 8). For the last group of literal runs, we pad
+/// the group to 8 with zeros. This allows for 8 at a time decoding on the read side
+/// without the need for additional checks.
+//
+/// There is a break-even point when it is more storage efficient to do run length
+/// encoding. For 1 bit-width values, that point is 8 values. They require 2 bytes
+/// for both the repeated encoding and the literal encoding. This value can always
+/// be computed based on the bit-width.
+/// TODO: think about how to use this for strings. The bit packing isn't quite the same.
+//
+/// Examples with bit-width 1 (eg encoding booleans):
+/// ----------------------------------------
+/// 100 1s followed by 100 0s:
+/// <varint(100 << 1)> <1, padded to 1 byte>  <varint(100 << 1)> <0, padded to 1 byte>
+///  - (total 4 bytes)
+//
+/// alternating 1s and 0s (200 total):
+/// 200 ints = 25 groups of 8
+/// <varint((25 << 1) | 1)> <25 bytes of values, bitpacked>
+/// (total 26 bytes, 1 byte overhead)
+//
+
+/// Decoder class for RLE encoded data.
+class RleDecoder {
+ public:
+  /// Create a decoder object. buffer/buffer_len is the encoded data.
+  /// bit_width is the width of each value (before encoding).
+  RleDecoder(const uint8_t* buffer, int buffer_len, int bit_width)
+      : bit_reader_(buffer, buffer_len),
+        bit_width_(bit_width),
+        current_value_(0),
+        repeat_count_(0),
+        literal_count_(0) {
+    DCHECK_GE(bit_width_, 0);
+    DCHECK_LE(bit_width_, 64);
+  }
+
+  RleDecoder() : bit_width_(-1) {}
+
+  void Reset(const uint8_t* buffer, int buffer_len, int bit_width) {
+    DCHECK_GE(bit_width, 0);
+    DCHECK_LE(bit_width, 64);
+    bit_reader_.Reset(buffer, buffer_len);
+    bit_width_ = bit_width;
+    current_value_ = 0;
+    repeat_count_ = 0;
+    literal_count_ = 0;
+  }
+
+  /// Gets the next value. Returns false if there are no more.
+  template <typename T>
+  bool Get(T* val);
+
+  /// Gets a batch of values. Returns the number of decoded elements.
+  template <typename T>
+  int GetBatch(T* values, int batch_size);
+
+  /// Like GetBatch but add spacing for null entries
+  template <typename T>
+  int GetBatchSpaced(int batch_size, int null_count, const uint8_t* valid_bits,
+                     int64_t valid_bits_offset, T* out);
+
+  /// Like GetBatch but the values are then decoded using the provided dictionary
+  template <typename T>
+  int GetBatchWithDict(const T* dictionary, int32_t dictionary_length, T* values,
+                       int batch_size);
+
+  /// Like GetBatchWithDict but add spacing for null entries
+  ///
+  /// Null entries will be zero-initialized in `values` to avoid leaking
+  /// private data.
+  template <typename T>
+  int GetBatchWithDictSpaced(const T* dictionary, int32_t dictionary_length, T* values,
+                             int batch_size, int null_count, const uint8_t* valid_bits,
+                             int64_t valid_bits_offset);
+
+ protected:
+  ::arrow::bit_util::BitReader bit_reader_;
+  /// Number of bits needed to encode the value. Must be between 0 and 64.
+  int bit_width_;
+  uint64_t current_value_;
+  int32_t repeat_count_;
+  int32_t literal_count_;
+
+ private:
+  /// Fills literal_count_ and repeat_count_ with next values. Returns false if there
+  /// are no more.
+  template <typename T>
+  bool NextCounts();
+
+  /// Utility methods for retrieving spaced values.
+  template <typename T, typename RunType, typename Converter>
+  int GetSpaced(Converter converter, int batch_size, int null_count,
+                const uint8_t* valid_bits, int64_t valid_bits_offset, T* out);
+};
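A round-trip sketch using the decoder above and the encoder described below (buffer sized to satisfy MinBufferSize for this bit width; values are illustrative):

  uint8_t buf[256];
  arrow::util::RleEncoder encoder(buf, sizeof(buf), /*bit_width=*/3);
  for (uint64_t v : {5, 5, 5, 5, 1, 2}) encoder.Put(v);
  int encoded_len = encoder.Flush();

  arrow::util::RleDecoder decoder(buf, encoded_len, /*bit_width=*/3);
  int out[6];
  int n = decoder.GetBatch(out, 6);  // n == 6; out == {5, 5, 5, 5, 1, 2}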
+
+/// Class to incrementally build the rle data. This class does not allocate any memory.
+/// The encoding has two modes: encoding repeated runs and literal runs.
+/// If the run is sufficiently short, it is more efficient to encode as a literal run.
+/// This class does so by buffering 8 values at a time. If they are not all the same
+/// they are added to the literal run. If they are the same, they are added to the
+/// repeated run. When we switch modes, the previous run is flushed out.
+class RleEncoder {
+ public:
+  /// buffer/buffer_len: preallocated output buffer.
+  /// bit_width: max number of bits for value.
+  /// TODO: consider adding a min_repeated_run_length so the caller can control
+  /// when values should be encoded as repeated runs. Currently this is derived
+  /// based on the bit_width, which can determine a storage optimal choice.
+  /// TODO: allow 0 bit_width (and have dict encoder use it)
+  RleEncoder(uint8_t* buffer, int buffer_len, int bit_width)
+      : bit_width_(bit_width), bit_writer_(buffer, buffer_len) {
+    DCHECK_GE(bit_width_, 0);
+    DCHECK_LE(bit_width_, 64);
+    max_run_byte_size_ = MinBufferSize(bit_width);
+    DCHECK_GE(buffer_len, max_run_byte_size_) << "Input buffer not big enough.";
+    Clear();
+  }
+
+  /// Returns the minimum buffer size needed to use the encoder for 'bit_width'.
+  /// This is the maximum length of a single run for 'bit_width'.
+  /// It is not valid to pass a buffer less than this length.
+  static int MinBufferSize(int bit_width) {
+    /// 1 indicator byte and MAX_VALUES_PER_LITERAL_RUN 'bit_width' values.
+    int max_literal_run_size = 1 + static_cast<int>(::arrow::bit_util::BytesForBits(
+                                       MAX_VALUES_PER_LITERAL_RUN * bit_width));
+    /// Up to kMaxVlqByteLength indicator and a single 'bit_width' value.
+    int max_repeated_run_size =
+        ::arrow::bit_util::BitReader::kMaxVlqByteLength +
+        static_cast<int>(::arrow::bit_util::BytesForBits(bit_width));
+    return std::max(max_literal_run_size, max_repeated_run_size);
+  }
+
+  /// Returns the maximum byte size it could take to encode 'num_values'.
+  static int MaxBufferSize(int bit_width, int num_values) {
+    // For a bit_width > 1, the worst case is the repetition of "literal run of length 8
+    // and then a repeated run of length 8".
+    // 8 values per smallest run, 8 bits per byte
+    int bytes_per_run = bit_width;
+    int num_runs = static_cast<int>(::arrow::bit_util::CeilDiv(num_values, 8));
+    int literal_max_size = num_runs + num_runs * bytes_per_run;
+
+    // In the very worst case scenario, the data is a concatenation of repeated
+    // runs of 8 values. Repeated run has a 1 byte varint followed by the
+    // bit-packed repeated value
+    int min_repeated_run_size =
+        1 + static_cast<int>(::arrow::bit_util::BytesForBits(bit_width));
+    int repeated_max_size = num_runs * min_repeated_run_size;
+
+    return std::max(literal_max_size, repeated_max_size);
+  }
+
+  /// Encode value. Returns true if the value fits in buffer, false otherwise.
+  /// This value must be representable with bit_width_ bits.
+  bool Put(uint64_t value);
+
+  /// Flushes any pending values to the underlying buffer.
+  /// Returns the total number of bytes written
+  int Flush();
+
+  /// Resets all the state in the encoder.
+  void Clear();
+
+  /// Returns pointer to underlying buffer
+  uint8_t* buffer() { return bit_writer_.buffer(); }
+  int32_t len() { return bit_writer_.bytes_written(); }
+
+ private:
+  /// Flushes any buffered values. If this is part of a repeated run, this is largely
+  /// a no-op.
+  /// If it is part of a literal run, this will call FlushLiteralRun, which writes
+  /// out the buffered literal values.
+  /// If 'done' is true, the current run would be written even if it would normally
+  /// have been buffered more. This should only be called at the end, when the
+  /// encoder has received all values even if it would normally continue to be
+  /// buffered.
+  void FlushBufferedValues(bool done);
+
+  /// Flushes literal values to the underlying buffer. If update_indicator_byte,
+  /// then the current literal run is complete and the indicator byte is updated.
+  void FlushLiteralRun(bool update_indicator_byte);
+
+  /// Flushes a repeated run to the underlying buffer.
+  void FlushRepeatedRun();
+
+  /// Checks and sets buffer_full_. This must be called after flushing a run to
+  /// make sure there are enough bytes remaining to encode the next run.
+  void CheckBufferFull();
+
+  /// The maximum number of values in a single literal run
+  /// (number of groups encodable by a 1-byte indicator * 8)
+  static const int MAX_VALUES_PER_LITERAL_RUN = (1 << 6) * 8;
+
+  /// Number of bits needed to encode the value. Must be between 0 and 64.
+  const int bit_width_;
+
+  /// Underlying buffer.
+  ::arrow::bit_util::BitWriter bit_writer_;
+
+  /// If true, the buffer is full and subsequent Put()'s will fail.
+  bool buffer_full_;
+
+  /// The maximum byte size a single run can take.
+  int max_run_byte_size_;
+
+  /// We need to buffer at most 8 values for literals. This happens when the
+  /// bit_width is 1 (so 8 values fit in one byte).
+  /// TODO: generalize this to other bit widths
+  int64_t buffered_values_[8];
+
+  /// Number of values in buffered_values_
+  int num_buffered_values_;
+
+  /// The current (also last) value that was written and the count of how
+  /// many times in a row that value has been seen. This is maintained even
+  /// if we are in a literal run. If the repeat_count_ gets high enough, we switch
+  /// to encoding repeated runs.
+  uint64_t current_value_;
+  int repeat_count_;
+
+  /// Number of literals in the current run. This does not include the literals
+  /// that might be in buffered_values_. Only after we've got a group big enough
+  /// can we decide whether they should be part of the literal_count_ or repeat_count_.
+  int literal_count_;
+
+  /// Pointer to a byte in the underlying buffer that stores the indicator byte.
+  /// This is reserved as soon as we need a literal run but the value is written
+  /// when the literal run is complete.
+  uint8_t* literal_indicator_byte_;
+};
+
+template <typename T>
+inline bool RleDecoder::Get(T* val) {
+  return GetBatch(val, 1) == 1;
+}
+
+template <typename T>
+inline int RleDecoder::GetBatch(T* values, int batch_size) {
+  DCHECK_GE(bit_width_, 0);
+  int values_read = 0;
+
+  auto* out = values;
+
+  while (values_read < batch_size) {
+    int remaining = batch_size - values_read;
+
+    if (repeat_count_ > 0) {  // Repeated value case.
+      int repeat_batch = std::min(remaining, repeat_count_);
+      std::fill(out, out + repeat_batch, static_cast<T>(current_value_));
+
+      repeat_count_ -= repeat_batch;
+      values_read += repeat_batch;
+      out += repeat_batch;
+    } else if (literal_count_ > 0) {
+      int literal_batch = std::min(remaining, literal_count_);
+      int actual_read = bit_reader_.GetBatch(bit_width_, out, literal_batch);
+      if (actual_read != literal_batch) {
+        return values_read;
+      }
+
+      literal_count_ -= literal_batch;
+      values_read += literal_batch;
+      out += literal_batch;
+    } else {
+      if (!NextCounts<T>()) return values_read;
+    }
+  }
+
+  return values_read;
+}
+
+template <typename T, typename RunType, typename Converter>
+inline int RleDecoder::GetSpaced(Converter converter, int batch_size, int null_count,
+                                 const uint8_t* valid_bits, int64_t valid_bits_offset,
+                                 T* out) {
+  if (ARROW_PREDICT_FALSE(null_count == batch_size)) {
+    converter.FillZero(out, out + batch_size);
+    return batch_size;
+  }
+
+  DCHECK_GE(bit_width_, 0);
+  int values_read = 0;
+  int values_remaining = batch_size - null_count;
+
+  // Assume no bits to start.
+  arrow::internal::BitRunReader bit_reader(valid_bits, valid_bits_offset,
+                                           /*length=*/batch_size);
+  arrow::internal::BitRun valid_run = bit_reader.NextRun();
+  while (values_read < batch_size) {
+    if (ARROW_PREDICT_FALSE(valid_run.length == 0)) {
+      valid_run = bit_reader.NextRun();
+    }
+
+    DCHECK_GT(batch_size, 0);
+    DCHECK_GT(valid_run.length, 0);
+
+    if (valid_run.set) {
+      if ((repeat_count_ == 0) && (literal_count_ == 0)) {
+        if (!NextCounts<RunType>()) return values_read;
+        DCHECK((repeat_count_ > 0) ^ (literal_count_ > 0));
+      }
+
+      if (repeat_count_ > 0) {
+        int repeat_batch = 0;
+        // Consume the entire repeat counts incrementing repeat_batch to
+        // be the total of nulls + values consumed, we only need to
+        // get the total count because we can fill in the same value for
+        // nulls and non-nulls. This proves to be a big efficiency win.
+        while (repeat_count_ > 0 && (values_read + repeat_batch) < batch_size) {
+          DCHECK_GT(valid_run.length, 0);
+          if (valid_run.set) {
+            int update_size =
+                std::min(static_cast<int>(valid_run.length), repeat_count_);
+            repeat_count_ -= update_size;
+            repeat_batch += update_size;
+            valid_run.length -= update_size;
+            values_remaining -= update_size;
+          } else {
+            // We can consume all nulls here because we would do so on
+            // the next loop anyways.
+            repeat_batch += static_cast<int>(valid_run.length);
+            valid_run.length = 0;
+          }
+          if (valid_run.length == 0) {
+            valid_run = bit_reader.NextRun();
+          }
+        }
+        RunType current_value = static_cast<RunType>(current_value_);
+        if (ARROW_PREDICT_FALSE(!converter.IsValid(current_value))) {
+          return values_read;
+        }
+        converter.Fill(out, out + repeat_batch, current_value);
+        out += repeat_batch;
+        values_read += repeat_batch;
+      } else if (literal_count_ > 0) {
+        int literal_batch = std::min(values_remaining, literal_count_);
+        DCHECK_GT(literal_batch, 0);
+
+        // Decode the literals
+        constexpr int kBufferSize = 1024;
+        RunType indices[kBufferSize];
+        literal_batch = std::min(literal_batch, kBufferSize);
+        int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch);
+        if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) {
+          return values_read;
+        }
+        if (!converter.IsValid(indices, /*length=*/actual_read)) {
+          return values_read;
+        }
+        int skipped = 0;
+        int literals_read = 0;
+        while (literals_read < literal_batch) {
+          if (valid_run.set) {
+            int update_size = std::min(literal_batch - literals_read,
+                                       static_cast<int>(valid_run.length));
+            converter.Copy(out, indices + literals_read, update_size);
+            literals_read += update_size;
+            out += update_size;
+            valid_run.length -= update_size;
+          } else {
+            converter.FillZero(out, out + valid_run.length);
+            out += valid_run.length;
+            skipped += static_cast<int>(valid_run.length);
+            valid_run.length = 0;
+          }
+          if (valid_run.length == 0) {
+            valid_run = bit_reader.NextRun();
+          }
+        }
+        literal_count_ -= literal_batch;
+        values_remaining -= literal_batch;
+        values_read += literal_batch + skipped;
+      }
+    } else {
+      converter.FillZero(out, out + valid_run.length);
+      out += valid_run.length;
+      values_read += static_cast<int>(valid_run.length);
+      valid_run.length = 0;
+    }
+  }
+  DCHECK_EQ(valid_run.length, 0);
+  DCHECK_EQ(values_remaining, 0);
+  return values_read;
+}
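Spaced decoding sketch (reusing the decoder from the earlier round-trip sketch; the bitmap is LSB-first, bit i set means output slot i is valid):

  uint8_t valid_bits = 0b00010101;  // slots 0, 2, 4 valid; slots 1, 3 null
  int spaced[5];
  int n = decoder.GetBatchSpaced(/*batch_size=*/5, /*null_count=*/2,
                                 &valid_bits, /*valid_bits_offset=*/0, spaced);
  // null slots are zero-filled; valid slots receive decoded values in order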
+
+// Converter for GetSpaced that handles runs that get returned
+// directly as output.
+template <typename T>
+struct PlainRleConverter {
+  T kZero = {};
+  inline bool IsValid(const T& values) const { return true; }
+  inline bool IsValid(const T* values, int32_t length) const { return true; }
+  inline void Fill(T* begin, T* end, const T& run_value) const {
+    std::fill(begin, end, run_value);
+  }
+  inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); }
+  inline void Copy(T* out, const T* values, int length) const {
+    std::memcpy(out, values, length * sizeof(T));
+  }
+};
+
+template <typename T>
+inline int RleDecoder::GetBatchSpaced(int batch_size, int null_count,
+                                      const uint8_t* valid_bits,
+                                      int64_t valid_bits_offset, T* out) {
+  if (null_count == 0) {
+    return GetBatch(out, batch_size);
+  }
+
+  PlainRleConverter<T> converter;
+  arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset,
+                                                 batch_size);
+
+  int total_processed = 0;
+  int processed = 0;
+  arrow::internal::BitBlockCount block;
+
+  do {
+    block = block_counter.NextFourWords();
+    if (block.length == 0) {
+      break;
+    }
+    if (block.AllSet()) {
+      processed = GetBatch(out, block.length);
+    } else if (block.NoneSet()) {
+      converter.FillZero(out, out + block.length);
+      processed = block.length;
+    } else {
+      processed = GetSpaced<T, /*RunType=*/T, PlainRleConverter<T>>(
+          converter, block.length, block.length - block.popcount, valid_bits,
+          valid_bits_offset, out);
+    }
+    total_processed += processed;
+    out += block.length;
+    valid_bits_offset += block.length;
+  } while (processed == block.length);
+  return total_processed;
+}
+
+static inline bool IndexInRange(int32_t idx, int32_t dictionary_length) {
+  return idx >= 0 && idx < dictionary_length;
+}
+
+// Converter for GetSpaced that handles runs of returned dictionary
+// indices.
+template <typename T>
+struct DictionaryConverter {
+  T kZero = {};
+  const T* dictionary;
+  int32_t dictionary_length;
+
+  inline bool IsValid(int32_t value) { return IndexInRange(value, dictionary_length); }
+
+  inline bool IsValid(const int32_t* values, int32_t length) const {
+    using IndexType = int32_t;
+    IndexType min_index = std::numeric_limits<IndexType>::max();
+    IndexType max_index = std::numeric_limits<IndexType>::min();
+    for (int x = 0; x < length; x++) {
+      min_index = std::min(values[x], min_index);
+      max_index = std::max(values[x], max_index);
+    }
+
+    return IndexInRange(min_index, dictionary_length) &&
+           IndexInRange(max_index, dictionary_length);
+  }
+  inline void Fill(T* begin, T* end, const int32_t& run_value) const {
+    std::fill(begin, end, dictionary[run_value]);
+  }
+  inline void FillZero(T* begin, T* end) { std::fill(begin, end, kZero); }
+
+  inline void Copy(T* out, const int32_t* values, int length) const {
+    for (int x = 0; x < length; x++) {
+      out[x] = dictionary[values[x]];
+    }
+  }
+};
+
+template <typename T>
+inline int RleDecoder::GetBatchWithDict(const T* dictionary, int32_t dictionary_length,
+                                        T* values, int batch_size) {
+  // Per https://github.com/apache/parquet-format/blob/master/Encodings.md,
+  // the maximum dictionary index width in Parquet is 32 bits.
+  using IndexType = int32_t;
+  DictionaryConverter<T> converter;
+  converter.dictionary = dictionary;
+  converter.dictionary_length = dictionary_length;
+
+  DCHECK_GE(bit_width_, 0);
+  int values_read = 0;
+
+  auto* out = values;
+
+  while (values_read < batch_size) {
+    int remaining = batch_size - values_read;
+
+    if (repeat_count_ > 0) {
+      auto idx = static_cast<int32_t>(current_value_);
+      if (ARROW_PREDICT_FALSE(!IndexInRange(idx, dictionary_length))) {
+        return values_read;
+      }
+      T val = dictionary[idx];
+
+      int repeat_batch = std::min(remaining, repeat_count_);
+      std::fill(out, out + repeat_batch, val);
+
+      /* Upkeep counters */
+      repeat_count_ -= repeat_batch;
+      values_read += repeat_batch;
+      out += repeat_batch;
+    } else if (literal_count_ > 0) {
+      constexpr int kBufferSize = 1024;
+      IndexType indices[kBufferSize];
+
+      int literal_batch = std::min(remaining, literal_count_);
+      literal_batch = std::min(literal_batch, kBufferSize);
+
+      int actual_read = bit_reader_.GetBatch(bit_width_, indices, literal_batch);
+      if (ARROW_PREDICT_FALSE(actual_read != literal_batch)) {
+        return values_read;
+      }
+      if (ARROW_PREDICT_FALSE(!converter.IsValid(indices, /*length=*/literal_batch))) {
+        return values_read;
+      }
+      converter.Copy(out, indices, literal_batch);
+
+      /* Upkeep counters */
+      literal_count_ -= literal_batch;
+      values_read += literal_batch;
+      out += literal_batch;
+    } else {
+      if (!NextCounts<IndexType>()) return values_read;
+    }
+  }
+
+  return values_read;
+}
+
+template <typename T>
+inline int RleDecoder::GetBatchWithDictSpaced(const T* dictionary,
+                                              int32_t dictionary_length, T* out,
+                                              int batch_size, int null_count,
+                                              const uint8_t* valid_bits,
+                                              int64_t valid_bits_offset) {
+  if (null_count == 0) {
+    return GetBatchWithDict(dictionary, dictionary_length, out, batch_size);
+  }
+  arrow::internal::BitBlockCounter block_counter(valid_bits, valid_bits_offset,
+                                                 batch_size);
+  using IndexType = int32_t;
+  DictionaryConverter<T> converter;
+  converter.dictionary = dictionary;
+  converter.dictionary_length = dictionary_length;
+
+  int total_processed = 0;
+  int processed = 0;
+  arrow::internal::BitBlockCount block;
+  do {
+    block = block_counter.NextFourWords();
+    if (block.length == 0) {
+      break;
+    }
+    if (block.AllSet()) {
+      processed = GetBatchWithDict(dictionary, dictionary_length, out, block.length);
+    } else if (block.NoneSet()) {
+      converter.FillZero(out, out + block.length);
+      processed = block.length;
+    } else {
+      processed = GetSpaced<DictionaryConverter<T>>(
+          converter, block.length, block.length - block.popcount, valid_bits,
+          valid_bits_offset, out);
+    }
+    total_processed += processed;
+    out += block.length;
+    valid_bits_offset += block.length;
+  } while (processed == block.length);
+  return total_processed;
+}
+
+template <typename T>
+bool RleDecoder::NextCounts() {
+  // Read the next run's indicator int, it could be a literal or repeated run.
+  // The int is encoded as a vlq-encoded value.
+  uint32_t indicator_value = 0;
+  if (!bit_reader_.GetVlqInt(&indicator_value)) return false;
+
+  // lsb indicates if it is a literal run or repeated run
+  bool is_literal = indicator_value & 1;
+  uint32_t count = indicator_value >> 1;
+  if (is_literal) {
+    if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast<uint32_t>(INT32_MAX) / 8)) {
+      return false;
+    }
+    literal_count_ = count * 8;
+  } else {
+    if (ARROW_PREDICT_FALSE(count == 0 || count > static_cast<uint32_t>(INT32_MAX))) {
+      return false;
+    }
+    repeat_count_ = count;
+    T value = {};
+    if (!bit_reader_.GetAligned<T>(
+            static_cast<int>(::arrow::bit_util::CeilDiv(bit_width_, 8)), &value)) {
+      return false;
+    }
+    current_value_ = static_cast<uint64_t>(value);
+  }
+  return true;
+}
+
+/// This function buffers input values 8 at a time. After seeing all 8 values,
+/// it decides whether they should be encoded as a literal or repeated run.
+inline bool RleEncoder::Put(uint64_t value) {
+  DCHECK(bit_width_ == 64 || value < (1ULL << bit_width_));
+  if (ARROW_PREDICT_FALSE(buffer_full_)) return false;
+
+  if (ARROW_PREDICT_TRUE(current_value_ == value)) {
+    ++repeat_count_;
+    if (repeat_count_ > 8) {
+      // This is just a continuation of the current run, no need to buffer the
+      // values.
+      // Note that this is the fast path for long repeated runs.
+      return true;
+    }
+  } else {
+    if (repeat_count_ >= 8) {
+      // We had a run that was long enough but it has ended. Flush the
+      // current repeated run.
+      DCHECK_EQ(literal_count_, 0);
+      FlushRepeatedRun();
+    }
+    repeat_count_ = 1;
+    current_value_ = value;
+  }
+
+  buffered_values_[num_buffered_values_] = value;
+  if (++num_buffered_values_ == 8) {
+    DCHECK_EQ(literal_count_ % 8, 0);
+    FlushBufferedValues(false);
+  }
+  return true;
+}
+
+inline void RleEncoder::FlushLiteralRun(bool update_indicator_byte) {
+  if (literal_indicator_byte_ == NULL) {
+    // The literal indicator byte has not been reserved yet, get one now.
+    literal_indicator_byte_ = bit_writer_.GetNextBytePtr();
+    DCHECK(literal_indicator_byte_ != NULL);
+  }
+
+  // Write all the buffered values as bit packed literals
+  for (int i = 0; i < num_buffered_values_; ++i) {
+    bool success = bit_writer_.PutValue(buffered_values_[i], bit_width_);
+    DCHECK(success) << "There is a bug in using CheckBufferFull()";
+  }
+  num_buffered_values_ = 0;
+
+  if (update_indicator_byte) {
+    // At this point we need to write the indicator byte for the literal run.
+    // We only reserve one byte, to allow for streaming writes of literal values.
+    // The logic makes sure we flush literal runs often enough to not overrun
+    // the 1 byte.
+    DCHECK_EQ(literal_count_ % 8, 0);
+    int num_groups = literal_count_ / 8;
+    int32_t indicator_value = (num_groups << 1) | 1;
+    DCHECK_EQ(indicator_value & 0xFFFFFF00, 0);
+    *literal_indicator_byte_ = static_cast<uint8_t>(indicator_value);
+    literal_indicator_byte_ = NULL;
+    literal_count_ = 0;
+    CheckBufferFull();
+  }
+}
+
+inline void RleEncoder::FlushRepeatedRun() {
+  DCHECK_GT(repeat_count_, 0);
+  bool result = true;
+  // The lsb of 0 indicates this is a repeated run
+  int32_t indicator_value = repeat_count_ << 1 | 0;
+  result &= bit_writer_.PutVlqInt(static_cast<uint32_t>(indicator_value));
+  result &= bit_writer_.PutAligned(
+      current_value_, static_cast<int>(::arrow::bit_util::CeilDiv(bit_width_, 8)));
+  DCHECK(result);
+  num_buffered_values_ = 0;
+  repeat_count_ = 0;
+  CheckBufferFull();
+}
+
+/// Flush the values that have been buffered. At this point we decide whether
+/// we need to switch between the run types or continue the current one.
+inline void RleEncoder::FlushBufferedValues(bool done) { + if (repeat_count_ >= 8) { + // Clear the buffered values. They are part of the repeated run now and we + // don't want to flush them out as literals. + num_buffered_values_ = 0; + if (literal_count_ != 0) { + // There was a current literal run. All the values in it have been flushed + // but we still need to update the indicator byte. + DCHECK_EQ(literal_count_ % 8, 0); + DCHECK_EQ(repeat_count_, 8); + FlushLiteralRun(true); + } + DCHECK_EQ(literal_count_, 0); + return; + } + + literal_count_ += num_buffered_values_; + DCHECK_EQ(literal_count_ % 8, 0); + int num_groups = literal_count_ / 8; + if (num_groups + 1 >= (1 << 6)) { + // We need to start a new literal run because the indicator byte we've reserved + // cannot store more values. + DCHECK(literal_indicator_byte_ != NULL); + FlushLiteralRun(true); + } else { + FlushLiteralRun(done); + } + repeat_count_ = 0; +} + +inline int RleEncoder::Flush() { + if (literal_count_ > 0 || repeat_count_ > 0 || num_buffered_values_ > 0) { + bool all_repeat = literal_count_ == 0 && (repeat_count_ == num_buffered_values_ || + num_buffered_values_ == 0); + // There is something pending, figure out if it's a repeated or literal run + if (repeat_count_ > 0 && all_repeat) { + FlushRepeatedRun(); + } else { + DCHECK_EQ(literal_count_ % 8, 0); + // Buffer the last group of literals to 8 by padding with 0s. + for (; num_buffered_values_ != 0 && num_buffered_values_ < 8; + ++num_buffered_values_) { + buffered_values_[num_buffered_values_] = 0; + } + literal_count_ += num_buffered_values_; + FlushLiteralRun(true); + repeat_count_ = 0; + } + } + bit_writer_.Flush(); + DCHECK_EQ(num_buffered_values_, 0); + DCHECK_EQ(literal_count_, 0); + DCHECK_EQ(repeat_count_, 0); + + return bit_writer_.bytes_written(); +} + +inline void RleEncoder::CheckBufferFull() { + int bytes_written = bit_writer_.bytes_written(); + if (bytes_written + max_run_byte_size_ > bit_writer_.buffer_len()) { + buffer_full_ = true; + } +} + +inline void RleEncoder::Clear() { + buffer_full_ = false; + current_value_ = 0; + repeat_count_ = 0; + num_buffered_values_ = 0; + literal_count_ = 0; + literal_indicator_byte_ = NULL; + bit_writer_.Clear(); +} + +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h new file mode 100644 index 0000000000000000000000000000000000000000..8ad254df200efc08c5c9a4956e0e781b496b2b07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/rows_to_batches.h @@ -0,0 +1,163 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
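The run headers parsed by NextCounts() and emitted by FlushRepeatedRun()/FlushLiteralRun() above share one layout: a VLQ (LEB128) encoded indicator whose least-significant bit selects the run type, 0 for a repeated run and 1 for a literal run. A minimal standalone sketch of that header layout (PutVlq and the two helper names are illustrative stand-ins written for this note, not Arrow's BitWriter API):

#include <cstdint>
#include <vector>

// VLQ/LEB128-encode an unsigned int, low 7 bits first, as BitWriter::PutVlqInt does.
inline void PutVlq(std::vector<uint8_t>* out, uint32_t v) {
  while (v >= 0x80) {
    out->push_back(static_cast<uint8_t>(v & 0x7F) | 0x80);
    v >>= 7;
  }
  out->push_back(static_cast<uint8_t>(v));
}

// Repeated run: indicator (count << 1) | 0, followed by the repeated value
// packed into ceil(bit_width / 8) bytes (value bytes omitted here).
inline void PutRepeatedRunHeader(std::vector<uint8_t>* out, uint32_t count) {
  PutVlq(out, count << 1);
}

// Literal run: indicator (num_groups << 1) | 1, where num_groups counts the
// groups of 8 bit-packed values that follow.
inline void PutLiteralRunHeader(std::vector<uint8_t>* out, uint32_t num_groups) {
  PutVlq(out, (num_groups << 1) | 1);
}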
+
+#pragma once
+
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/table_builder.h"
+#include "arrow/util/iterator.h"
+
+#include <type_traits>
+
+namespace arrow::util {
+
+namespace detail {
+
+// Default identity function row accessor. Used for the common case where the value
+// of each row iterated over is itself also directly iterable.
+[[nodiscard]] constexpr inline auto MakeDefaultRowAccessor() {
+  return [](auto& x) -> Result<decltype(std::ref(x))> { return std::ref(x); };
+}
+
+// Meta-function to check if a type `T` is a range (iterable using `std::begin()` /
+// `std::end()`). `is_range<T>::value` will be false if `T` is not a valid range.
+template <typename T, typename = void>
+struct is_range : std::false_type {};
+
+template <typename T>
+struct is_range<T, std::void_t<decltype(std::begin(std::declval<T>())),
+                               decltype(std::end(std::declval<T>()))>> : std::true_type {
+};
+
+}  // namespace detail
+
+/// Delete overload for `const Range&& rows` because the data's lifetime must exceed
+/// the lifetime of the function call. `data` will be read when client uses the
+/// `RecordBatchReader`
+template <class Range, class DataPointConvertor,
+          class RowAccessor = decltype(detail::MakeDefaultRowAccessor())>
+[[nodiscard]] typename std::enable_if_t<detail::is_range<Range>::value,
+                                        Result<std::shared_ptr<RecordBatchReader>>>
+/* Result<std::shared_ptr<RecordBatchReader>> */ RowsToBatches(
+    const std::shared_ptr<Schema>& schema, const Range&& rows,
+    DataPointConvertor&& data_point_convertor,
+    RowAccessor&& row_accessor = detail::MakeDefaultRowAccessor(),
+    MemoryPool* pool = default_memory_pool(),
+    const std::size_t batch_size = 1024) = delete;
+
+/// \brief Utility function for converting any row-based structure into an
+/// `arrow::RecordBatchReader` (this can be easily converted to an `arrow::Table` using
+/// `arrow::RecordBatchReader::ToTable()`).
+///
+/// Examples of supported types:
+/// - `std::vector<std::vector<std::variant<int, bool, std::string>>>`
+/// - `std::vector`
+
+/// If `rows` (client’s row-based structure) is not a valid C++ range, the client will
+/// need to either make it iterable, or make an adapter/wrapper that is a valid C++
+/// range.
+
+/// The client must provide a `DataPointConvertor` callable type that will convert the
+/// structure’s data points into the corresponding arrow types.
+
+/// Complex nested rows can be supported by providing a custom `row_accessor` instead
+/// of the default.
+
+/// Example usage:
+/// \code{.cpp}
+/// auto IntConvertor = [](ArrayBuilder& array_builder, int value) {
+///   return static_cast<Int64Builder&>(array_builder).Append(value);
+/// };
+/// std::vector<std::vector<int>> data = {{1, 2, 4}, {5, 6, 7}};
+/// auto batches = RowsToBatches(kTestSchema, data, IntConvertor);
+/// \endcode
+
+/// \param[in] schema - The schema to be used in the `RecordBatchReader`
+
+/// \param[in] rows - Iterable row-based structure that will be converted to arrow
+/// batches
+
+/// \param[in] data_point_convertor - Client provided callable type that will convert
+/// the structure’s data points into the corresponding arrow types. The convertor must
+/// return an error `Status` if an error happens during conversion.
+
+/// \param[in] row_accessor - In the common case where the value of each row iterated
+/// over is itself also directly iterable, the client can just use the default.
+/// The provided callable must take the values of the `rows` range and return a
+/// `std::reference_wrapper` to the data points in a given row. The data points
+/// must be in order of their corresponding fields in the schema.
+/// see: \ref `MakeDefaultRowAccessor`
+
+/// \param[in] pool - The MemoryPool to use for allocations.
+
+/// \param[in] batch_size - Number of rows to insert into each RecordBatch.
+
+/// \return `Result<std::shared_ptr<RecordBatchReader>>` result will be a
+/// `std::shared_ptr<RecordBatchReader>` if no errors occurred, else an error status.
+template <class Range, class DataPointConvertor,
+          class RowAccessor = decltype(detail::MakeDefaultRowAccessor())>
+[[nodiscard]] typename std::enable_if_t<detail::is_range<Range>::value,
+                                        Result<std::shared_ptr<RecordBatchReader>>>
+/* Result<std::shared_ptr<RecordBatchReader>> */ RowsToBatches(
+    const std::shared_ptr<Schema>& schema, const Range& rows,
+    DataPointConvertor&& data_point_convertor,
+    RowAccessor&& row_accessor = detail::MakeDefaultRowAccessor(),
+    MemoryPool* pool = default_memory_pool(), const std::size_t batch_size = 1024) {
+  auto make_next_batch =
+      [pool = pool, batch_size = batch_size, rows_ittr = std::begin(rows),
+       rows_ittr_end = std::end(rows), schema = schema,
+       row_accessor = std::forward<RowAccessor>(row_accessor),
+       data_point_convertor = std::forward<DataPointConvertor>(
+           data_point_convertor)]() mutable -> Result<std::shared_ptr<RecordBatch>> {
+    if (rows_ittr == rows_ittr_end) return NULLPTR;
+
+    ARROW_ASSIGN_OR_RAISE(auto record_batch_builder,
+                          RecordBatchBuilder::Make(schema, pool, batch_size));
+
+    for (size_t i = 0; i < batch_size && (rows_ittr != rows_ittr_end);
+         i++, std::advance(rows_ittr, 1)) {
+      int col_index = 0;
+      ARROW_ASSIGN_OR_RAISE(const auto row, row_accessor(*rows_ittr));
+
+      // If the accessor returns a `std::reference_wrapper`, unwrap it
+      const auto& row_unwrapped = [&]() {
+        if constexpr (detail::is_range<decltype(row)>::value)
+          return row;
+        else
+          return row.get();
+      }();
+
+      for (auto& data_point : row_unwrapped) {
+        ArrayBuilder* array_builder = record_batch_builder->GetField(col_index);
+        ARROW_RETURN_IF(array_builder == NULLPTR,
+                        Status::Invalid("array_builder == NULLPTR"));
+
+        ARROW_RETURN_NOT_OK(data_point_convertor(*array_builder, data_point));
+        col_index++;
+      }
+    }
+
+    ARROW_ASSIGN_OR_RAISE(auto result, record_batch_builder->Flush());
+    return result;
+  };
+  return RecordBatchReader::MakeFromIterator(MakeFunctionIterator(make_next_batch),
+                                             schema);
+}
+
+}  // namespace arrow::util
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h
new file mode 100644
index 0000000000000000000000000000000000000000..52e191c4c07846b922a5bd830c2cbbde50538eba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/small_vector.h
@@ -0,0 +1,511 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
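Complementing the IntConvertor example in the RowsToBatches header comment above, here is a sketch of the custom row_accessor path for rows that are plain structs. Row, MakeReader, and the two-field int64 schema are hypothetical; as I read the implementation above, any iterable return type also satisfies the accessor contract, since the is_range branch handles it without a std::reference_wrapper:

#include <array>
#include <vector>

#include "arrow/api.h"
#include "arrow/util/rows_to_batches.h"

// Hypothetical row type whose fields are not directly iterable.
struct Row {
  int64_t id;
  int64_t score;
};

arrow::Result<std::shared_ptr<arrow::RecordBatchReader>> MakeReader(
    const std::shared_ptr<arrow::Schema>& schema, const std::vector<Row>& rows) {
  // Convert one data point using the builder for its column.
  auto convertor = [](arrow::ArrayBuilder& builder, int64_t value) {
    return static_cast<arrow::Int64Builder&>(builder).Append(value);
  };
  // Flatten a Row into an iterable of data points, in schema field order.
  auto accessor = [](const Row& row) -> arrow::Result<std::array<int64_t, 2>> {
    return std::array<int64_t, 2>{row.id, row.score};
  };
  return arrow::util::RowsToBatches(schema, rows, convertor, accessor);
}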
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/util/aligned_storage.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace internal { + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + void destroy() noexcept {} +}; + +template +struct StaticVectorStorageBase { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + + ~StaticVectorStorageBase() noexcept { destroy(); } + + void destroy() noexcept { storage_type::destroy_several(static_data_, size_); } +}; + +template ::value> +struct StaticVectorStorage : public StaticVectorStorageBase { + using Base = StaticVectorStorageBase; + using typename Base::storage_type; + + using Base::size_; + using Base::static_data_; + + StaticVectorStorage() noexcept = default; + + constexpr storage_type* storage_ptr() { return static_data_; } + + constexpr const storage_type* const_storage_ptr() const { return static_data_; } + + // Adjust storage size, but don't initialize any objects + void bump_size(size_t addend) { + assert(size_ + addend <= N); + size_ += addend; + } + + void ensure_capacity(size_t min_capacity) { assert(min_capacity <= N); } + + // Adjust storage size, but don't destroy any objects + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + // Move objects from another storage, but don't destroy any objects currently + // stored in *this. + // You need to call destroy() first if necessary (e.g. in a + // move assignment operator). + void move_construct(StaticVectorStorage&& other) noexcept { + size_ = other.size_; + if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return N; } + + constexpr size_t max_size() const { return N; } + + void reserve(size_t n) {} + + void clear() { + storage_type::destroy_several(static_data_, size_); + size_ = 0; + } +}; + +template +struct SmallVectorStorage { + using storage_type = AlignedStorage; + + storage_type static_data_[N]; + size_t size_ = 0; + storage_type* data_ = static_data_; + size_t dynamic_capacity_ = 0; + + SmallVectorStorage() noexcept = default; + + ~SmallVectorStorage() { destroy(); } + + constexpr storage_type* storage_ptr() { return data_; } + + constexpr const storage_type* const_storage_ptr() const { return data_; } + + void bump_size(size_t addend) { + const size_t new_size = size_ + addend; + ensure_capacity(new_size); + size_ = new_size; + } + + void ensure_capacity(size_t min_capacity) { + if (dynamic_capacity_) { + // Grow dynamic storage if necessary + if (min_capacity > dynamic_capacity_) { + size_t new_capacity = std::max(dynamic_capacity_ * 2, min_capacity); + reallocate_dynamic(new_capacity); + } + } else if (min_capacity > N) { + switch_to_dynamic(min_capacity); + } + } + + void reduce_size(size_t reduce_by) { + assert(reduce_by <= size_); + size_ -= reduce_by; + } + + void destroy() noexcept { + storage_type::destroy_several(data_, size_); + if (dynamic_capacity_) { + delete[] data_; + } + } + + void move_construct(SmallVectorStorage&& other) noexcept { + size_ = other.size_; + dynamic_capacity_ = other.dynamic_capacity_; + if (dynamic_capacity_) { + data_ = other.data_; + other.data_ = other.static_data_; + other.dynamic_capacity_ = 0; + other.size_ = 0; + } 
else if (size_ != 0) { + // Use a compile-time memcpy size (N) for trivial types + storage_type::move_construct_several(other.static_data_, static_data_, size_, N); + } + } + + constexpr size_t capacity() const { return dynamic_capacity_ ? dynamic_capacity_ : N; } + + constexpr size_t max_size() const { return std::numeric_limits::max(); } + + void reserve(size_t n) { + if (dynamic_capacity_) { + if (n > dynamic_capacity_) { + reallocate_dynamic(n); + } + } else if (n > N) { + switch_to_dynamic(n); + } + } + + void clear() { + storage_type::destroy_several(data_, size_); + size_ = 0; + } + + private: + void switch_to_dynamic(size_t new_capacity) { + dynamic_capacity_ = new_capacity; + data_ = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(static_data_, data_, size_); + } + + void reallocate_dynamic(size_t new_capacity) { + assert(new_capacity >= size_); + auto new_data = new storage_type[new_capacity]; + storage_type::move_construct_several_and_destroy_source(data_, new_data, size_); + delete[] data_; + dynamic_capacity_ = new_capacity; + data_ = new_data; + } +}; + +template +class StaticVectorImpl { + private: + Storage storage_; + + T* data_ptr() { return storage_.storage_ptr()->get(); } + + constexpr const T* const_data_ptr() const { + return storage_.const_storage_ptr()->get(); + } + + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + constexpr StaticVectorImpl() noexcept = default; + + // Move and copy constructors + StaticVectorImpl(StaticVectorImpl&& other) noexcept { + storage_.move_construct(std::move(other.storage_)); + } + + StaticVectorImpl& operator=(StaticVectorImpl&& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + // TODO move_assign? 
+ storage_.destroy(); + storage_.move_construct(std::move(other.storage_)); + } + return *this; + } + + StaticVectorImpl(const StaticVectorImpl& other) { + init_by_copying(other.storage_.size_, other.const_data_ptr()); + } + + StaticVectorImpl& operator=(const StaticVectorImpl& other) noexcept { + if (ARROW_PREDICT_TRUE(&other != this)) { + assign_by_copying(other.storage_.size_, other.data()); + } + return *this; + } + + // Automatic conversion from std::vector, for convenience + StaticVectorImpl(const std::vector& other) { // NOLINT: explicit + init_by_copying(other.size(), other.data()); + } + + StaticVectorImpl(std::vector&& other) noexcept { // NOLINT: explicit + init_by_moving(other.size(), other.data()); + } + + StaticVectorImpl& operator=(const std::vector& other) { + assign_by_copying(other.size(), other.data()); + return *this; + } + + StaticVectorImpl& operator=(std::vector&& other) noexcept { + assign_by_moving(other.size(), other.data()); + return *this; + } + + // Constructing from count and optional initialization value + explicit StaticVectorImpl(size_t count) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(); + } + } + + StaticVectorImpl(size_t count, const T& value) { + storage_.bump_size(count); + auto* p = storage_.storage_ptr(); + for (size_t i = 0; i < count; ++i) { + p[i].construct(value); + } + } + + StaticVectorImpl(std::initializer_list values) { + storage_.bump_size(values.size()); + auto* p = storage_.storage_ptr(); + for (auto&& v : values) { + // Unfortunately, cannot move initializer values + p++->construct(v); + } + } + + // Size inspection + + constexpr bool empty() const { return storage_.size_ == 0; } + + constexpr size_t size() const { return storage_.size_; } + + constexpr size_t capacity() const { return storage_.capacity(); } + + constexpr size_t max_size() const { return storage_.max_size(); } + + // Data access + + T& operator[](size_t i) { return data_ptr()[i]; } + + constexpr const T& operator[](size_t i) const { return const_data_ptr()[i]; } + + T& front() { return data_ptr()[0]; } + + constexpr const T& front() const { return const_data_ptr()[0]; } + + T& back() { return data_ptr()[storage_.size_ - 1]; } + + constexpr const T& back() const { return const_data_ptr()[storage_.size_ - 1]; } + + T* data() { return data_ptr(); } + + constexpr const T* data() const { return const_data_ptr(); } + + // Iterators + + iterator begin() { return iterator(data_ptr()); } + + constexpr const_iterator begin() const { return const_iterator(const_data_ptr()); } + + constexpr const_iterator cbegin() const { return const_iterator(const_data_ptr()); } + + iterator end() { return iterator(data_ptr() + storage_.size_); } + + constexpr const_iterator end() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + constexpr const_iterator cend() const { + return const_iterator(const_data_ptr() + storage_.size_); + } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + constexpr const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + constexpr const_reverse_iterator crbegin() const { + return const_reverse_iterator(end()); + } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + constexpr const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + constexpr const_reverse_iterator crend() const { + return const_reverse_iterator(begin()); + } + + // Mutations + + void reserve(size_t n) { 
storage_.reserve(n); } + + void clear() { storage_.clear(); } + + void push_back(const T& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(value); + } + + void push_back(T&& value) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::move(value)); + } + + template + void emplace_back(Args&&... args) { + storage_.bump_size(1); + storage_.storage_ptr()[storage_.size_ - 1].construct(std::forward(args)...); + } + + template + iterator insert(const_iterator insert_at, InputIt first, InputIt last) { + const size_t n = storage_.size_; + const size_t it_size = static_cast(last - first); // XXX might be O(n)? + const size_t pos = static_cast(insert_at - const_data_ptr()); + storage_.bump_size(it_size); + auto* p = storage_.storage_ptr(); + if (it_size == 0) { + return p[pos].get(); + } + const size_t end_pos = pos + it_size; + + // Move [pos; n) to [end_pos; end_pos + n - pos) + size_t i = n; + size_t j = end_pos + n - pos; + while (j > std::max(n, end_pos)) { + p[--j].move_construct(&p[--i]); + } + while (j > end_pos) { + p[--j].move_assign(&p[--i]); + } + assert(j == end_pos); + // Copy [first; last) to [pos; end_pos) + j = pos; + while (j < std::min(n, end_pos)) { + p[j++].assign(*first++); + } + while (j < end_pos) { + p[j++].construct(*first++); + } + assert(first == last); + return p[pos].get(); + } + + void resize(size_t n) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(T{}); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + void resize(size_t n, const T& value) { + const size_t old_size = storage_.size_; + if (n > storage_.size_) { + storage_.bump_size(n - old_size); + auto* p = storage_.storage_ptr(); + for (size_t i = old_size; i < n; ++i) { + p[i].construct(value); + } + } else { + auto* p = storage_.storage_ptr(); + for (size_t i = n; i < old_size; ++i) { + p[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + private: + template + void init_by_copying(size_t n, InputIt src) { + storage_.bump_size(n); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } + + template + void init_by_moving(size_t n, InputIt src) { + init_by_copying(n, std::make_move_iterator(src)); + } + + template + void assign_by_copying(size_t n, InputIt src) { + const size_t old_size = storage_.size_; + if (n > old_size) { + storage_.bump_size(n - old_size); + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < old_size; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = old_size; i < n; ++i, ++src) { + dest[i].construct(*src); + } + } else { + auto* dest = storage_.storage_ptr(); + for (size_t i = 0; i < n; ++i, ++src) { + dest[i].assign(*src); + } + for (size_t i = n; i < old_size; ++i) { + dest[i].destroy(); + } + storage_.reduce_size(old_size - n); + } + } + + template + void assign_by_moving(size_t n, InputIt src) { + assign_by_copying(n, std::make_move_iterator(src)); + } +}; + +template +using StaticVector = StaticVectorImpl>; + +template +using SmallVector = StaticVectorImpl>; + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h 
new file mode 100644
index 0000000000000000000000000000000000000000..cdffe0b2317e5ba555c37ec16e5294bc912a49d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/sort.h
@@ -0,0 +1,78 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <numeric>
+#include <utility>
+#include <vector>
+
+namespace arrow {
+namespace internal {
+
+template <typename T, typename Cmp = std::less<T>>
+std::vector<int64_t> ArgSort(const std::vector<T>& values, Cmp&& cmp = {}) {
+  std::vector<int64_t> indices(values.size());
+  std::iota(indices.begin(), indices.end(), 0);
+  std::sort(indices.begin(), indices.end(),
+            [&](int64_t i, int64_t j) -> bool { return cmp(values[i], values[j]); });
+  return indices;
+}
+
+template <typename T>
+size_t Permute(const std::vector<int64_t>& indices, std::vector<T>* values) {
+  if (indices.size() <= 1) {
+    return indices.size();
+  }
+
+  // mask indicating which of values are in the correct location
+  std::vector<bool> sorted(indices.size(), false);
+
+  size_t cycle_count = 0;
+
+  for (auto cycle_start = sorted.begin(); cycle_start != sorted.end();
+       cycle_start = std::find(cycle_start, sorted.end(), false)) {
+    ++cycle_count;
+
+    // position in which an element belongs WRT sort
+    auto sort_into = static_cast<int64_t>(cycle_start - sorted.begin());
+
+    if (indices[sort_into] == sort_into) {
+      // trivial cycle
+      sorted[sort_into] = true;
+      continue;
+    }
+
+    // resolve this cycle
+    const auto end = sort_into;
+    for (int64_t take_from = indices[sort_into]; take_from != end;
+         take_from = indices[sort_into]) {
+      std::swap(values->at(sort_into), values->at(take_from));
+      sorted[sort_into] = true;
+      sort_into = take_from;
+    }
+    sorted[sort_into] = true;
+  }
+
+  return cycle_count;
+}
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h
new file mode 100644
index 0000000000000000000000000000000000000000..8265e1d22ae0e78d7343b2fce6a0de4bc669ccc8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/spaced.h
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+
+#include "arrow/util/bit_run_reader.h"
+
+namespace arrow {
+namespace util {
+namespace internal {
+
+/// \brief Compress the buffer to spaced, excluding the null entries.
+///
+/// \param[in] src the source buffer
+/// \param[in] num_values the size of source buffer
+/// \param[in] valid_bits bitmap data indicating position of valid slots
+/// \param[in] valid_bits_offset offset into valid_bits
+/// \param[out] output the output buffer spaced
+/// \return The size of spaced buffer.
+template <typename T>
+inline int SpacedCompress(const T* src, int num_values, const uint8_t* valid_bits,
+                          int64_t valid_bits_offset, T* output) {
+  int num_valid_values = 0;
+
+  arrow::internal::SetBitRunReader reader(valid_bits, valid_bits_offset, num_values);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    std::memcpy(output + num_valid_values, src + run.position, run.length * sizeof(T));
+    num_valid_values += static_cast<int>(run.length);
+  }
+
+  return num_valid_values;
+}
+
+/// \brief Relocate values in buffer into positions of non-null values as indicated by
+/// a validity bitmap.
+///
+/// \param[in, out] buffer the in-place buffer
+/// \param[in] num_values total size of buffer including null slots
+/// \param[in] null_count number of null slots
+/// \param[in] valid_bits bitmap data indicating position of valid slots
+/// \param[in] valid_bits_offset offset into valid_bits
+/// \return The number of values expanded, including nulls.
+template <typename T>
+inline int SpacedExpand(T* buffer, int num_values, int null_count,
+                        const uint8_t* valid_bits, int64_t valid_bits_offset) {
+  // Point to end as we add the spacing from the back.
+  int idx_decode = num_values - null_count;
+
+  // Depending on the number of nulls, some of the value slots in buffer may
+  // be uninitialized, and this will cause valgrind warnings / potentially UB
+  std::memset(static_cast<void*>(buffer + idx_decode), 0, null_count * sizeof(T));
+  if (idx_decode == 0) {
+    // All nulls, nothing more to do
+    return num_values;
+  }
+
+  arrow::internal::ReverseSetBitRunReader reader(valid_bits, valid_bits_offset,
+                                                 num_values);
+  while (true) {
+    const auto run = reader.NextRun();
+    if (run.length == 0) {
+      break;
+    }
+    idx_decode -= static_cast<int>(run.length);
+    assert(idx_decode >= 0);
+    std::memmove(buffer + run.position, buffer + idx_decode, run.length * sizeof(T));
+  }
+
+  // Otherwise caller gave an incorrect null_count
+  assert(idx_decode == 0);
+  return num_values;
+}
+
+}  // namespace internal
+}  // namespace util
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h
new file mode 100644
index 0000000000000000000000000000000000000000..71cf9ed44890a78675e4187e03b4c01bff60ae54
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/span.h
@@ -0,0 +1,156 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +namespace arrow::util { + +template +class span; + +// This trait is used to check if a type R can be used to construct a span. +// Specifically, it checks if std::data(R) and std::size(R) are valid expressions +// that may be passed to the span(T*, size_t) constructor. The reason this trait +// is needed rather than expressing this directly in the relevant span constructor +// is that this check requires instantiating span, which would violate the +// C++ standard if written directly in the constructor's enable_if clause +// because span is an incomplete type at that point. By defining this trait +// instead, we add an extra level of indirection that lets us delay the +// evaluation of the template until the first time the associated constructor +// is actually called, at which point span is a complete type. +// +// Note that most compilers do support the noncompliant construct, but nvcc +// does not. See https://github.com/apache/arrow/issues/40252 +template +struct ConstructibleFromDataAndSize : std::false_type {}; + +template +struct ConstructibleFromDataAndSize< + span, R, + std::void_t{std::data(std::declval()), + std::size(std::declval())})>> : std::true_type {}; + +/// std::span polyfill. +/// +/// Does not support static extents. +template +class span { + static_assert(sizeof(T), + R"( +std::span allows contiguous_iterators instead of just pointers, the enforcement +of which requires T to be a complete type. 
arrow::util::span does not support +contiguous_iterators, but T is still required to be a complete type to prevent +writing code which would break when it is replaced by std::span.)"); + + public: + using element_type = T; + using value_type = std::remove_cv_t; + using iterator = T*; + using const_iterator = T const*; + + span() = default; + span(const span&) = default; + span& operator=(const span&) = default; + + template >> + // NOLINTNEXTLINE runtime/explicit + constexpr span(span mut) : span{mut.data(), mut.size()} {} + + constexpr span(T* data, size_t count) : data_{data}, size_{count} {} + + constexpr span(T* begin, T* end) + : data_{begin}, size_{static_cast(end - begin)} {} + + template < + typename R, + std::enable_if_t, R>::value, bool> = true, + typename DisableUnlessSimilarTypes = std::enable_if_t()))>>, + std::decay_t>>> + // NOLINTNEXTLINE runtime/explicit, non-const reference + constexpr span(R&& range) : span{std::data(range), std::size(range)} {} + + constexpr T* begin() const { return data_; } + constexpr T* end() const { return data_ + size_; } + constexpr T* data() const { return data_; } + + constexpr size_t size() const { return size_; } + constexpr size_t size_bytes() const { return size_ * sizeof(T); } + constexpr bool empty() const { return size_ == 0; } + + constexpr T& operator[](size_t i) { return data_[i]; } + constexpr const T& operator[](size_t i) const { return data_[i]; } + + constexpr span subspan(size_t offset) const { + if (offset > size_) return {data_, data_}; + return {data_ + offset, size_ - offset}; + } + + constexpr span subspan(size_t offset, size_t count) const { + auto out = subspan(offset); + if (count < out.size_) { + out.size_ = count; + } + return out; + } + + constexpr bool operator==(span const& other) const { + if (size_ != other.size_) return false; + + if constexpr (std::is_integral_v) { + if (size_ == 0) { + return true; // memcmp does not handle null pointers, even if size_ == 0 + } + return std::memcmp(data_, other.data_, size_bytes()) == 0; + } else { + T* ptr = data_; + for (T const& e : other) { + if (*ptr++ != e) return false; + } + return true; + } + } + constexpr bool operator!=(span const& other) const { return !(*this == other); } + + private: + T* data_{}; + size_t size_{}; +}; + +template +span(R& range) -> span>; + +template +span(T*, size_t) -> span; + +template +constexpr span as_bytes(span s) { + return {reinterpret_cast(s.data()), s.size_bytes()}; +} + +template +constexpr span as_writable_bytes(span s) { + return {reinterpret_cast(s.data()), s.size_bytes()}; +} + +} // namespace arrow::util diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db4e67f59ed6e3afb5c90cb758b7998dd9d510f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/stopwatch.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <chrono>
+
+namespace arrow {
+namespace internal {
+
+class StopWatch {
+  // This clock should give us wall clock time
+  using ClockType = std::chrono::steady_clock;
+
+ public:
+  StopWatch() {}
+
+  void Start() { start_ = ClockType::now(); }
+
+  // Returns time in nanoseconds.
+  uint64_t Stop() {
+    auto stop = ClockType::now();
+    std::chrono::nanoseconds d = stop - start_;
+    assert(d.count() >= 0);
+    return static_cast<uint64_t>(d.count());
+  }
+
+ private:
+  std::chrono::time_point<ClockType> start_;
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..511daed1ecaac688b6d444349bf1c63fb6c53ad6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/test_common.h
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
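StopWatch above is deliberately tiny: Start() samples the steady clock, and Stop() merely returns the nanoseconds elapsed since the last Start(); nothing is actually stopped, so it can be read repeatedly. A usage sketch (DoExpensiveWork is a hypothetical workload):

#include <cstdint>
#include <iostream>

#include "arrow/util/stopwatch.h"

void TimeIt() {
  arrow::internal::StopWatch sw;
  sw.Start();
  DoExpensiveWork();  // hypothetical workload being measured
  uint64_t ns = sw.Stop();
  std::cout << "elapsed: " << (ns / 1e6) << " ms\n";
}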
+ +#pragma once + +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/iterator.h" + +namespace arrow { + +struct TestInt { + TestInt(); + TestInt(int i); // NOLINT runtime/explicit + int value; + + bool operator==(const TestInt& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestInt& v); +}; + +template <> +struct IterationTraits { + static TestInt End() { return TestInt(); } + static bool IsEnd(const TestInt& val) { return val == IterationTraits::End(); } +}; + +struct TestStr { + TestStr(); + TestStr(const std::string& s); // NOLINT runtime/explicit + TestStr(const char* s); // NOLINT runtime/explicit + explicit TestStr(const TestInt& test_int); + std::string value; + + bool operator==(const TestStr& other) const; + + friend std::ostream& operator<<(std::ostream& os, const TestStr& v); +}; + +template <> +struct IterationTraits { + static TestStr End() { return TestStr(); } + static bool IsEnd(const TestStr& val) { return val == IterationTraits::End(); } +}; + +std::vector RangeVector(unsigned int max, unsigned int step = 1); + +template +inline Iterator VectorIt(std::vector v) { + return MakeVectorIterator(std::move(v)); +} + +template +inline Iterator PossiblySlowVectorIt(std::vector v, bool slow = false) { + auto iterator = MakeVectorIterator(std::move(v)); + if (slow) { + return MakeTransformedIterator(std::move(iterator), + [](T item) -> Result> { + SleepABit(); + return TransformYield(item); + }); + } else { + return iterator; + } +} + +template +inline void AssertIteratorExhausted(Iterator& it) { + ASSERT_OK_AND_ASSIGN(T next, it.Next()); + ASSERT_TRUE(IsIterationEnd(next)); +} + +Transformer MakeFilter(std::function filter); + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..44b1e227b0e5fac7ed104df5c487bdc223e44f26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/thread_pool.h @@ -0,0 +1,620 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
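The helpers in test_common.h above compose in iterator tests: VectorIt wraps a vector in an Iterator, PossiblySlowVectorIt optionally injects a SleepABit() per element, and AssertIteratorExhausted checks for the IterationTraits end sentinel. A sketch of a typical test (the test name and values are hypothetical):

TEST(IteratorTest, DrainsVector) {
  auto it = VectorIt<TestInt>({1, 2, 3});
  for (int expected : {1, 2, 3}) {
    // Next() yields Result<TestInt>; the implicit TestInt(int) makes comparison easy.
    ASSERT_OK_AND_ASSIGN(TestInt actual, it.Next());
    ASSERT_EQ(actual, TestInt(expected));
  }
  // Past the last element, Next() returns the IterationTraits<TestInt>::End() sentinel.
  AssertIteratorExhausted(it);
}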
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/cancel.h" +#include "arrow/util/config.h" +#include "arrow/util/functional.h" +#include "arrow/util/future.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +#if defined(_MSC_VER) +// Disable harmless warning for decorated name length limit +#pragma warning(disable : 4503) +#endif + +namespace arrow { + +/// \brief Get the capacity of the global thread pool +/// +/// Return the number of worker threads in the thread pool to which +/// Arrow dispatches various CPU-bound tasks. This is an ideal number, +/// not necessarily the exact number of threads at a given point in time. +/// +/// You can change this number using SetCpuThreadPoolCapacity(). +ARROW_EXPORT int GetCpuThreadPoolCapacity(); + +/// \brief Set the capacity of the global thread pool +/// +/// Set the number of worker threads int the thread pool to which +/// Arrow dispatches various CPU-bound tasks. +/// +/// The current number is returned by GetCpuThreadPoolCapacity(). +ARROW_EXPORT Status SetCpuThreadPoolCapacity(int threads); + +namespace internal { + +// Hints about a task that may be used by an Executor. +// They are ignored by the provided ThreadPool implementation. +struct TaskHints { + // The lower, the more urgent + int32_t priority = 0; + // The IO transfer size in bytes + int64_t io_size = -1; + // The approximate CPU cost in number of instructions + int64_t cpu_cost = -1; + // An application-specific ID + int64_t external_id = -1; +}; + +class ARROW_EXPORT Executor { + public: + using StopCallback = internal::FnOnce; + + virtual ~Executor(); + + // Spawn a fire-and-forget task. + template + Status Spawn(Function&& func) { + return SpawnReal(TaskHints{}, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(Function&& func, StopToken stop_token) { + return SpawnReal(TaskHints{}, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func) { + return SpawnReal(hints, std::forward(func), StopToken::Unstoppable(), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + StopCallback{}); + } + template + Status Spawn(TaskHints hints, Function&& func, StopToken stop_token, + StopCallback stop_callback) { + return SpawnReal(hints, std::forward(func), std::move(stop_token), + std::move(stop_callback)); + } + + // Transfers a future to this executor. Any continuations added to the + // returned future will run in this executor. Otherwise they would run + // on the same thread that called MarkFinished. + // + // This is necessary when (for example) an I/O task is completing a future. + // The continuations of that future should run on the CPU thread pool keeping + // CPU heavy work off the I/O thread pool. So the I/O task should transfer + // the future to the CPU executor before returning. + // + // By default this method will only transfer if the future is not already completed. If + // the future is already completed then any callback would be run synchronously and so + // no transfer is typically necessary. However, in cases where you want to force a + // transfer (e.g. 
to help the scheduler break up units of work across multiple cores) + // then you can override this behavior with `always_transfer`. + template + Future Transfer(Future future) { + return DoTransfer(std::move(future), false); + } + + // Overload of Transfer which will always schedule callbacks on new threads even if the + // future is finished when the callback is added. + // + // This can be useful in cases where you want to ensure parallelism + template + Future TransferAlways(Future future) { + return DoTransfer(std::move(future), true); + } + + // Submit a callable and arguments for execution. Return a future that + // will return the callable's result value once. + // The callable's arguments are copied before execution. + template > + Result Submit(TaskHints hints, StopToken stop_token, Function&& func, + Args&&... args) { + using ValueType = typename FutureType::ValueType; + + auto future = FutureType::Make(); + auto task = std::bind(::arrow::detail::ContinueFuture{}, future, + std::forward(func), std::forward(args)...); + struct { + WeakFuture weak_fut; + + void operator()(const Status& st) { + auto fut = weak_fut.get(); + if (fut.is_valid()) { + fut.MarkFinished(st); + } + } + } stop_callback{WeakFuture(future)}; + ARROW_RETURN_NOT_OK(SpawnReal(hints, std::move(task), std::move(stop_token), + std::move(stop_callback))); + + return future; + } + + template > + Result Submit(StopToken stop_token, Function&& func, Args&&... args) { + return Submit(TaskHints{}, stop_token, std::forward(func), + std::forward(args)...); + } + + template > + Result Submit(TaskHints hints, Function&& func, Args&&... args) { + return Submit(std::move(hints), StopToken::Unstoppable(), + std::forward(func), std::forward(args)...); + } + + template > + Result Submit(Function&& func, Args&&... args) { + return Submit(TaskHints{}, StopToken::Unstoppable(), std::forward(func), + std::forward(args)...); + } + + // Return the level of parallelism (the number of tasks that may be executed + // concurrently). This may be an approximate number. + virtual int GetCapacity() = 0; + + // Return true if the thread from which this function is called is owned by this + // Executor. Returns false if this Executor does not support this property. + virtual bool OwnsThisThread() { return false; } + + // Return true if this is the current executor being called + // n.b. this defaults to just calling OwnsThisThread + // unless the threadpool is disabled + virtual bool IsCurrentExecutor() { return OwnsThisThread(); } + + /// \brief An interface to represent something with a custom destructor + /// + /// \see KeepAlive + class ARROW_EXPORT Resource { + public: + virtual ~Resource() = default; + }; + + /// \brief Keep a resource alive until all executor threads have terminated + /// + /// Executors may have static storage duration. In particular, the CPU and I/O + /// executors are currently implemented this way. These threads may access other + /// objects with static storage duration such as the OpenTelemetry runtime context + /// the default memory pool, or other static executors. + /// + /// The order in which these objects are destroyed is difficult to control. In order + /// to ensure those objects remain alive until all threads have finished those objects + /// should be wrapped in a Resource object and passed into this method. The given + /// shared_ptr will be kept alive until all threads have finished their worker loops. 
+ virtual void KeepAlive(std::shared_ptr resource); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Executor); + + Executor() = default; + + template , typename FTSync = typename FT::SyncType> + Future DoTransfer(Future future, bool always_transfer = false) { + auto transferred = Future::Make(); + if (always_transfer) { + CallbackOptions callback_options = CallbackOptions::Defaults(); + callback_options.should_schedule = ShouldSchedule::Always; + callback_options.executor = this; + auto sync_callback = [transferred](const FTSync& result) mutable { + transferred.MarkFinished(result); + }; + future.AddCallback(sync_callback, callback_options); + return transferred; + } + + // We could use AddCallback's ShouldSchedule::IfUnfinished but we can save a bit of + // work by doing the test here. + auto callback = [this, transferred](const FTSync& result) mutable { + auto spawn_status = + Spawn([transferred, result]() mutable { transferred.MarkFinished(result); }); + if (!spawn_status.ok()) { + transferred.MarkFinished(spawn_status); + } + }; + auto callback_factory = [&callback]() { return callback; }; + if (future.TryAddCallback(callback_factory)) { + return transferred; + } + // If the future is already finished and we aren't going to force spawn a thread + // then we don't need to add another layer of callback and can return the original + // future + return future; + } + + // Subclassing API + virtual Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) = 0; +}; + +/// \brief An executor implementation that runs all tasks on a single thread using an +/// event loop. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. +class ARROW_EXPORT SerialExecutor : public Executor { + public: + template + using TopLevelTask = internal::FnOnce(Executor*)>; + + ~SerialExecutor() override; + + int GetCapacity() override { return 1; }; + bool OwnsThisThread() override; + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + /// \brief Runs the TopLevelTask and any scheduled tasks + /// + /// The TopLevelTask (or one of the tasks it schedules) must either return an invalid + /// status or call the finish signal. Failure to do this will result in a deadlock. For + /// this reason it is preferable (if possible) to use the helper methods (below) + /// RunSynchronously/RunSerially which delegates the responsibility onto a Future + /// producer's existing responsibility to always mark a future finished (which can + /// someday be aided by ARROW-12207). + template , + typename FTSync = typename FT::SyncType> + static FTSync RunInSerialExecutor(TopLevelTask initial_task) { + Future fut = SerialExecutor().Run(std::move(initial_task)); + return FutureToSync(fut); + } + + /// \brief Transform an AsyncGenerator into an Iterator + /// + /// An event loop will be created and each call to Next will power the event loop with + /// the calling thread until the next item is ready to be delivered. + /// + /// Note: The iterator's destructor will run until the given generator is fully + /// exhausted. If you wish to abandon iteration before completion then the correct + /// approach is to use a stop token to cause the generator to exhaust early. 
+ template + static Iterator IterateGenerator( + internal::FnOnce()>>(Executor*)> initial_task) { + auto serial_executor = std::unique_ptr(new SerialExecutor()); + auto maybe_generator = std::move(initial_task)(serial_executor.get()); + if (!maybe_generator.ok()) { + return MakeErrorIterator(maybe_generator.status()); + } + auto generator = maybe_generator.MoveValueUnsafe(); + struct SerialIterator { + SerialIterator(std::unique_ptr executor, + std::function()> generator) + : executor(std::move(executor)), generator(std::move(generator)) {} + ARROW_DISALLOW_COPY_AND_ASSIGN(SerialIterator); + ARROW_DEFAULT_MOVE_AND_ASSIGN(SerialIterator); + ~SerialIterator() { + // A serial iterator must be consumed before it can be destroyed. Allowing it to + // do otherwise would lead to resource leakage. There will likely be deadlocks at + // this spot in the future but these will be the result of other bugs and not the + // fact that we are forcing consumption here. + + // If a streaming API needs to support early abandonment then it should be done so + // with a cancellation token and not simply discarding the iterator and expecting + // the underlying work to clean up correctly. + if (executor && !executor->IsFinished()) { + while (true) { + Result maybe_next = Next(); + if (!maybe_next.ok() || IsIterationEnd(*maybe_next)) { + break; + } + } + } + } + + Result Next() { + executor->Unpause(); + // This call may lead to tasks being scheduled in the serial executor + Future next_fut = generator(); + next_fut.AddCallback([this](const Result& res) { + // If we're done iterating we should drain the rest of the tasks in the executor + if (!res.ok() || IsIterationEnd(*res)) { + executor->Finish(); + return; + } + // Otherwise we will break out immediately, leaving the remaining tasks for + // the next call. + executor->Pause(); + }); +#ifdef ARROW_ENABLE_THREADING + // future must run on this thread + // Borrow this thread and run tasks until the future is finished + executor->RunLoop(); +#else + next_fut.Wait(); +#endif + if (!next_fut.is_finished()) { + // Not clear this is possible since RunLoop wouldn't generally exit + // unless we paused/finished which would imply next_fut has been + // finished. + return Status::Invalid( + "Serial executor terminated before next result computed"); + } + // At this point we may still have tasks in the executor, that is ok. + // We will run those tasks the next time through. + return next_fut.result(); + } + + std::unique_ptr executor; + std::function()> generator; + }; + return Iterator(SerialIterator{std::move(serial_executor), std::move(generator)}); + } + +#ifndef ARROW_ENABLE_THREADING + // run a pending task from loop + // returns true if any tasks were run in the last go round the loop (i.e. if it + // returns false, all executors are waiting) + static bool RunTasksOnAllExecutors(); + static SerialExecutor* GetCurrentExecutor(); + + bool IsCurrentExecutor() override; + +#endif + + protected: + virtual void RunLoop(); + + // State uses mutex + struct State; + std::shared_ptr state_; + + SerialExecutor(); + + // We mark the serial executor "finished" when there should be + // no more tasks scheduled on it. It's not strictly needed but + // can help catch bugs where we are trying to use the executor + // after we are done with it. + void Finish(); + bool IsFinished(); + // We pause the executor when we are running an async generator + // and we have received an item that we can deliver. 
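A sketch of IterateGenerator() in use (illustrative only; it assumes arrow/util/iterator.h supplies Iterator, IterationTraits, and IsIterationEnd, and that the surrounding header is arrow/util/thread_pool.h):

#include <functional>
#include <iostream>
#include <memory>

#include "arrow/util/iterator.h"
#include "arrow/util/thread_pool.h"

arrow::Iterator<int> MakeCountingIterator(int n) {
  return arrow::internal::SerialExecutor::IterateGenerator<int>(
      [n](arrow::internal::Executor*)
          -> arrow::Result<std::function<arrow::Future<int>()>> {
        auto next = std::make_shared<int>(1);
        return std::function<arrow::Future<int>()>([next, n]() {
          // The default IterationTraits<int>::End() is int{}, i.e. 0, so the
          // generated values start at 1 to stay distinguishable from the end.
          if (*next > n) {
            return arrow::Future<int>::MakeFinished(
                arrow::IterationTraits<int>::End());
          }
          return arrow::Future<int>::MakeFinished((*next)++);
        });
      });
}

int main() {
  auto it = MakeCountingIterator(3);
  while (true) {
    arrow::Result<int> maybe_v = it.Next();
    if (!maybe_v.ok() || arrow::IsIterationEnd(*maybe_v)) break;
    std::cout << *maybe_v << std::endl;  // prints 1 2 3
  }
  return 0;
}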
+ void Pause(); + void Unpause(); + + template ::SyncType> + Future Run(TopLevelTask initial_task) { + auto final_fut = std::move(initial_task)(this); + final_fut.AddCallback([this](const FTSync&) { Finish(); }); + RunLoop(); + return final_fut; + } + +#ifndef ARROW_ENABLE_THREADING + // we have to run tasks from all live executors + // during RunLoop if we don't have threading + static std::unordered_set all_executors; + // a pointer to the last one called by the loop + // so all tasks get spawned equally + // on multiple calls to RunTasksOnAllExecutors + static SerialExecutor* last_called_executor; + // without threading we can't tell which executor called the + // current process - so we set it in spawning the task + static SerialExecutor* current_executor; +#endif // ARROW_ENABLE_THREADING +}; + +#ifdef ARROW_ENABLE_THREADING + +/// An Executor implementation spawning tasks in FIFO manner on a fixed-size +/// pool of worker threads. +/// +/// Note: Any sort of nested parallelism will deadlock this executor. Blocking waits are +/// fine but if one task needs to wait for another task it must be expressed as an +/// asynchronous continuation. +class ARROW_EXPORT ThreadPool : public Executor { + public: + // Construct a thread pool with the given number of worker threads + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + // Return the number of tasks either running or in the queue. + int GetNumTasks(); + + bool OwnsThisThread() override; + // Dynamically change the number of worker threads. + // + // This function always returns immediately. + // If fewer threads are running than this number, new threads are spawned + // on-demand when needed for task execution. + // If more threads are running than this number, excess threads are reaped + // as soon as possible. + Status SetCapacity(int threads); + + // Heuristic for the default capacity of a thread pool for CPU-bound tasks. + // This is exposed as a static method to help with testing. + static int DefaultCapacity(); + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. 
+ Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + void KeepAlive(std::shared_ptr resource) override; + + struct State; + + protected: + FRIEND_TEST(TestThreadPool, SetCapacity); + FRIEND_TEST(TestGlobalThreadPool, Capacity); + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + ThreadPool(); + + Status SpawnReal(TaskHints hints, FnOnce task, StopToken, + StopCallback&&) override; + + // Collect finished worker threads, making sure the OS threads have exited + void CollectFinishedWorkersUnlocked(); + // Launch a given number of additional workers + void LaunchWorkersUnlocked(int threads); + // Get the current actual capacity + int GetActualCapacity(); + + static std::shared_ptr MakeCpuThreadPool(); + + std::shared_ptr sp_state_; + State* state_; + bool shutdown_on_destroy_; +}; +#else // ARROW_ENABLE_THREADING +// an executor implementation which pretends to be a thread pool but runs everything +// on the main thread using a static queue (shared between all thread pools, otherwise +// cross-threadpool dependencies will break everything) +class ARROW_EXPORT ThreadPool : public SerialExecutor { + public: + ARROW_FRIEND_EXPORT friend ThreadPool* GetCpuThreadPool(); + + static Result> Make(int threads); + + // Like Make(), but takes care that the returned ThreadPool is compatible + // with destruction late at process exit. + static Result> MakeEternal(int threads); + + // Destroy thread pool; the pool will first be shut down + ~ThreadPool() override; + + // Return the desired number of worker threads. + // The actual number of workers may lag a bit before being adjusted to + // match this value. + int GetCapacity() override; + + virtual int GetActualCapacity(); + + bool OwnsThisThread() override { return true; } + + // Dynamically change the number of worker threads. + // without threading this is equal to the + // number of tasks that can be running at once + // (inside each other) + Status SetCapacity(int threads); + + static int DefaultCapacity() { return 8; } + + // Shutdown the pool. Once the pool starts shutting down, new tasks + // cannot be submitted anymore. + // If "wait" is true, shutdown waits for all pending tasks to be finished. + // If "wait" is false, workers are stopped as soon as currently executing + // tasks are finished. + Status Shutdown(bool wait = true); + + // Wait for the thread pool to become idle + // + // This is useful for sequencing tests + void WaitForIdle(); + + protected: + static std::shared_ptr MakeCpuThreadPool(); + ThreadPool(); +}; + +#endif // ARROW_ENABLE_THREADING + +// Return the process-global thread pool for CPU-bound tasks. +ARROW_EXPORT ThreadPool* GetCpuThreadPool(); + +/// \brief Potentially run an async operation serially (if use_threads is false) +/// \see RunSerially +/// +/// If `use_threads` is true, the global CPU executor is used. +/// If `use_threads` is false, a temporary SerialExecutor is used. +/// `get_future` is called (from this thread) with the chosen executor and must +/// return a future that will eventually finish. This function returns once the +/// future has finished. 
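Before the RunSynchronously helper below, a lifecycle sketch for the ThreadPool API above (illustrative, not part of this diff; assumes arrow/util/thread_pool.h):

#include <iostream>
#include <string>

#include "arrow/util/thread_pool.h"

int main() {
  // A dedicated pool, independent of the global CPU pool.
  std::shared_ptr<arrow::internal::ThreadPool> pool =
      arrow::internal::ThreadPool::Make(/*threads=*/4).ValueOrDie();

  auto fut = pool->Submit([] { return std::string("done"); }).ValueOrDie();
  std::cout << *fut.result() << std::endl;

  // Capacity can be resized at runtime; excess workers are reaped lazily.
  arrow::Status st = pool->SetCapacity(2);
  if (!st.ok()) return 1;

  // Wait for queued tasks to drain, then stop the workers.
  return pool->Shutdown(/*wait=*/true).ok() ? 0 : 1;
}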
+template +typename Fut::SyncType RunSynchronously(FnOnce get_future, + bool use_threads) { + if (use_threads) { + auto fut = std::move(get_future)(GetCpuThreadPool()); + return FutureToSync(fut); + } else { + return SerialExecutor::RunInSerialExecutor(std::move(get_future)); + } +} + +/// \brief Potentially iterate an async generator serially (if use_threads is false) +/// \see IterateGenerator +/// +/// If `use_threads` is true, the global CPU executor will be used. Each call to +/// the iterator will simply wait until the next item is available. Tasks may run in +/// the background between calls. +/// +/// If `use_threads` is false, the calling thread only will be used. Each call to +/// the iterator will use the calling thread to do enough work to generate one item. +/// Tasks will be left in a queue until the next call and no work will be done between +/// calls. +template +Iterator IterateSynchronously( + FnOnce()>>(Executor*)> get_gen, bool use_threads) { + if (use_threads) { + auto maybe_gen = std::move(get_gen)(GetCpuThreadPool()); + if (!maybe_gen.ok()) { + return MakeErrorIterator(maybe_gen.status()); + } + return MakeGeneratorIterator(*maybe_gen); + } else { + return SerialExecutor::IterateGenerator(std::move(get_gen)); + } +} + +} // namespace internal +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h new file mode 100644 index 0000000000000000000000000000000000000000..981eab59676ada65656a6c5dbfbe2c26b332d804 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/time.h @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { + +enum DivideOrMultiply { + MULTIPLY, + DIVIDE, +}; + +ARROW_EXPORT +std::pair GetTimestampConversion(TimeUnit::type in_unit, + TimeUnit::type out_unit); + +// Converts a Timestamp value into another Timestamp value. +// +// This function takes care of properly transforming from one unit to another. +// +// \param[in] in the input type. Must be TimestampType. +// \param[in] out the output type. Must be TimestampType. +// \param[in] value the input value. +// +// \return The converted value, or an error. +ARROW_EXPORT Result ConvertTimestampValue(const std::shared_ptr& in, + const std::shared_ptr& out, + int64_t value); + +template +decltype(std::declval()(std::chrono::seconds{}, std::declval()...)) +VisitDuration(TimeUnit::type unit, Visitor&& visitor, Args&&... 
args) { + switch (unit) { + default: + case TimeUnit::SECOND: + break; + case TimeUnit::MILLI: + return visitor(std::chrono::milliseconds{}, std::forward(args)...); + case TimeUnit::MICRO: + return visitor(std::chrono::microseconds{}, std::forward(args)...); + case TimeUnit::NANO: + return visitor(std::chrono::nanoseconds{}, std::forward(args)...); + } + return visitor(std::chrono::seconds{}, std::forward(args)...); +} + +/// Convert a count of seconds to the corresponding count in a different TimeUnit +struct CastSecondsToUnitImpl { + template + int64_t operator()(Duration, int64_t seconds) { + auto duration = std::chrono::duration_cast(std::chrono::seconds{seconds}); + return static_cast(duration.count()); + } +}; + +inline int64_t CastSecondsToUnit(TimeUnit::type unit, int64_t seconds) { + return VisitDuration(unit, CastSecondsToUnitImpl{}, seconds); +} + +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h new file mode 100644 index 0000000000000000000000000000000000000000..d7808256418eef0faaf54a189d11c6896583d68b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/tracing.h @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/visibility.h" + +namespace arrow { +namespace util { +namespace tracing { + +class ARROW_EXPORT SpanDetails { + public: + virtual ~SpanDetails() {} +}; + +class ARROW_EXPORT Span { + public: + Span() noexcept; + /// True if this span has been started with START_SPAN + bool valid() const; + /// End the span early + void reset(); + std::unique_ptr details; +}; + +} // namespace tracing +} // namespace util +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h new file mode 100644 index 0000000000000000000000000000000000000000..7815d4d1ecc1d66ba20c45eddb6c626833aa54e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/trie.h @@ -0,0 +1,243 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
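Stepping back to the time.h helpers above: a sanity-check sketch of CastSecondsToUnit(), which dispatches through VisitDuration and scales with std::chrono::duration_cast (illustrative, not part of this diff):

#include <cassert>

#include "arrow/util/time.h"

int main() {
  using arrow::TimeUnit;
  using arrow::util::CastSecondsToUnit;

  // 5 s becomes 5000 ms, 5000000 us, and stays 5 in seconds.
  assert(CastSecondsToUnit(TimeUnit::MILLI, 5) == 5000);
  assert(CastSecondsToUnit(TimeUnit::MICRO, 5) == 5000000);
  assert(CastSecondsToUnit(TimeUnit::SECOND, 5) == 5);
  return 0;
}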
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <iosfwd>
+#include <limits>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "arrow/status.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+// A non-zero-terminated small string class.
+// std::string usually has a small string optimization
+// (see review at https://shaharmike.com/cpp/std-string/)
+// but this one allows tight control and optimization of memory layout.
+template <size_t N>
+class SmallString {
+ public:
+  SmallString() : length_(0) {}
+
+  template <typename T>
+  SmallString(const T& v) {  // NOLINT implicit constructor
+    *this = std::string_view(v);
+  }
+
+  SmallString& operator=(const std::string_view s) {
+#ifndef NDEBUG
+    CheckSize(s.size());
+#endif
+    length_ = static_cast<uint8_t>(s.size());
+    std::memcpy(data_, s.data(), length_);
+    return *this;
+  }
+
+  SmallString& operator=(const std::string& s) {
+    *this = std::string_view(s);
+    return *this;
+  }
+
+  SmallString& operator=(const char* s) {
+    *this = std::string_view(s);
+    return *this;
+  }
+
+  explicit operator std::string_view() const { return std::string_view(data_, length_); }
+
+  const char* data() const { return data_; }
+  size_t length() const { return length_; }
+  bool empty() const { return length_ == 0; }
+  char operator[](size_t pos) const {
+#ifndef NDEBUG
+    assert(pos <= length_);
+#endif
+    return data_[pos];
+  }
+
+  SmallString substr(size_t pos) const {
+    return SmallString(std::string_view(*this).substr(pos));
+  }
+
+  SmallString substr(size_t pos, size_t count) const {
+    return SmallString(std::string_view(*this).substr(pos, count));
+  }
+
+  template <typename T>
+  bool operator==(T&& other) const {
+    return std::string_view(*this) == std::string_view(std::forward<T>(other));
+  }
+
+  template <typename T>
+  bool operator!=(T&& other) const {
+    return std::string_view(*this) != std::string_view(std::forward<T>(other));
+  }
+
+ protected:
+  uint8_t length_;
+  char data_[N];
+
+  void CheckSize(size_t n) { assert(n <= N); }
+};
+
+template <size_t N>
+std::ostream& operator<<(std::ostream& os, const SmallString<N>& str) {
+  return os << std::string_view(str);
+}
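A quick sketch of SmallString in use, ahead of the Trie class that builds on it (illustrative, not part of this diff):

#include <cassert>
#include <string_view>

#include "arrow/util/trie.h"

int main() {
  // Stores at most N bytes inline; no heap allocation, no NUL terminator.
  arrow::internal::SmallString<8> s("abc");
  assert(s.length() == 3);
  assert(std::string_view(s) == "abc");
  assert(s.substr(1) == "bc");
  assert(s == "abc" && s != "abd");
  return 0;
}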
+
+// A trie class for byte strings, optimized for small sets of short strings.
+// This class is immutable by design, use a TrieBuilder to construct it.
+class ARROW_EXPORT Trie {
+  using index_type = int16_t;
+  using fast_index_type = int_fast16_t;
+  static constexpr auto kMaxIndex = std::numeric_limits<index_type>::max();
+
+ public:
+  Trie() : size_(0) {}
+  Trie(Trie&&) = default;
+  Trie& operator=(Trie&&) = default;
+
+  int32_t Find(std::string_view s) const {
+    const Node* node = &nodes_[0];
+    fast_index_type pos = 0;
+    if (s.length() > static_cast<size_t>(kMaxIndex)) {
+      return -1;
+    }
+    fast_index_type remaining = static_cast<fast_index_type>(s.length());
+
+    while (remaining > 0) {
+      auto substring_length = node->substring_length();
+      if (substring_length > 0) {
+        auto substring_data = node->substring_data();
+        if (remaining < substring_length) {
+          // Input too short
+          return -1;
+        }
+        for (fast_index_type i = 0; i < substring_length; ++i) {
+          if (s[pos++] != substring_data[i]) {
+            // Mismatching substring
+            return -1;
+          }
+          --remaining;
+        }
+        if (remaining == 0) {
+          // Matched node exactly
+          return node->found_index_;
+        }
+      }
+      // Lookup child using next input character
+      if (node->child_lookup_ == -1) {
+        // Input too long
+        return -1;
+      }
+      auto c = static_cast<uint8_t>(s[pos++]);
+      --remaining;
+      auto child_index = lookup_table_[node->child_lookup_ * 256 + c];
+      if (child_index == -1) {
+        // Child not found
+        return -1;
+      }
+      node = &nodes_[child_index];
+    }
+
+    // Input exhausted
+    if (node->substring_.empty()) {
+      // Matched node exactly
+      return node->found_index_;
+    } else {
+      return -1;
+    }
+  }
+
+  Status Validate() const;
+
+  void Dump() const;
+
+ protected:
+  static constexpr size_t kNodeSize = 16;
+  static constexpr auto kMaxSubstringLength =
+      kNodeSize - 2 * sizeof(index_type) - sizeof(int8_t);
+
+  struct Node {
+    // If this node is a valid end of string, index of found string, otherwise -1
+    index_type found_index_;
+    // Base index for child lookup in lookup_table_ (-1 if no child nodes)
+    index_type child_lookup_;
+    // The substring for this node.
+    SmallString<kMaxSubstringLength> substring_;
+
+    fast_index_type substring_length() const {
+      return static_cast<fast_index_type>(substring_.length());
+    }
+    const char* substring_data() const { return substring_.data(); }
+  };
+
+  static_assert(sizeof(Node) == kNodeSize, "Unexpected node size");
+
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Trie);
+
+  void Dump(const Node* node, const std::string& indent) const;
+
+  // Node table: entry 0 is the root node
+  std::vector<Node> nodes_;
+
+  // Indexed lookup structure: gives index in node table, or -1 if not found
+  std::vector<index_type> lookup_table_;
+
+  // Number of entries
+  index_type size_;
+
+  friend class TrieBuilder;
+};
+
+class ARROW_EXPORT TrieBuilder {
+  using index_type = Trie::index_type;
+  using fast_index_type = Trie::fast_index_type;
+
+ public:
+  TrieBuilder();
+  Status Append(std::string_view s, bool allow_duplicate = false);
+  Trie Finish();
+
+ protected:
+  // Extend the lookup table by 256 entries, return the index of the new span
+  Status ExtendLookupTable(index_type* out_lookup_index);
+  // Split the node given by the index at the substring index `split_at`
+  Status SplitNode(fast_index_type node_index, fast_index_type split_at);
+  // Append an already constructed child node to the parent
+  Status AppendChildNode(Trie::Node* parent, uint8_t ch, Trie::Node&& node);
+  // Create a matching child node from this parent
+  Status CreateChildNode(Trie::Node* parent, uint8_t ch, std::string_view substring);
+  Status CreateChildNode(Trie::Node* parent, char ch, std::string_view substring);
+
+  Trie trie_;
+
+  static constexpr auto kMaxIndex = std::numeric_limits<index_type>::max();
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..3174881f4d018c6193ff5c12a7d308e39ed75561
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_fwd.h
@@ -0,0 +1,70 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
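Putting the Trie and TrieBuilder above together, a small usage sketch (illustrative, not part of this diff; the sequential index assignment is assumed from TrieBuilder's API):

#include <cassert>

#include "arrow/util/trie.h"

int main() {
  arrow::internal::TrieBuilder builder;
  // Append() is assumed to assign indices in insertion order; Find() then
  // maps a string back to its index, or returns -1 on a miss.
  assert(builder.Append("null").ok());
  assert(builder.Append("NA").ok());
  arrow::internal::Trie trie = builder.Finish();

  assert(trie.Find("null") == 0);
  assert(trie.Find("NA") == 1);
  assert(trie.Find("nan") == -1);
  return 0;
}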
+
+#pragma once
+
+namespace arrow {
+
+namespace internal {
+struct Empty;
+}  // namespace internal
+
+template <typename T>
+class WeakFuture;
+class FutureWaiter;
+
+class TimestampParser;
+
+namespace internal {
+
+class Executor;
+class TaskGroup;
+class ThreadPool;
+class CpuInfo;
+
+namespace tracing {
+
+struct Scope;
+
+}  // namespace tracing
+}  // namespace internal
+
+struct Compression {
+  /// \brief Compression algorithm
+  enum type {
+    UNCOMPRESSED,
+    SNAPPY,
+    GZIP,
+    BROTLI,
+    ZSTD,
+    LZ4,
+    LZ4_FRAME,
+    LZO,
+    BZ2,
+    LZ4_HADOOP
+  };
+};
+
+namespace util {
+class AsyncTaskScheduler;
+class Compressor;
+class Decompressor;
+class Codec;
+class Uri;
+}  // namespace util
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1906152423c97e11ef9f577f46c7f4d4d124597
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/type_traits.h
@@ -0,0 +1,46 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstddef>
+#include <type_traits>
+
+namespace arrow {
+namespace internal {
+
+/// \brief Metafunction to allow checking if a type matches any of another set of types
+template <typename T, typename... Args>
+struct IsOneOf : std::false_type {};  /// Base case: nothing has matched
+
+template <typename T, typename U, typename... Args>
+struct IsOneOf<T, U, Args...> {
+  /// Recursive case: T == U or T matches any other types provided (not including U).
+  static constexpr bool value = std::is_same<T, U>::value || IsOneOf<T, Args...>::value;
+};
+
+/// \brief Shorthand for using IsOneOf + std::enable_if
+template <typename T, typename... Args>
+using EnableIfIsOneOf = typename std::enable_if<IsOneOf<T, Args...>::value, T>::type;
+
+/// \brief is_null_pointer from C++17
+template <typename T>
+struct is_null_pointer : std::is_same<std::nullptr_t, typename std::remove_cv<T>::type> {
+};
+
+}  // namespace internal
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h
new file mode 100644
index 0000000000000000000000000000000000000000..900d8011dfd69506ec7ee546f6f32109c448e5f5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/ubsan.h
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
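A usage sketch for the IsOneOf/EnableIfIsOneOf metafunctions above (illustrative, not part of this diff):

#include <cstdint>
#include <string>

#include "arrow/util/type_traits.h"

using arrow::internal::EnableIfIsOneOf;
using arrow::internal::IsOneOf;

static_assert(IsOneOf<int, float, int, bool>::value, "int is in the list");
static_assert(!IsOneOf<std::string, float, int, bool>::value,
              "std::string is not in the list");

// This overload participates in resolution only for the listed types.
template <typename T>
EnableIfIsOneOf<T, int32_t, int64_t> Twice(T v) {
  return v + v;
}

int main() { return Twice<int64_t>(21) == 42 ? 0 : 1; }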
+
+// Contains utilities for making UBSan happy.
+
+#pragma once
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace util {
+
+namespace internal {
+
+constexpr uint8_t kNonNullFiller = 0;
+
+}  // namespace internal
+
+/// \brief Returns maybe_null if not null or a non-null pointer to an arbitrary memory
+/// that shouldn't be dereferenced.
+///
+/// Memset/Memcpy are undefined when a nullptr is passed as an argument; use this
+/// utility method to wrap locations where this could happen.
+///
+/// Note: Flatbuffers has UBSan warnings if a zero length vector is passed.
+/// https://github.com/google/flatbuffers/pull/5355 is trying to resolve
+/// them.
+template <typename T>
+inline T* MakeNonNull(T* maybe_null = NULLPTR) {
+  if (ARROW_PREDICT_TRUE(maybe_null != NULLPTR)) {
+    return maybe_null;
+  }
+
+  return const_cast<T*>(reinterpret_cast<const T*>(&internal::kNonNullFiller));
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, T> SafeLoadAs(
+    const uint8_t* unaligned) {
+  std::remove_const_t<T> ret;
+  std::memcpy(&ret, unaligned, sizeof(T));
+  return ret;
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, T> SafeLoad(const T* unaligned) {
+  std::remove_const_t<T> ret;
+  std::memcpy(&ret, unaligned, sizeof(T));
+  return ret;
+}
+
+template <typename U, typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T> &&
+                            std::is_trivially_copyable_v<U> && sizeof(T) == sizeof(U),
+                        U>
+SafeCopy(T value) {
+  std::remove_const_t<U> ret;
+  std::memcpy(&ret, &value, sizeof(T));
+  return ret;
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_trivially_copyable_v<T>, void> SafeStore(void* unaligned,
+                                                                         T value) {
+  std::memcpy(unaligned, &value, sizeof(T));
+}
+
+}  // namespace util
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f30d5a32781924a3c64904a203a03d9d3d48d79
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/union_util.h
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
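A sketch of the ubsan.h helpers above in action (illustrative, not part of this diff; the expected values assume a little-endian machine):

#include <cstdint>

#include "arrow/util/ubsan.h"

int main() {
  // Dereferencing a reinterpret_cast at an odd offset is UB for misaligned
  // loads; SafeLoadAs copies the bytes out with memcpy instead.
  alignas(8) uint8_t buffer[8] = {0, 1, 0, 0, 0, 0, 0, 0};
  uint32_t v = arrow::util::SafeLoadAs<uint32_t>(buffer + 1);  // unaligned read

  // SafeCopy is a memcpy-based bit cast between same-sized trivial types.
  uint32_t bits = arrow::util::SafeCopy<uint32_t>(1.5f);

  return (v == 1 && bits == 0x3fc00000u) ? 0 : 1;
}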
+
+#include <cstdint>
+#include "arrow/array/data.h"
+
+namespace arrow {
+namespace union_util {
+
+/// \brief Compute the number of logical nulls in a sparse union array
+int64_t LogicalSparseUnionNullCount(const ArraySpan& span);
+
+/// \brief Compute the number of logical nulls in a dense union array
+int64_t LogicalDenseUnionNullCount(const ArraySpan& span);
+
+}  // namespace union_util
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2e383e714b3eb8e0a0b6a23b1086913093a5c29
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/unreachable.h
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/util/visibility.h"
+
+#include <string_view>
+
+namespace arrow {
+
+[[noreturn]] ARROW_EXPORT void Unreachable(const char* message = "Unreachable");
+
+[[noreturn]] ARROW_EXPORT void Unreachable(std::string_view message);
+
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h
new file mode 100644
index 0000000000000000000000000000000000000000..74dbe924ff23740fb603c558e87fc54253392030
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/uri.h
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow::util {
+
+/// \brief A parsed URI
+class ARROW_EXPORT Uri {
+ public:
+  Uri();
+  ~Uri();
+  Uri(Uri&&);
+  Uri& operator=(Uri&&);
+
+  // XXX Should we use std::string_view instead? These functions are
+  // not performance-critical.
+
+  /// The URI scheme, such as "http", or the empty string if the URI has no
+  /// explicit scheme.
+  std::string scheme() const;
+
+  /// Convenience function that returns true if the scheme() is "file"
+  bool is_file_scheme() const;
+
+  /// Whether the URI has an explicit host name. This may return true if
+  /// the URI has an empty host (e.g. "file:///tmp/foo"), while it returns
+  /// false if the URI has no host component at all (e.g. "file:/tmp/foo").
+  bool has_host() const;
+  /// The URI host name, such as "localhost", "127.0.0.1" or "::1", or the empty
+  /// string if the URI does not have a host component.
+  std::string host() const;
+
+  /// The URI port number, as a string such as "80", or the empty string if the URI
+  /// does not have a port number component.
+  std::string port_text() const;
+  /// The URI port parsed as an integer, or -1 if the URI does not have a port
+  /// number component.
+  int32_t port() const;
+
+  /// The username specified in the URI.
+  std::string username() const;
+  /// The password specified in the URI.
+  std::string password() const;
+
+  /// The URI path component.
+  std::string path() const;
+
+  /// The URI query string
+  std::string query_string() const;
+
+  /// The URI query items
+  ///
+  /// Note this API doesn't allow differentiating between an empty value
+  /// and a missing value, such as in "a&b=1" vs. "a=&b=1".
+  Result<std::vector<std::pair<std::string, std::string>>> query_items() const;
+
+  /// Get the string representation of this URI.
+  const std::string& ToString() const;
+
+  /// Factory function to parse a URI from its string representation.
+  Status Parse(const std::string& uri_string);
+
+  /// Factory function to parse a URI from its string representation.
+  static Result<Uri> FromString(const std::string& uri_string);
+
+ private:
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+/// Percent-encode the input string, for use e.g. as a URI query parameter.
+///
+/// This will escape directory separators, making this function unsuitable
+/// for encoding URI paths directly. See UriFromAbsolutePath() instead.
+ARROW_EXPORT
+std::string UriEscape(std::string_view s);
+
+ARROW_EXPORT
+std::string UriUnescape(std::string_view s);
+
+/// Encode a host for use within a URI, such as "localhost",
+/// "127.0.0.1", or "[::1]".
+ARROW_EXPORT
+std::string UriEncodeHost(std::string_view host);
+
+/// Whether the string is a syntactically valid URI scheme according to RFC 3986.
+ARROW_EXPORT
+bool IsValidUriScheme(std::string_view s);
+
+/// Create a file uri from a given absolute path
+ARROW_EXPORT
+Result<std::string> UriFromAbsolutePath(std::string_view path);
+
+}  // namespace arrow::util
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca93fab5b9f4e1f43d451689f0e75cb5572ce983
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/utf8.h
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
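A usage sketch for the Uri class above (illustrative, not part of this diff):

#include <iostream>

#include "arrow/util/uri.h"

int main() {
  auto maybe_uri = arrow::util::Uri::FromString(
      "https://user:pass@example.com:8080/data/file.parquet?use_ssl=1");
  if (!maybe_uri.ok()) {
    return 1;
  }
  const arrow::util::Uri& uri = *maybe_uri;
  std::cout << uri.scheme() << "\n"         // https
            << uri.host() << "\n"           // example.com
            << uri.port() << "\n"           // 8080
            << uri.path() << "\n"           // /data/file.parquet
            << uri.query_string() << "\n"   // use_ssl=1
            << arrow::util::UriEscape("a b") << "\n";  // a%20b
  return 0;
}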
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace util {
+
+// Convert a UTF8 string to a wstring (either UTF16 or UTF32, depending
+// on the wchar_t width).
+ARROW_EXPORT Result<std::wstring> UTF8ToWideString(std::string_view source);
+
+// Similarly, convert a wstring to a UTF8 string.
+ARROW_EXPORT Result<std::string> WideStringToUTF8(const std::wstring& source);
+
+// Convert UTF8 string to a UTF16 string.
+ARROW_EXPORT Result<std::u16string> UTF8StringToUTF16(std::string_view source);
+
+// Convert UTF16 string to a UTF8 string.
+ARROW_EXPORT Result<std::string> UTF16StringToUTF8(std::u16string_view source);
+
+// This function needs to be called before doing UTF8 validation.
+ARROW_EXPORT void InitializeUTF8();
+
+ARROW_EXPORT bool ValidateUTF8(const uint8_t* data, int64_t size);
+
+ARROW_EXPORT bool ValidateUTF8(std::string_view str);
+
+// Skip UTF8 byte order mark, if any.
+ARROW_EXPORT
+Result<const uint8_t*> SkipUTF8BOM(const uint8_t* data, int64_t size);
+
+static constexpr uint32_t kMaxUnicodeCodepoint = 0x110000;
+
+}  // namespace util
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h
new file mode 100644
index 0000000000000000000000000000000000000000..609906052cd20714de07ad81824ba81bb30f9b5d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/value_parsing.h
@@ -0,0 +1,945 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
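A quick sketch of the utf8.h validation API above (illustrative, not part of this diff):

#include <cassert>
#include <string_view>

#include "arrow/util/utf8.h"

int main() {
  // Per the header comment, InitializeUTF8() must run once before any
  // validation call.
  arrow::util::InitializeUTF8();

  assert(arrow::util::ValidateUTF8(std::string_view("caf\xc3\xa9")));  // "café"
  assert(!arrow::util::ValidateUTF8(std::string_view("\xc3")));  // truncated sequence
  return 0;
}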
+ +// This is a private header for string-to-number parsing utilities + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/config.h" +#include "arrow/util/macros.h" +#include "arrow/util/time.h" +#include "arrow/util/visibility.h" +#include "arrow/vendored/datetime.h" +#include "arrow/vendored/strptime.h" + +namespace arrow { + +/// \brief A virtual string to timestamp parser +class ARROW_EXPORT TimestampParser { + public: + virtual ~TimestampParser() = default; + + virtual bool operator()(const char* s, size_t length, TimeUnit::type out_unit, + int64_t* out, + bool* out_zone_offset_present = NULLPTR) const = 0; + + virtual const char* kind() const = 0; + + virtual const char* format() const; + + /// \brief Create a TimestampParser that recognizes strptime-like format strings + static std::shared_ptr MakeStrptime(std::string format); + + /// \brief Create a TimestampParser that recognizes (locale-agnostic) ISO8601 + /// timestamps + static std::shared_ptr MakeISO8601(); +}; + +namespace internal { + +/// \brief The entry point for conversion from strings. +/// +/// Specializations of StringConverter for `ARROW_TYPE` must define: +/// - A default constructible member type `value_type` which will be yielded on a +/// successful parse. +/// - The static member function `Convert`, callable with signature +/// `(const ARROW_TYPE& t, const char* s, size_t length, value_type* out)`. +/// `Convert` returns truthy for successful parses and assigns the parsed values to +/// `*out`. Parameters required for parsing (for example a timestamp's TimeUnit) +/// are acquired from the type parameter `t`. +template +struct StringConverter; + +template +struct is_parseable { + template ::value_type> + static std::true_type Test(U*); + + template + static std::false_type Test(...); + + static constexpr bool value = decltype(Test(NULLPTR))::value; +}; + +template +using enable_if_parseable = enable_if_t::value, R>; + +template <> +struct StringConverter { + using value_type = bool; + + bool Convert(const BooleanType&, const char* s, size_t length, value_type* out) { + if (length == 1) { + // "0" or "1"? + if (s[0] == '0') { + *out = false; + return true; + } + if (s[0] == '1') { + *out = true; + return true; + } + return false; + } + if (length == 4) { + // "true"? + *out = true; + return ((s[0] == 't' || s[0] == 'T') && (s[1] == 'r' || s[1] == 'R') && + (s[2] == 'u' || s[2] == 'U') && (s[3] == 'e' || s[3] == 'E')); + } + if (length == 5) { + // "false"? 
+ *out = false; + return ((s[0] == 'f' || s[0] == 'F') && (s[1] == 'a' || s[1] == 'A') && + (s[2] == 'l' || s[2] == 'L') && (s[3] == 's' || s[3] == 'S') && + (s[4] == 'e' || s[4] == 'E')); + } + return false; + } +}; + +// Ideas for faster float parsing: +// - http://rapidjson.org/md_doc_internals.html#ParsingDouble +// - https://github.com/google/double-conversion [used here] +// - https://github.com/achan001/dtoa-fast + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, float* out); + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, double* out); + +ARROW_EXPORT +bool StringToFloat(const char* s, size_t length, char decimal_point, uint16_t* out); + +template <> +struct StringConverter { + using value_type = float; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const FloatType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +template <> +struct StringConverter { + using value_type = double; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const DoubleType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +template <> +struct StringConverter { + using value_type = uint16_t; + + explicit StringConverter(char decimal_point = '.') : decimal_point(decimal_point) {} + + bool Convert(const HalfFloatType&, const char* s, size_t length, value_type* out) { + return ARROW_PREDICT_TRUE(StringToFloat(s, length, decimal_point, out)); + } + + private: + const char decimal_point; +}; + +// NOTE: HalfFloatType would require a half<->float conversion library + +inline uint8_t ParseDecimalDigit(char c) { return static_cast(c - '0'); } + +#define PARSE_UNSIGNED_ITERATION(C_TYPE) \ + if (length > 0) { \ + uint8_t digit = ParseDecimalDigit(*s++); \ + result = static_cast(result * 10U); \ + length--; \ + if (ARROW_PREDICT_FALSE(digit > 9U)) { \ + /* Non-digit */ \ + return false; \ + } \ + result = static_cast(result + digit); \ + } else { \ + break; \ + } + +#define PARSE_UNSIGNED_ITERATION_LAST(C_TYPE) \ + if (length > 0) { \ + if (ARROW_PREDICT_FALSE(result > std::numeric_limits::max() / 10U)) { \ + /* Overflow */ \ + return false; \ + } \ + uint8_t digit = ParseDecimalDigit(*s++); \ + result = static_cast(result * 10U); \ + C_TYPE new_result = static_cast(result + digit); \ + if (ARROW_PREDICT_FALSE(--length > 0)) { \ + /* Too many digits */ \ + return false; \ + } \ + if (ARROW_PREDICT_FALSE(digit > 9U)) { \ + /* Non-digit */ \ + return false; \ + } \ + if (ARROW_PREDICT_FALSE(new_result < result)) { \ + /* Overflow */ \ + return false; \ + } \ + result = new_result; \ + } + +inline bool ParseUnsigned(const char* s, size_t length, uint8_t* out) { + uint8_t result = 0; + + do { + PARSE_UNSIGNED_ITERATION(uint8_t); + PARSE_UNSIGNED_ITERATION(uint8_t); + PARSE_UNSIGNED_ITERATION_LAST(uint8_t); + } while (false); + *out = result; + return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint16_t* out) { + uint16_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION(uint16_t); + PARSE_UNSIGNED_ITERATION_LAST(uint16_t); + } while (false); + *out = result; 
+ return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint32_t* out) { + uint32_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + PARSE_UNSIGNED_ITERATION(uint32_t); + + PARSE_UNSIGNED_ITERATION_LAST(uint32_t); + } while (false); + *out = result; + return true; +} + +inline bool ParseUnsigned(const char* s, size_t length, uint64_t* out) { + uint64_t result = 0; + do { + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + PARSE_UNSIGNED_ITERATION(uint64_t); + + PARSE_UNSIGNED_ITERATION_LAST(uint64_t); + } while (false); + *out = result; + return true; +} + +#undef PARSE_UNSIGNED_ITERATION +#undef PARSE_UNSIGNED_ITERATION_LAST + +template +bool ParseHex(const char* s, size_t length, T* out) { + // lets make sure that the length of the string is not too big + if (!ARROW_PREDICT_TRUE(sizeof(T) * 2 >= length && length > 0)) { + return false; + } + T result = 0; + for (size_t i = 0; i < length; i++) { + result = static_cast(result << 4); + if (s[i] >= '0' && s[i] <= '9') { + result = static_cast(result | (s[i] - '0')); + } else if (s[i] >= 'A' && s[i] <= 'F') { + result = static_cast(result | (s[i] - 'A' + 10)); + } else if (s[i] >= 'a' && s[i] <= 'f') { + result = static_cast(result | (s[i] - 'a' + 10)); + } else { + /* Non-digit */ + return false; + } + } + *out = result; + return true; +} + +template +struct StringToUnsignedIntConverterMixin { + using value_type = typename ARROW_TYPE::c_type; + + bool Convert(const ARROW_TYPE&, const char* s, size_t length, value_type* out) { + if (ARROW_PREDICT_FALSE(length == 0)) { + return false; + } + // If it starts with 0x then its hex + if (length > 2 && s[0] == '0' && ((s[1] == 'x') || (s[1] == 'X'))) { + length -= 2; + s += 2; + + return ARROW_PREDICT_TRUE(ParseHex(s, length, out)); + } + // Skip leading zeros + while (length > 0 && *s == '0') { + length--; + s++; + } + return ParseUnsigned(s, length, out); + } +}; + +template <> +struct StringConverter : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template <> +struct StringConverter + : public StringToUnsignedIntConverterMixin { + using StringToUnsignedIntConverterMixin::StringToUnsignedIntConverterMixin; +}; + +template +struct StringToSignedIntConverterMixin { + 
using value_type = typename ARROW_TYPE::c_type; + using unsigned_type = typename std::make_unsigned::type; + + bool Convert(const ARROW_TYPE&, const char* s, size_t length, value_type* out) { + static constexpr auto max_positive = + static_cast(std::numeric_limits::max()); + // Assuming two's complement + static constexpr unsigned_type max_negative = max_positive + 1; + bool negative = false; + unsigned_type unsigned_value = 0; + + if (ARROW_PREDICT_FALSE(length == 0)) { + return false; + } + // If it starts with 0x then its hex + if (length > 2 && s[0] == '0' && ((s[1] == 'x') || (s[1] == 'X'))) { + length -= 2; + s += 2; + + if (!ARROW_PREDICT_TRUE(ParseHex(s, length, &unsigned_value))) { + return false; + } + *out = static_cast(unsigned_value); + return true; + } + + if (*s == '-') { + negative = true; + s++; + if (--length == 0) { + return false; + } + } + // Skip leading zeros + while (length > 0 && *s == '0') { + length--; + s++; + } + if (!ARROW_PREDICT_TRUE(ParseUnsigned(s, length, &unsigned_value))) { + return false; + } + if (negative) { + if (ARROW_PREDICT_FALSE(unsigned_value > max_negative)) { + return false; + } + // To avoid both compiler warnings (with unsigned negation) + // and undefined behaviour (with signed negation overflow), + // use the expanded formula for 2's complement negation. + *out = static_cast(~unsigned_value + 1); + } else { + if (ARROW_PREDICT_FALSE(unsigned_value > max_positive)) { + return false; + } + *out = static_cast(unsigned_value); + } + return true; + } +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +template <> +struct StringConverter : public StringToSignedIntConverterMixin { + using StringToSignedIntConverterMixin::StringToSignedIntConverterMixin; +}; + +namespace detail { + +// Inline-able ISO-8601 parser + +using ts_type = TimestampType::c_type; + +template +static inline bool ParseHH(const char* s, Duration* out) { + uint8_t hours = 0; + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours)); + return true; +} + +template +static inline bool ParseHH_MM(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + if (ARROW_PREDICT_FALSE(s[2] != ':')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 3, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if (ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes)); + return true; +} + +template +static inline bool ParseHHMM(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 2, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if 
(ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes)); + return true; +} + +template +static inline bool ParseHH_MM_SS(const char* s, Duration* out) { + uint8_t hours = 0; + uint8_t minutes = 0; + uint8_t seconds = 0; + if (ARROW_PREDICT_FALSE(s[2] != ':') || ARROW_PREDICT_FALSE(s[5] != ':')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 2, &hours))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 3, 2, &minutes))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 6, 2, &seconds))) { + return false; + } + if (ARROW_PREDICT_FALSE(hours >= 24)) { + return false; + } + if (ARROW_PREDICT_FALSE(minutes >= 60)) { + return false; + } + if (ARROW_PREDICT_FALSE(seconds >= 60)) { + return false; + } + *out = std::chrono::duration_cast(std::chrono::hours(hours) + + std::chrono::minutes(minutes) + + std::chrono::seconds(seconds)); + return true; +} + +static inline bool ParseSubSeconds(const char* s, size_t length, TimeUnit::type unit, + uint32_t* out) { + // The decimal point has been peeled off at this point + + // Fail if number of decimal places provided exceeds what the unit can hold. + // Calculate how many trailing decimal places are omitted for the unit + // e.g. if 4 decimal places are provided and unit is MICRO, 2 are missing + size_t omitted = 0; + switch (unit) { + case TimeUnit::MILLI: + if (ARROW_PREDICT_FALSE(length > 3)) { + return false; + } + if (length < 3) { + omitted = 3 - length; + } + break; + case TimeUnit::MICRO: + if (ARROW_PREDICT_FALSE(length > 6)) { + return false; + } + if (length < 6) { + omitted = 6 - length; + } + break; + case TimeUnit::NANO: + if (ARROW_PREDICT_FALSE(length > 9)) { + return false; + } + if (length < 9) { + omitted = 9 - length; + } + break; + default: + return false; + } + + if (ARROW_PREDICT_TRUE(omitted == 0)) { + return ParseUnsigned(s, length, out); + } else { + uint32_t subseconds = 0; + bool success = ParseUnsigned(s, length, &subseconds); + if (ARROW_PREDICT_TRUE(success)) { + switch (omitted) { + case 1: + *out = subseconds * 10; + break; + case 2: + *out = subseconds * 100; + break; + case 3: + *out = subseconds * 1000; + break; + case 4: + *out = subseconds * 10000; + break; + case 5: + *out = subseconds * 100000; + break; + case 6: + *out = subseconds * 1000000; + break; + case 7: + *out = subseconds * 10000000; + break; + case 8: + *out = subseconds * 100000000; + break; + default: + // Impossible case + break; + } + return true; + } else { + return false; + } + } +} + +} // namespace detail + +template +static inline bool ParseYYYY_MM_DD(const char* s, Duration* since_epoch) { + uint16_t year = 0; + uint8_t month = 0; + uint8_t day = 0; + if (ARROW_PREDICT_FALSE(s[4] != '-') || ARROW_PREDICT_FALSE(s[7] != '-')) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 0, 4, &year))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 5, 2, &month))) { + return false; + } + if (ARROW_PREDICT_FALSE(!ParseUnsigned(s + 8, 2, &day))) { + return false; + } + arrow_vendored::date::year_month_day ymd{arrow_vendored::date::year{year}, + arrow_vendored::date::month{month}, + arrow_vendored::date::day{day}}; + if (ARROW_PREDICT_FALSE(!ymd.ok())) return false; + + *since_epoch = std::chrono::duration_cast( + arrow_vendored::date::sys_days{ymd}.time_since_epoch()); + return true; +} + +static inline bool ParseTimestampISO8601(const char* s, size_t length, + 
TimeUnit::type unit, TimestampType::c_type* out, + bool* out_zone_offset_present = NULLPTR) { + using seconds_type = std::chrono::duration; + + // We allow the following zone offset formats: + // - (none) + // - Z + // - [+-]HH(:?MM)? + // + // We allow the following formats for all units: + // - "YYYY-MM-DD" + // - "YYYY-MM-DD[ T]hhZ?" + // - "YYYY-MM-DD[ T]hh:mmZ?" + // - "YYYY-MM-DD[ T]hh:mm:ssZ?" + // + // We allow the following formats for unit == MILLI, MICRO, or NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{1,3}Z?" + // + // We allow the following formats for unit == MICRO, or NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{4,6}Z?" + // + // We allow the following formats for unit == NANO: + // - "YYYY-MM-DD[ T]hh:mm:ss.s{7,9}Z?" + // + // UTC is always assumed, and the DataType's timezone is ignored. + // + + if (ARROW_PREDICT_FALSE(length < 10)) return false; + + seconds_type seconds_since_epoch; + if (ARROW_PREDICT_FALSE(!ParseYYYY_MM_DD(s, &seconds_since_epoch))) { + return false; + } + + if (length == 10) { + *out = util::CastSecondsToUnit(unit, seconds_since_epoch.count()); + return true; + } + + if (ARROW_PREDICT_FALSE(s[10] != ' ') && ARROW_PREDICT_FALSE(s[10] != 'T')) { + return false; + } + + if (out_zone_offset_present) { + *out_zone_offset_present = false; + } + + seconds_type zone_offset(0); + if (s[length - 1] == 'Z') { + --length; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if (s[length - 3] == '+' || s[length - 3] == '-') { + // [+-]HH + length -= 3; + if (ARROW_PREDICT_FALSE(!detail::ParseHH(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if (s[length - 5] == '+' || s[length - 5] == '-') { + // [+-]HHMM + length -= 5; + if (ARROW_PREDICT_FALSE(!detail::ParseHHMM(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } else if ((s[length - 6] == '+' || s[length - 6] == '-') && (s[length - 3] == ':')) { + // [+-]HH:MM + length -= 6; + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s + length + 1, &zone_offset))) { + return false; + } + if (s[length] == '+') zone_offset *= -1; + if (out_zone_offset_present) *out_zone_offset_present = true; + } + + seconds_type seconds_since_midnight; + switch (length) { + case 13: // YYYY-MM-DD[ T]hh + if (ARROW_PREDICT_FALSE(!detail::ParseHH(s + 11, &seconds_since_midnight))) { + return false; + } + break; + case 16: // YYYY-MM-DD[ T]hh:mm + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s + 11, &seconds_since_midnight))) { + return false; + } + break; + case 19: // YYYY-MM-DD[ T]hh:mm:ss + case 21: // YYYY-MM-DD[ T]hh:mm:ss.s + case 22: // YYYY-MM-DD[ T]hh:mm:ss.ss + case 23: // YYYY-MM-DD[ T]hh:mm:ss.sss + case 24: // YYYY-MM-DD[ T]hh:mm:ss.ssss + case 25: // YYYY-MM-DD[ T]hh:mm:ss.sssss + case 26: // YYYY-MM-DD[ T]hh:mm:ss.ssssss + case 27: // YYYY-MM-DD[ T]hh:mm:ss.sssssss + case 28: // YYYY-MM-DD[ T]hh:mm:ss.ssssssss + case 29: // YYYY-MM-DD[ T]hh:mm:ss.sssssssss + if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM_SS(s + 11, &seconds_since_midnight))) { + return false; + } + break; + default: + return false; + } + + seconds_since_epoch += seconds_since_midnight; + seconds_since_epoch += zone_offset; + + if (length <= 19) { + *out = util::CastSecondsToUnit(unit, seconds_since_epoch.count()); + return true; + } + + if (ARROW_PREDICT_FALSE(s[19] != '.')) { + return false; + } + + uint32_t subseconds = 0; + 
+#if defined(_WIN32) || defined(ARROW_WITH_MUSL)
+static constexpr bool kStrptimeSupportsZone = false;
+#else
+static constexpr bool kStrptimeSupportsZone = true;
+#endif
+
+/// \brief Returns time since the UNIX epoch in the requested unit
+static inline bool ParseTimestampStrptime(const char* buf, size_t length,
+                                          const char* format, bool ignore_time_in_day,
+                                          bool allow_trailing_chars, TimeUnit::type unit,
+                                          int64_t* out) {
+  // NOTE: strptime() is more than 10x faster than arrow_vendored::date::parse().
+  // The buffer may not be nul-terminated
+  std::string clean_copy(buf, length);
+  struct tm result;
+  memset(&result, 0, sizeof(struct tm));
+#ifdef _WIN32
+  char* ret = arrow_strptime(clean_copy.c_str(), format, &result);
+#else
+  char* ret = strptime(clean_copy.c_str(), format, &result);
+#endif
+  if (ret == NULLPTR) {
+    return false;
+  }
+  if (!allow_trailing_chars &&
+      static_cast<size_t>(ret - clean_copy.c_str()) != length) {
+    return false;
+  }
+  // ignore the time part
+  arrow_vendored::date::sys_seconds secs =
+      arrow_vendored::date::sys_days(arrow_vendored::date::year(result.tm_year + 1900) /
+                                     (result.tm_mon + 1) / std::max(result.tm_mday, 1));
+  if (!ignore_time_in_day) {
+    secs += (std::chrono::hours(result.tm_hour) + std::chrono::minutes(result.tm_min) +
+             std::chrono::seconds(result.tm_sec));
+#ifndef _WIN32
+    secs -= std::chrono::seconds(result.tm_gmtoff);
+#endif
+  }
+  *out = util::CastSecondsToUnit(unit, secs.time_since_epoch().count());
+  return true;
+}
+
+template <>
+struct StringConverter<TimestampType> {
+  using value_type = int64_t;
+
+  bool Convert(const TimestampType& type, const char* s, size_t length,
+               value_type* out) {
+    return ParseTimestampISO8601(s, length, type.unit(), out);
+  }
+};
+
+template <>
+struct StringConverter<DurationType>
+    : public StringToSignedIntConverterMixin<DurationType> {
+  using StringToSignedIntConverterMixin<DurationType>::StringToSignedIntConverterMixin;
+};
+
+template <typename DATE_TYPE>
+struct StringConverter<DATE_TYPE, enable_if_date<DATE_TYPE>> {
+  using value_type = typename DATE_TYPE::c_type;
+
+  using duration_type =
+      typename std::conditional<std::is_same<DATE_TYPE, Date32Type>::value,
+                                arrow_vendored::date::days,
+                                std::chrono::milliseconds>::type;
+
+  bool Convert(const DATE_TYPE& type, const char* s, size_t length, value_type* out) {
+    if (ARROW_PREDICT_FALSE(length != 10)) {
+      return false;
+    }
+
+    duration_type since_epoch;
+    if (ARROW_PREDICT_FALSE(!ParseYYYY_MM_DD(s, &since_epoch))) {
+      return false;
+    }
+
+    *out = static_cast<value_type>(since_epoch.count());
+    return true;
+  }
+};
+
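[Editor's note: a minimal sketch of the date specialization above, not part of the diff; same assumptions as the earlier example. Date32 stores days since the UNIX epoch, so the converter's value_type is int32_t here.]

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    #include "arrow/type.h"
    #include "arrow/util/value_parsing.h"

    int main() {
      arrow::Date32Type type;
      const char* s = "2000-02-29";  // must be exactly 10 characters
      int32_t days = 0;
      if (arrow::internal::StringConverter<arrow::Date32Type>{}.Convert(
              type, s, std::strlen(s), &days)) {
        std::cout << days << " days since 1970-01-01\n";  // 11016
      }
      return 0;
    }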
+template <typename TIME_TYPE>
+struct StringConverter<TIME_TYPE, enable_if_time<TIME_TYPE>> {
+  using value_type = typename TIME_TYPE::c_type;
+
+  // We allow the following formats for all units:
+  // - "hh:mm"
+  // - "hh:mm:ss"
+  //
+  // We allow the following formats for unit == MILLI, MICRO, or NANO:
+  // - "hh:mm:ss.s{1,3}"
+  //
+  // We allow the following formats for unit == MICRO, or NANO:
+  // - "hh:mm:ss.s{4,6}"
+  //
+  // We allow the following formats for unit == NANO:
+  // - "hh:mm:ss.s{7,9}"
+
+  bool Convert(const TIME_TYPE& type, const char* s, size_t length, value_type* out) {
+    const auto unit = type.unit();
+    std::chrono::seconds since_midnight;
+
+    if (length == 5) {
+      if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM(s, &since_midnight))) {
+        return false;
+      }
+      *out = static_cast<value_type>(
+          util::CastSecondsToUnit(unit, since_midnight.count()));
+      return true;
+    }
+
+    if (ARROW_PREDICT_FALSE(length < 8)) {
+      return false;
+    }
+    if (ARROW_PREDICT_FALSE(!detail::ParseHH_MM_SS(s, &since_midnight))) {
+      return false;
+    }
+
+    *out = static_cast<value_type>(
+        util::CastSecondsToUnit(unit, since_midnight.count()));
+
+    if (length == 8) {
+      return true;
+    }
+
+    if (ARROW_PREDICT_FALSE(s[8] != '.')) {
+      return false;
+    }
+
+    uint32_t subseconds_count = 0;
+    if (ARROW_PREDICT_FALSE(
+            !detail::ParseSubSeconds(s + 9, length - 9, unit, &subseconds_count))) {
+      return false;
+    }
+
+    *out += subseconds_count;
+    return true;
+  }
+};
+
+/// \brief Convenience wrappers around internal::StringConverter.
+template <typename T>
+bool ParseValue(const T& type, const char* s, size_t length,
+                typename StringConverter<T>::value_type* out) {
+  return StringConverter<T>{}.Convert(type, s, length, out);
+}
+
+template <typename T>
+enable_if_parameter_free<T, bool> ParseValue(
+    const char* s, size_t length, typename StringConverter<T>::value_type* out) {
+  static T type;
+  return StringConverter<T>{}.Convert(type, s, length, out);
+}
+
+}  // namespace internal
+}  // namespace arrow
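[Editor's note: a minimal sketch of the ParseValue convenience wrappers, not part of the diff; same assumptions as the earlier examples. The first overload takes a type instance for parameterized types; the second default-constructs parameter-free types such as arrow::BooleanType (a specialization known from the upstream header, not shown in this hunk).]

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    #include "arrow/type.h"
    #include "arrow/util/value_parsing.h"

    int main() {
      // Parameterized type: time64 needs an instance to supply the unit.
      arrow::Time64Type time_type(arrow::TimeUnit::MICRO);
      int64_t micros = 0;
      const char* t = "15:09:26.535898";
      if (arrow::internal::ParseValue(time_type, t, std::strlen(t), &micros)) {
        std::cout << micros << " microseconds since midnight\n";
      }

      // Parameter-free type: the second overload constructs it internally.
      bool flag = false;
      if (arrow::internal::ParseValue<arrow::BooleanType>("true", 4, &flag)) {
        std::cout << std::boolalpha << flag << "\n";
      }
      return 0;
    }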
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h
new file mode 100644
index 0000000000000000000000000000000000000000..1498d2085a03d8555305823b29945d5dafda3770
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/visibility.h
@@ -0,0 +1,86 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+// Windows
+
+#if defined(_MSC_VER)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#if defined(__cplusplus) && defined(__GNUC__) && !defined(__clang__)
+// Use C++ attribute syntax where possible to avoid GCC parser bug
+// (https://stackoverflow.com/questions/57993818/gcc-how-to-combine-attribute-dllexport-and-nodiscard-in-a-struct-de)
+#define ARROW_DLLEXPORT [[gnu::dllexport]]
+#define ARROW_DLLIMPORT [[gnu::dllimport]]
+#else
+#define ARROW_DLLEXPORT __declspec(dllexport)
+#define ARROW_DLLIMPORT __declspec(dllimport)
+#endif
+
+// __declspec(dllexport) even when #included by a non-arrow source
+#define ARROW_FORCE_EXPORT ARROW_DLLEXPORT
+
+#ifdef ARROW_STATIC
+#define ARROW_EXPORT
+#define ARROW_FRIEND_EXPORT
+#define ARROW_TEMPLATE_EXPORT
+#elif defined(ARROW_EXPORTING)
+#define ARROW_EXPORT ARROW_DLLEXPORT
+// For some reason [[gnu::dllexport]] doesn't work well with friend declarations
+#define ARROW_FRIEND_EXPORT __declspec(dllexport)
+#define ARROW_TEMPLATE_EXPORT ARROW_DLLEXPORT
+#else
+#define ARROW_EXPORT ARROW_DLLIMPORT
+#define ARROW_FRIEND_EXPORT __declspec(dllimport)
+#define ARROW_TEMPLATE_EXPORT ARROW_DLLIMPORT
+#endif
+
+#define ARROW_NO_EXPORT
+
+#else
+
+// Non-Windows
+
+#if defined(__cplusplus) && (defined(__GNUC__) || defined(__clang__))
+#ifndef ARROW_EXPORT
+#define ARROW_EXPORT [[gnu::visibility("default")]]
+#endif
+#ifndef ARROW_NO_EXPORT
+#define ARROW_NO_EXPORT [[gnu::visibility("hidden")]]
+#endif
+#else
+// Not C++, or not gcc/clang
+#ifndef ARROW_EXPORT
+#define ARROW_EXPORT
+#endif
+#ifndef ARROW_NO_EXPORT
+#define ARROW_NO_EXPORT
+#endif
+#endif
+
+#define ARROW_FRIEND_EXPORT
+#define ARROW_TEMPLATE_EXPORT
+
+// [[gnu::visibility("default")]] even when #included by a non-arrow source
+#define ARROW_FORCE_EXPORT [[gnu::visibility("default")]]
+
+#endif  // Non-Windows
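[Editor's note: a minimal sketch of how these macros are typically applied. The class and function names below are hypothetical, not from Arrow; only the ARROW_EXPORT / ARROW_NO_EXPORT macros come from the header above.]

    #include <iostream>

    #include "arrow/util/visibility.h"

    // Exported from the shared library when ARROW_EXPORTING is defined at
    // build time; dllimport (Windows) or default visibility (elsewhere)
    // otherwise. "WidgetReader" is a hypothetical name for illustration.
    class ARROW_EXPORT WidgetReader {
     public:
      void Read() { std::cout << "reading\n"; }
    };

    // Hidden from the shared library's ABI on non-Windows builds.
    ARROW_NO_EXPORT void InternalHelper() {}

    int main() {
      WidgetReader{}.Read();
      return 0;
    }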
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h
new file mode 100644
index 0000000000000000000000000000000000000000..2949ac4ab768890d866be6133babbe6f92459ab3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/windows_fixup.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This header needs to be included multiple times.
+
+#ifdef _WIN32
+
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+
+// The Windows API defines macros from *File resolving to either
+// *FileA or *FileW. Need to undo them.
+#ifdef CopyFile
+#undef CopyFile
+#endif
+#ifdef CreateFile
+#undef CreateFile
+#endif
+#ifdef DeleteFile
+#undef DeleteFile
+#endif
+
+// Other annoying Windows macro definitions...
+#ifdef IN
+#undef IN
+#endif
+#ifdef OUT
+#undef OUT
+#endif
+
+// Note that we can't undefine OPTIONAL, because it can be used in other
+// Windows headers...
+
+#endif  // _WIN32
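[Editor's note: a minimal sketch of why the min/max undefs above matter, not part of the diff. <windows.h> defines min and max as function-like macros, which breaks expressions like std::numeric_limits<T>::max() unless NOMINMAX is set or the macros are undefined, as this header does.]

    #ifdef _WIN32
    #include <windows.h>
    #endif
    #include <iostream>
    #include <limits>

    #include "arrow/util/windows_fixup.h"  // undoes min/max, CreateFile, etc.

    int main() {
      // Without the fixup (or NOMINMAX), "max" would be macro-expanded by the
      // preprocessor on Windows and this line would fail to compile.
      std::cout << std::numeric_limits<int>::max() << "\n";
      return 0;
    }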